Merge from Chromium at DEPS revision r167172

This commit was generated by merge_to_master.py.

Change-Id: Iead6b4948cd90f0aac77a0e5e2b6c1749577569b
diff --git a/Tools/Scripts/webkitpy/__init__.py b/Tools/Scripts/webkitpy/__init__.py
new file mode 100644
index 0000000..b376bf2
--- /dev/null
+++ b/Tools/Scripts/webkitpy/__init__.py
@@ -0,0 +1,13 @@
+# Required for Python to search this directory for module files
+
+# Keep this file free of any code or import statements that could
+# cause an error or emit a log message.  This ensures that calling code
+# can import initialization code from webkitpy before any errors or log
+# messages are triggered by code in this file.  Initialization code can
+# include things like version-checking code and logging configuration
+# code.
+#
+# We do not execute any version-checking or logging configuration code
+# in this file, so that callers can opt in as they wish.  This also
+# allows different callers to choose different initialization code,
+# as necessary.
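
A rough sketch of the kind of caller-side, opt-in initialization the comment above describes (a version check plus logging configuration); the function name and version bound are illustrative only, not something this patch defines:

    # Hypothetical caller-side initialization; names and the version bound are illustrative.
    import logging
    import sys

    def init_webkitpy():
        # Version check: fail early on an unsupported interpreter.
        if sys.version_info < (2, 5):
            raise RuntimeError("This tool requires Python 2.5 or later.")
        # Logging configuration: callers opt in explicitly.
        logging.basicConfig(level=logging.INFO)
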
diff --git a/Tools/Scripts/webkitpy/bindings/__init__.py b/Tools/Scripts/webkitpy/bindings/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/Tools/Scripts/webkitpy/bindings/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/Tools/Scripts/webkitpy/bindings/main.py b/Tools/Scripts/webkitpy/bindings/main.py
new file mode 100644
index 0000000..15884bb
--- /dev/null
+++ b/Tools/Scripts/webkitpy/bindings/main.py
@@ -0,0 +1,173 @@
+#!/usr/bin/python
+# Copyright (C) 2011 Google Inc.  All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+import os
+import os.path
+import shutil
+import subprocess
+import sys
+import tempfile
+from webkitpy.common.checkout.scm.detection import detect_scm_system
+from webkitpy.common.system.executive import ScriptError
+
+
+class BindingsTests:
+
+    def __init__(self, reset_results, generators, executive):
+        self.reset_results = reset_results
+        self.generators = generators
+        self.executive = executive
+
+    def generate_from_idl(self, generator, idl_file, output_directory, supplemental_dependency_file):
+        cmd = ['perl', '-w',
+               '-IWebCore/bindings/scripts',
+               'WebCore/bindings/scripts/generate-bindings.pl',
+               # IDL include directories (path relative to generate-bindings.pl)
+               '--include', '.',
+               '--defines', 'TESTING_%s' % generator,
+               '--generator', generator,
+               '--outputDir', output_directory,
+               '--supplementalDependencyFile', supplemental_dependency_file,
+               idl_file]
+
+        exit_code = 0
+        try:
+            output = self.executive.run_command(cmd)
+            if output:
+                print output
+        except ScriptError, e:
+            print e.output
+            exit_code = e.exit_code
+        return exit_code
+
+    def generate_supplemental_dependency(self, input_directory, supplemental_dependency_file):
+        idl_files_list = tempfile.mkstemp()
+        for input_file in os.listdir(input_directory):
+            (name, extension) = os.path.splitext(input_file)
+            if extension != '.idl':
+                continue
+            os.write(idl_files_list[0], os.path.join(input_directory, input_file) + "\n")
+        os.close(idl_files_list[0])
+
+        cmd = ['perl', '-w',
+               '-IWebCore/bindings/scripts',
+               'WebCore/bindings/scripts/preprocess-idls.pl',
+               '--idlFilesList', idl_files_list[1],
+               '--defines', '',
+               '--supplementalDependencyFile', supplemental_dependency_file,
+               '--idlAttributesFile', 'WebCore/bindings/scripts/IDLAttributes.txt']
+
+        exit_code = 0
+        try:
+            output = self.executive.run_command(cmd)
+            if output:
+                print output
+        except ScriptError, e:
+            print e.output
+            exit_code = e.exit_code
+        os.remove(idl_files_list[1])
+        return exit_code
+
+    def detect_changes(self, generator, work_directory, reference_directory):
+        changes_found = False
+        for output_file in os.listdir(work_directory):
+            cmd = ['diff',
+                   '-u',
+                   '-N',
+                   os.path.join(reference_directory, output_file),
+                   os.path.join(work_directory, output_file)]
+
+            exit_code = 0
+            try:
+                output = self.executive.run_command(cmd)
+            except ScriptError, e:
+                output = e.output
+                exit_code = e.exit_code
+
+            if exit_code or output:
+                print 'FAIL: (%s) %s' % (generator, output_file)
+                print output
+                changes_found = True
+            else:
+                print 'PASS: (%s) %s' % (generator, output_file)
+        return changes_found
+
+    def run_tests(self, generator, input_directory, reference_directory, supplemental_dependency_file):
+        work_directory = reference_directory
+
+        passed = True
+        for input_file in os.listdir(input_directory):
+            (name, extension) = os.path.splitext(input_file)
+            if extension != '.idl':
+                continue
+            # Generate output into the work directory (either the given
+            # reference directory when resetting results, or a temp one otherwise).
+            if not self.reset_results:
+                work_directory = tempfile.mkdtemp()
+
+            if self.generate_from_idl(generator,
+                                      os.path.join(input_directory, input_file),
+                                      work_directory,
+                                      supplemental_dependency_file):
+                passed = False
+
+            if self.reset_results:
+                print "Reset results: (%s) %s" % (generator, input_file)
+                continue
+
+            # Detect changes
+            if self.detect_changes(generator, work_directory, reference_directory):
+                passed = False
+            shutil.rmtree(work_directory)
+
+        return passed
+
+    def main(self):
+        current_scm = detect_scm_system(os.curdir)
+        os.chdir(os.path.join(current_scm.checkout_root, 'Source'))
+
+        all_tests_passed = True
+
+        input_directory = os.path.join('WebCore', 'bindings', 'scripts', 'test')
+        supplemental_dependency_file = tempfile.mkstemp()[1]
+        if self.generate_supplemental_dependency(input_directory, supplemental_dependency_file):
+            print 'Failed to generate a supplemental dependency file.'
+            os.remove(supplemental_dependency_file)
+            return -1
+
+        for generator in self.generators:
+            input_directory = os.path.join('WebCore', 'bindings', 'scripts', 'test')
+            reference_directory = os.path.join('WebCore', 'bindings', 'scripts', 'test', generator)
+            if not self.run_tests(generator, input_directory, reference_directory, supplemental_dependency_file):
+                all_tests_passed = False
+
+        os.remove(supplemental_dependency_file)
+        print ''
+        if all_tests_passed:
+            print 'All tests PASS!'
+            return 0
+        else:
+            print 'Some tests FAIL! (To update the reference files, execute "run-bindings-tests --reset-results")'
+            return -1
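
For orientation, a minimal sketch of how a driver such as the run-bindings-tests script mentioned in the failure message above might wire this class up; the option handling, the generator list, and the Executive import path are assumptions rather than something this patch defines:

    # Hypothetical driver; flags and generator names are illustrative.
    import sys
    from optparse import OptionParser

    from webkitpy.bindings.main import BindingsTests
    from webkitpy.common.system.executive import Executive  # assumed to live beside ScriptError

    parser = OptionParser()
    parser.add_option('--reset-results', action='store_true', default=False)
    (options, args) = parser.parse_args()
    generators = ['JS', 'V8']  # assumed subset; the real caller supplies the full list
    sys.exit(BindingsTests(options.reset_results, generators, Executive()).main())
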
diff --git a/Tools/Scripts/webkitpy/common/__init__.py b/Tools/Scripts/webkitpy/common/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/Tools/Scripts/webkitpy/common/checkout/__init__.py b/Tools/Scripts/webkitpy/common/checkout/__init__.py
new file mode 100644
index 0000000..f385ae4
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/checkout/__init__.py
@@ -0,0 +1,3 @@
+# Required for Python to search this directory for module files
+
+from .checkout import Checkout
diff --git a/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer.py b/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer.py
new file mode 100644
index 0000000..d2d53a5
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer.py
@@ -0,0 +1,274 @@
+# Copyright (C) 2011, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import copy
+import logging
+
+
+_log = logging.getLogger(__name__)
+
+
+# Yes, it's a hypergraph.
+# FIXME: Should this function live with the ports somewhere?
+# Perhaps this should move onto PortFactory?
+def _baseline_search_hypergraph(host, port_names):
+    hypergraph = {}
+
+    # These edges in the hypergraph aren't visible on build.webkit.org,
+    # but they impose constraints on how we optimize baselines.
+    hypergraph.update(_VIRTUAL_PORTS)
+
+    # FIXME: Should we get this constant from somewhere?
+    fallback_path = ['LayoutTests']
+
+    port_factory = host.port_factory
+    for port_name in port_names:
+        port = port_factory.get(port_name)
+        webkit_base = port.webkit_base()
+        search_path = port.baseline_search_path()
+        if search_path:
+            hypergraph[port_name] = [host.filesystem.relpath(path, webkit_base) for path in search_path] + fallback_path
+    return hypergraph
+
+
+_VIRTUAL_PORTS = {
+    'mac-future': ['LayoutTests/platform/mac-future', 'LayoutTests/platform/mac', 'LayoutTests'],
+    'win-future': ['LayoutTests/platform/win-future', 'LayoutTests/platform/win', 'LayoutTests'],
+    'qt-unknown': ['LayoutTests/platform/qt-unknown', 'LayoutTests/platform/qt', 'LayoutTests'],
+}
+
+
+# FIXME: Should this function be somewhere more general?
+def _invert_dictionary(dictionary):
+    inverted_dictionary = {}
+    for key, value in dictionary.items():
+        if inverted_dictionary.get(value):
+            inverted_dictionary[value].append(key)
+        else:
+            inverted_dictionary[value] = [key]
+    return inverted_dictionary
+
+
+class BaselineOptimizer(object):
+    def __init__(self, host, port_names):
+        self._host = host
+        self._filesystem = self._host.filesystem
+        self._scm = self._host.scm()
+        self._hypergraph = _baseline_search_hypergraph(host, port_names)
+        self._directories = reduce(set.union, map(set, self._hypergraph.values()))
+
+    def read_results_by_directory(self, baseline_name):
+        results_by_directory = {}
+        for directory in self._directories:
+            path = self._filesystem.join(self._scm.checkout_root, directory, baseline_name)
+            if self._filesystem.exists(path):
+                results_by_directory[directory] = self._filesystem.sha1(path)
+        return results_by_directory
+
+    def _results_by_port_name(self, results_by_directory):
+        results_by_port_name = {}
+        for port_name, search_path in self._hypergraph.items():
+            for directory in search_path:
+                if directory in results_by_directory:
+                    results_by_port_name[port_name] = results_by_directory[directory]
+                    break
+        return results_by_port_name
+
+    def _most_specific_common_directory(self, port_names):
+        paths = [self._hypergraph[port_name] for port_name in port_names]
+        common_directories = reduce(set.intersection, map(set, paths))
+
+        def score(directory):
+            return sum([path.index(directory) for path in paths])
+
+        _, directory = sorted([(score(directory), directory) for directory in common_directories])[0]
+        return directory
+
+    def _filter_port_names_by_result(self, predicate, port_names_by_result):
+        filtered_port_names_by_result = {}
+        for result, port_names in port_names_by_result.items():
+            filtered_port_names = filter(predicate, port_names)
+            if filtered_port_names:
+                filtered_port_names_by_result[result] = filtered_port_names
+        return filtered_port_names_by_result
+
+    def _place_results_in_most_specific_common_directory(self, port_names_by_result, results_by_directory):
+        for result, port_names in port_names_by_result.items():
+            directory = self._most_specific_common_directory(port_names)
+            results_by_directory[directory] = result
+
+    def _find_optimal_result_placement(self, baseline_name):
+        results_by_directory = self.read_results_by_directory(baseline_name)
+        results_by_port_name = self._results_by_port_name(results_by_directory)
+        port_names_by_result = _invert_dictionary(results_by_port_name)
+
+        new_results_by_directory = self._optimize_by_most_specific_common_directory(results_by_directory, results_by_port_name, port_names_by_result)
+        if not new_results_by_directory:
+            new_results_by_directory = self._optimize_by_pushing_results_up(results_by_directory, results_by_port_name, port_names_by_result)
+
+        return results_by_directory, new_results_by_directory
+
+    def _optimize_by_most_specific_common_directory(self, results_by_directory, results_by_port_name, port_names_by_result):
+        new_results_by_directory = {}
+        unsatisfied_port_names_by_result = port_names_by_result
+        while unsatisfied_port_names_by_result:
+            self._place_results_in_most_specific_common_directory(unsatisfied_port_names_by_result, new_results_by_directory)
+            new_results_by_port_name = self._results_by_port_name(new_results_by_directory)
+
+            def is_unsatisfied(port_name):
+                return results_by_port_name[port_name] != new_results_by_port_name[port_name]
+
+            new_unsatisfied_port_names_by_result = self._filter_port_names_by_result(is_unsatisfied, port_names_by_result)
+
+            if len(new_unsatisfied_port_names_by_result.values()) >= len(unsatisfied_port_names_by_result.values()):
+                return {}  # Frowns. We do not appear to be converging.
+            unsatisfied_port_names_by_result = new_unsatisfied_port_names_by_result
+
+        return new_results_by_directory
+
+    def _optimize_by_pushing_results_up(self, results_by_directory, results_by_port_name, port_names_by_result):
+        try:
+            results_by_directory = results_by_directory
+            best_so_far = results_by_directory
+            while True:
+                new_results_by_directory = copy.copy(best_so_far)
+                for port_name in self._hypergraph.keys():
+                    fallback_path = self._hypergraph[port_name]
+                    current_index, current_directory = self._find_in_fallbackpath(fallback_path, results_by_port_name[port_name], best_so_far)
+                    current_result = results_by_port_name[port_name]
+                    for index in range(current_index + 1, len(fallback_path)):
+                        new_directory = fallback_path[index]
+                        if not new_directory in new_results_by_directory:
+                            new_results_by_directory[new_directory] = current_result
+                            if current_directory in new_results_by_directory:
+                                del new_results_by_directory[current_directory]
+                        elif new_results_by_directory[new_directory] == current_result:
+                            if current_directory in new_results_by_directory:
+                                del new_results_by_directory[current_directory]
+                        else:
+                            # The new_directory contains a different result, so stop trying to push results up.
+                            break
+
+                if len(new_results_by_directory) >= len(best_so_far):
+                    # We've failed to improve, so give up.
+                    break
+                best_so_far = new_results_by_directory
+
+            return best_so_far
+        except KeyError as e:
+            # FIXME: KeyErrors get raised if we're missing baselines. We should handle this better.
+            return {}
+
+    def _find_in_fallbackpath(self, fallback_path, current_result, results_by_directory):
+        for index, directory in enumerate(fallback_path):
+            if directory in results_by_directory and (results_by_directory[directory] == current_result):
+                return index, directory
+        assert False, "result %s not found in fallback_path %s, %s" % (current_result, fallback_path, results_by_directory)
+
+    def _filtered_results_by_port_name(self, results_by_directory):
+        results_by_port_name = self._results_by_port_name(results_by_directory)
+        for port_name in _VIRTUAL_PORTS.keys():
+            if port_name in results_by_port_name:
+                del results_by_port_name[port_name]
+        return results_by_port_name
+
+    def _platform(self, filename):
+        platform_dir = 'LayoutTests' + self._filesystem.sep + 'platform' + self._filesystem.sep
+        if filename.startswith(platform_dir):
+            return filename.replace(platform_dir, '').split(self._filesystem.sep)[0]
+        platform_dir = self._filesystem.join(self._scm.checkout_root, platform_dir)
+        if filename.startswith(platform_dir):
+            return filename.replace(platform_dir, '').split(self._filesystem.sep)[0]
+        return '(generic)'
+
+    def _move_baselines(self, baseline_name, results_by_directory, new_results_by_directory):
+        data_for_result = {}
+        for directory, result in results_by_directory.items():
+            if not result in data_for_result:
+                source = self._filesystem.join(self._scm.checkout_root, directory, baseline_name)
+                data_for_result[result] = self._filesystem.read_binary_file(source)
+
+        file_names = []
+        for directory, result in results_by_directory.items():
+            if new_results_by_directory.get(directory) != result:
+                file_names.append(self._filesystem.join(self._scm.checkout_root, directory, baseline_name))
+        if file_names:
+            _log.debug("    Deleting:")
+            for platform_dir in sorted(self._platform(filename) for filename in file_names):
+                _log.debug("      " + platform_dir)
+            self._scm.delete_list(file_names)
+        else:
+            _log.debug("    (Nothing to delete)")
+
+        file_names = []
+        for directory, result in new_results_by_directory.items():
+            if results_by_directory.get(directory) != result:
+                destination = self._filesystem.join(self._scm.checkout_root, directory, baseline_name)
+                self._filesystem.maybe_make_directory(self._filesystem.split(destination)[0])
+                self._filesystem.write_binary_file(destination, data_for_result[result])
+                file_names.append(destination)
+        if file_names:
+            _log.debug("    Adding:")
+            for platform_dir in sorted(self._platform(filename) for filename in file_names):
+                _log.debug("      " + platform_dir)
+            self._scm.add_list(file_names)
+        else:
+            _log.debug("    (Nothing to add)")
+
+    def directories_by_result(self, baseline_name):
+        results_by_directory = self.read_results_by_directory(baseline_name)
+        return _invert_dictionary(results_by_directory)
+
+    def write_by_directory(self, results_by_directory, writer, indent):
+        for path in sorted(results_by_directory):
+            writer("%s%s: %s" % (indent, self._platform(path), results_by_directory[path][0:6]))
+
+    def optimize(self, baseline_name):
+        basename = self._filesystem.basename(baseline_name)
+        results_by_directory, new_results_by_directory = self._find_optimal_result_placement(baseline_name)
+        self.new_results_by_directory = new_results_by_directory
+        if new_results_by_directory == results_by_directory:
+            if new_results_by_directory:
+                _log.debug("  %s: (already optimal)" % basename)
+                self.write_by_directory(results_by_directory, _log.debug, "    ")
+            else:
+                _log.debug("  %s: (no baselines found)" % basename)
+            return True
+        if self._filtered_results_by_port_name(results_by_directory) != self._filtered_results_by_port_name(new_results_by_directory):
+            _log.warning("  %s: optimization failed" % basename)
+            self.write_by_directory(results_by_directory, _log.warning, "      ")
+            return False
+
+        _log.debug("  %s:" % basename)
+        _log.debug("    Before: ")
+        self.write_by_directory(results_by_directory, _log.debug, "      ")
+        _log.debug("    After: ")
+        self.write_by_directory(new_results_by_directory, _log.debug, "      ")
+
+        self._move_baselines(baseline_name, results_by_directory, new_results_by_directory)
+        return True
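
A minimal usage sketch mirroring the unit tests that follow (the MockHost import matches the one the tests below use; the baseline name is illustrative):

    # Minimal sketch using the mock host, as the unit tests below do.
    from webkitpy.common.checkout.baselineoptimizer import BaselineOptimizer
    from webkitpy.common.host_mock import MockHost

    host = MockHost()
    optimizer = BaselineOptimizer(host, host.port_factory.all_port_names())
    if not optimizer.optimize('mock-baseline.png'):
        print 'optimization failed'
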
diff --git a/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer_unittest.py b/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer_unittest.py
new file mode 100644
index 0000000..a5fd065
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer_unittest.py
@@ -0,0 +1,194 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+import unittest
+
+from webkitpy.common.checkout.baselineoptimizer import BaselineOptimizer
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.host_mock import MockHost
+
+
+class TestBaselineOptimizer(BaselineOptimizer):
+    def __init__(self, mock_results_by_directory):
+        host = MockHost()
+        BaselineOptimizer.__init__(self, host, host.port_factory.all_port_names())
+        self._mock_results_by_directory = mock_results_by_directory
+
+    # We override this method for testing so we don't have to construct an
+    # elaborate mock file system.
+    def read_results_by_directory(self, baseline_name):
+        return self._mock_results_by_directory
+
+    def _move_baselines(self, baseline_name, results_by_directory, new_results_by_directory):
+        self.new_results_by_directory = new_results_by_directory
+
+
+class BaselineOptimizerTest(unittest.TestCase):
+    def _assertOptimization(self, results_by_directory, expected_new_results_by_directory):
+        baseline_optimizer = TestBaselineOptimizer(results_by_directory)
+        self.assertTrue(baseline_optimizer.optimize('mock-baseline.png'))
+        self.assertEqual(baseline_optimizer.new_results_by_directory, expected_new_results_by_directory)
+
+    def _assertOptimizationFailed(self, results_by_directory):
+        baseline_optimizer = TestBaselineOptimizer(results_by_directory)
+        self.assertFalse(baseline_optimizer.optimize('mock-baseline.png'))
+
+    def test_move_baselines(self):
+        host = MockHost()
+        host.filesystem.write_binary_file('/mock-checkout/LayoutTests/platform/chromium-win/another/test-expected.txt', 'result A')
+        host.filesystem.write_binary_file('/mock-checkout/LayoutTests/platform/chromium-mac/another/test-expected.txt', 'result A')
+        host.filesystem.write_binary_file('/mock-checkout/LayoutTests/platform/chromium/another/test-expected.txt', 'result B')
+        baseline_optimizer = BaselineOptimizer(host, host.port_factory.all_port_names())
+        baseline_optimizer._move_baselines('another/test-expected.txt', {
+            'LayoutTests/platform/chromium-win': 'aaa',
+            'LayoutTests/platform/chromium-mac': 'aaa',
+            'LayoutTests/platform/chromium': 'bbb',
+        }, {
+            'LayoutTests/platform/chromium': 'aaa',
+        })
+        self.assertEqual(host.filesystem.read_binary_file('/mock-checkout/LayoutTests/platform/chromium/another/test-expected.txt'), 'result A')
+
+    def test_chromium_linux_redundant_with_win(self):
+        self._assertOptimization({
+            'LayoutTests/platform/chromium-win': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
+            'LayoutTests/platform/chromium-linux': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
+        }, {
+            'LayoutTests/platform/chromium-win': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
+        })
+
+    def test_no_add_mac_future(self):
+        self._assertOptimization({
+            'LayoutTests/platform/mac': '29a1715a6470d5dd9486a142f609708de84cdac8',
+            'LayoutTests/platform/win-xp': '453e67177a75b2e79905154ece0efba6e5bfb65d',
+            'LayoutTests/platform/mac-lion': 'c43eaeb358f49d5e835236ae23b7e49d7f2b089f',
+            'LayoutTests/platform/chromium-mac': 'a9ba153c700a94ae1b206d8e4a75a621a89b4554',
+        }, {
+            'LayoutTests/platform/mac': '29a1715a6470d5dd9486a142f609708de84cdac8',
+            'LayoutTests/platform/win-xp': '453e67177a75b2e79905154ece0efba6e5bfb65d',
+            'LayoutTests/platform/mac-lion': 'c43eaeb358f49d5e835236ae23b7e49d7f2b089f',
+            'LayoutTests/platform/chromium-mac': 'a9ba153c700a94ae1b206d8e4a75a621a89b4554',
+        })
+
+    def test_chromium_covers_mac_win_linux(self):
+        self._assertOptimization({
+            'LayoutTests/platform/chromium-mac': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
+            'LayoutTests/platform/chromium-win': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
+            'LayoutTests/platform/chromium-linux': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
+        }, {
+            'LayoutTests/platform/chromium': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
+        })
+
+    def test_mac_future(self):
+        self._assertOptimization({
+            'LayoutTests/platform/mac-lion': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
+        }, {
+            'LayoutTests/platform/mac-lion': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
+        })
+
+    def test_qt_unknown(self):
+        self._assertOptimization({
+            'LayoutTests/platform/qt': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
+        }, {
+            'LayoutTests/platform/qt': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
+        })
+
+    def test_win_does_not_drop_to_win_7sp0(self):
+        self._assertOptimization({
+            'LayoutTests/platform/win': '1',
+            'LayoutTests/platform/mac': '2',
+            'LayoutTests/platform/gtk': '3',
+            'LayoutTests/platform/qt': '4',
+            'LayoutTests/platform/chromium': '5',
+        }, {
+            'LayoutTests/platform/win': '1',
+            'LayoutTests/platform/mac': '2',
+            'LayoutTests/platform/gtk': '3',
+            'LayoutTests/platform/qt': '4',
+            'LayoutTests/platform/chromium': '5',
+        })
+
+    def test_common_directory_includes_root(self):
+        # This test case checks that we don't throw an exception when we fail
+        # to optimize.
+        self._assertOptimizationFailed({
+            'LayoutTests/platform/gtk': 'e8608763f6241ddacdd5c1ef1973ba27177d0846',
+            'LayoutTests/platform/qt': 'bcbd457d545986b7abf1221655d722363079ac87',
+            'LayoutTests/platform/chromium-win': '3764ac11e1f9fbadd87a90a2e40278319190a0d3',
+            'LayoutTests/platform/mac': 'e8608763f6241ddacdd5c1ef1973ba27177d0846',
+        })
+
+        self._assertOptimization({
+            'LayoutTests/platform/chromium-win': '23a30302a6910f8a48b1007fa36f3e3158341834',
+            'LayoutTests': '9c876f8c3e4cc2aef9519a6c1174eb3432591127',
+            'LayoutTests/platform/chromium-mac': '23a30302a6910f8a48b1007fa36f3e3158341834',
+            'LayoutTests/platform/chromium': '1',
+        }, {
+            'LayoutTests/platform/chromium': '23a30302a6910f8a48b1007fa36f3e3158341834',
+            'LayoutTests': '9c876f8c3e4cc2aef9519a6c1174eb3432591127',
+        })
+
+    def test_complex_shadowing(self):
+        # This test relies on OS-specific functionality, so it doesn't work on Windows.
+        # FIXME: What functionality does this rely on?  When can we remove this check?
+        if sys.platform == 'win32':
+            return
+        self._assertOptimization({
+            'LayoutTests/platform/chromium-win': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
+            'LayoutTests/platform/mac': '5daa78e55f05d9f0d1bb1f32b0cd1bc3a01e9364',
+            'LayoutTests/platform/chromium-win-xp': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
+            'LayoutTests/platform/mac-lion': '7ad045ece7c030e2283c5d21d9587be22bcba56e',
+            'LayoutTests/platform/chromium-win': 'f83af9732ce74f702b8c9c4a3d9a4c6636b8d3bd',
+            'LayoutTests/platform/win-xp': '5b1253ef4d5094530d5f1bc6cdb95c90b446bec7',
+            'LayoutTests/platform/chromium-linux': 'f52fcdde9e4be8bd5142171cd859230bd4471036',
+        }, {
+            'LayoutTests/platform/chromium-win': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
+            'LayoutTests/platform/mac': '5daa78e55f05d9f0d1bb1f32b0cd1bc3a01e9364',
+            'LayoutTests/platform/chromium-win-xp': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
+            'LayoutTests/platform/mac-lion': '7ad045ece7c030e2283c5d21d9587be22bcba56e',
+            'LayoutTests/platform/chromium-win': 'f83af9732ce74f702b8c9c4a3d9a4c6636b8d3bd',
+            'LayoutTests/platform/win-xp': '5b1253ef4d5094530d5f1bc6cdb95c90b446bec7',
+            'LayoutTests/platform/chromium-linux': 'f52fcdde9e4be8bd5142171cd859230bd4471036'
+        })
+
+    def test_virtual_ports_filtered(self):
+        self._assertOptimization({
+            'LayoutTests/platform/chromium-mac': '1',
+            'LayoutTests/platform/chromium-mac-snowleopard': '1',
+            'LayoutTests/platform/chromium-win': '2',
+            'LayoutTests/platform/gtk': '3',
+            'LayoutTests/platform/efl': '3',
+            'LayoutTests/platform/qt': '4',
+            'LayoutTests/platform/mac': '5',
+        }, {
+            'LayoutTests/platform/chromium-mac': '1',
+            'LayoutTests/platform/chromium-win': '2',
+            'LayoutTests': '3',
+            'LayoutTests/platform/qt': '4',
+            'LayoutTests/platform/mac': '5',
+        })
diff --git a/Tools/Scripts/webkitpy/common/checkout/changelog.py b/Tools/Scripts/webkitpy/common/checkout/changelog.py
new file mode 100644
index 0000000..ae7b71f
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/checkout/changelog.py
@@ -0,0 +1,377 @@
+# Copyright (C) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# WebKit's Python module for parsing and modifying ChangeLog files
+
+import codecs
+import fileinput  # in-place file editing for set_reviewer and related methods
+import re
+import textwrap
+
+from webkitpy.common.config.committers import CommitterList
+from webkitpy.common.config.committers import Account
+import webkitpy.common.config.urls as config_urls
+from webkitpy.common.system.deprecated_logging import log
+
+
+# FIXME: parse_bug_id_from_changelog should not be a free function.
+# Parse the bug ID out of a Changelog message based on the format that is
+# used by prepare-ChangeLog
+def parse_bug_id_from_changelog(message):
+    if not message:
+        return None
+    match = re.search("^\s*" + config_urls.bug_url_short + "$", message, re.MULTILINE)
+    if match:
+        return int(match.group('bug_id'))
+    match = re.search("^\s*" + config_urls.bug_url_long + "$", message, re.MULTILINE)
+    if match:
+        return int(match.group('bug_id'))
+    # We weren't able to find a bug URL in the format used by prepare-ChangeLog. Fall back to the
+    # first bug URL found anywhere in the message.
+    return config_urls.parse_bug_id(message)
+
+
+class ChangeLogEntry(object):
+    # e.g. 2009-06-03  Eric Seidel  <eric@webkit.org>
+    date_line_regexp = r'^(?P<date>\d{4}-\d{2}-\d{2})\s+(?P<authors>(?P<name>[^<]+?)\s+<(?P<email>[^<>]+)>.*?)$'
+
+    # e.g. * Source/WebCore/page/EventHandler.cpp: Implement FooBarQuux.
+    touched_files_regexp = r'^\s*\*\s*(?P<file>[A-Za-z0-9_\-\./\\]+)\s*\:'
+
+    # e.g. Reviewed by Darin Adler.
+    # (Discard everything after the first period to match more invalid lines.)
+    reviewed_by_regexp = r'^\s*((\w+\s+)+and\s+)?(Review|Rubber(\s*|-)stamp)(s|ed)?\s+([a-z]+\s+)*?by\s+(?P<reviewer>.*?)[\.,]?\s*$'
+
+    reviewed_byless_regexp = r'^\s*((Review|Rubber(\s*|-)stamp)(s|ed)?|RS)(\s+|\s*=\s*)(?P<reviewer>([A-Z]\w+\s*)+)[\.,]?\s*$'
+
+    reviewer_name_noise_regexp = re.compile(r"""
+    (\s+((tweaked\s+)?and\s+)?(landed|committed|okayed)\s+by.+) # "landed by", "committed by", "okayed by", etc...
+    |(^(Reviewed\s+)?by\s+) # extra "Reviewed by" or "by"
+    |([(<]\s*[\w_\-\.]+@[\w_\-\.]+[>)]) # email addresses
+    |([(<](https?://?bugs.)webkit.org[^>)]+[>)]) # bug url
+    |("[^"]+") # wresler names like 'Sean/Shawn/Shaun' in 'Geoffrey "Sean/Shawn/Shaun" Garen'
+    |('[^']+') # wresler names like "The Belly" in "Sam 'The Belly' Weinig"
+    |((Mr|Ms|Dr|Mrs|Prof)\.(\s+|$))
+    """, re.IGNORECASE | re.VERBOSE)
+
+    reviewer_name_casesensitive_noise_regexp = re.compile(r"""
+    ((\s+|^)(and\s+)?([a-z-]+\s+){5,}by\s+) # e.g. "and given a good once-over by"
+    |(\(\s*(?!(and|[A-Z])).+\)) # any parenthesis that doesn't start with "and" or a capital letter
+    |(with(\s+[a-z-]+)+) # phrases with "with no hesitation" in "Sam Weinig with no hesitation"
+    """, re.VERBOSE)
+
+    reviewer_name_noise_needing_a_backreference_regexp = re.compile(r"""
+    (\S\S)\.(?:(\s.+|$)) # Text after the two word characters (don't match initials) and a period followed by a space.
+    """, re.IGNORECASE | re.VERBOSE)
+
+    nobody_regexp = re.compile(r"""(\s+|^)nobody(
+    ((,|\s+-)?\s+(\w+\s+)+fix.*) # e.g. nobody, build fix...
+    |(\s*\([^)]+\).*) # NOBODY (..)...
+    |$)""", re.IGNORECASE | re.VERBOSE)
+
+    # e.g. == Rolled over to ChangeLog-2011-02-16 ==
+    rolled_over_regexp = r'^== Rolled over to ChangeLog-\d{4}-\d{2}-\d{2} ==$'
+
+    # e.g. git-svn-id: http://svn.webkit.org/repository/webkit/trunk@96161 268f45cc-cd09-0410-ab3c-d52691b4dbfc
+    svn_id_regexp = r'git-svn-id: http://svn.webkit.org/repository/webkit/trunk@(?P<svnid>\d+) '
+
+    def __init__(self, contents, committer_list=CommitterList(), revision=None):
+        self._contents = contents
+        self._committer_list = committer_list
+        self._revision = revision
+        self._parse_entry()
+
+    @staticmethod
+    def _parse_reviewer_text(text):
+        match = re.search(ChangeLogEntry.reviewed_by_regexp, text, re.MULTILINE | re.IGNORECASE)
+        if not match:
+            # There are cases where people omit "by". We match it only if the reviewer part
+            # looks plausible, to avoid matching random lines that start with "Reviewed".
+            match = re.search(ChangeLogEntry.reviewed_byless_regexp, text, re.MULTILINE | re.IGNORECASE)
+        if not match:
+            return None, None
+
+        reviewer_text = match.group("reviewer")
+
+        reviewer_text = ChangeLogEntry.nobody_regexp.sub('', reviewer_text)
+        reviewer_text = ChangeLogEntry.reviewer_name_noise_regexp.sub('', reviewer_text)
+        reviewer_text = ChangeLogEntry.reviewer_name_casesensitive_noise_regexp.sub('', reviewer_text)
+        reviewer_text = ChangeLogEntry.reviewer_name_noise_needing_a_backreference_regexp.sub(r'\1', reviewer_text)
+        reviewer_text = reviewer_text.replace('(', '').replace(')', '')
+        reviewer_text = re.sub(r'\s\s+|[,.]\s*$', ' ', reviewer_text).strip()
+        if not len(reviewer_text):
+            return None, None
+
+        reviewer_list = ChangeLogEntry._split_contributor_names(reviewer_text)
+
+        # Get rid of "reviewers" like "even though this is just a..." in "Reviewed by Sam Weinig, even though this is just a..."
+        # and "who wrote the original code" in "Noam Rosenthal, who wrote the original code"
+        reviewer_list = [reviewer for reviewer in reviewer_list if not re.match('^who\s|^([a-z]+(\s+|\.|$)){6,}$', reviewer)]
+
+        return reviewer_text, reviewer_list
+
+    @staticmethod
+    def _split_contributor_names(text):
+        return re.split(r'\s*(?:,(?:\s+and\s+|&)?|(?:^|\s+)and\s+|&&|[/+&])\s*', text)
+
+    def _fuzz_match_reviewers(self, reviewers_text_list):
+        if not reviewers_text_list:
+            return []
+        list_of_reviewers = [self._committer_list.contributors_by_fuzzy_match(reviewer)[0] for reviewer in reviewers_text_list]
+        # Flatten lists and get rid of any reviewers with more than one candidate.
+        return [reviewers[0] for reviewers in list_of_reviewers if len(reviewers) == 1]
+
+    @staticmethod
+    def _parse_author_name_and_email(author_name_and_email):
+        match = re.match(r'(?P<name>.+?)\s+<(?P<email>[^>]+)>', author_name_and_email)
+        return {'name': match.group("name"), 'email': match.group("email")}
+
+    @staticmethod
+    def _parse_author_text(text):
+        if not text:
+            return []
+        authors = ChangeLogEntry._split_contributor_names(text)
+        assert(authors and len(authors) >= 1)
+        return [ChangeLogEntry._parse_author_name_and_email(author) for author in authors]
+
+    def _parse_entry(self):
+        match = re.match(self.date_line_regexp, self._contents, re.MULTILINE)
+        if not match:
+            log("WARNING: Creating invalid ChangeLogEntry:\n%s" % self._contents)
+
+        # FIXME: group("name") does not seem to be Unicode?  Probably due to self._contents not being unicode.
+        self._author_text = match.group("authors") if match else None
+        self._authors = ChangeLogEntry._parse_author_text(self._author_text)
+
+        self._reviewer_text, self._reviewers_text_list = ChangeLogEntry._parse_reviewer_text(self._contents)
+        self._reviewers = self._fuzz_match_reviewers(self._reviewers_text_list)
+        self._author = self._committer_list.contributor_by_email(self.author_email()) or self._committer_list.contributor_by_name(self.author_name())
+
+        self._touched_files = re.findall(self.touched_files_regexp, self._contents, re.MULTILINE)
+
+    def author_text(self):
+        return self._author_text
+
+    def revision(self):
+        return self._revision
+
+    def author_name(self):
+        return self._authors[0]['name']
+
+    def author_email(self):
+        return self._authors[0]['email']
+
+    def author(self):
+        return self._author  # Might be None
+
+    def authors(self):
+        return self._authors
+
+    # FIXME: Eventually we would like to map reviewer names to reviewer objects.
+    # See https://bugs.webkit.org/show_bug.cgi?id=26533
+    def reviewer_text(self):
+        return self._reviewer_text
+
+    # Might be None, might also not be a Reviewer!
+    def reviewer(self):
+        return self._reviewers[0] if len(self._reviewers) > 0 else None
+
+    def reviewers(self):
+        return self._reviewers
+
+    def has_valid_reviewer(self):
+        if self._reviewers_text_list:
+            for reviewer in self._reviewers_text_list:
+                reviewer = self._committer_list.committer_by_name(reviewer)
+                if reviewer:
+                    return True
+        return bool(re.search("unreviewed", self._contents, re.IGNORECASE))
+
+    def contents(self):
+        return self._contents
+
+    def bug_id(self):
+        return parse_bug_id_from_changelog(self._contents)
+
+    def touched_files(self):
+        return self._touched_files
+
+
+# FIXME: Various methods on ChangeLog should move into ChangeLogEntry instead.
+class ChangeLog(object):
+
+    def __init__(self, path):
+        self.path = path
+
+    _changelog_indent = " " * 8
+
+    @staticmethod
+    def parse_latest_entry_from_file(changelog_file):
+        """changelog_file must be a file-like object which returns
+        unicode strings.  Use codecs.open or StringIO(unicode())
+        to pass file objects to this class."""
+        date_line_regexp = re.compile(ChangeLogEntry.date_line_regexp)
+        rolled_over_regexp = re.compile(ChangeLogEntry.rolled_over_regexp)
+        entry_lines = []
+        # The first line should be a date line.
+        first_line = changelog_file.readline()
+        assert(isinstance(first_line, unicode))
+        if not date_line_regexp.match(first_line):
+            return None
+        entry_lines.append(first_line)
+
+        for line in changelog_file:
+            # If we've hit the next entry, return.
+            if date_line_regexp.match(line) or rolled_over_regexp.match(line):
+                # Remove the extra newline at the end
+                return ChangeLogEntry(''.join(entry_lines[:-1]))
+            entry_lines.append(line)
+        return None # We never found a date line!
+
+    svn_blame_regexp = re.compile(r'^(\s*(?P<revision>\d+) [^ ]+)\s*(?P<line>.*?\n)')
+
+    @staticmethod
+    def _separate_revision_and_line(line):
+        match = ChangeLog.svn_blame_regexp.match(line)
+        if not match:
+            return None, line
+        return int(match.group('revision')), match.group('line')
+
+    @staticmethod
+    def parse_entries_from_file(changelog_file):
+        """changelog_file must be a file-like object which returns
+        unicode strings.  Use codecs.open or StringIO(unicode())
+        to pass file objects to this class."""
+        date_line_regexp = re.compile(ChangeLogEntry.date_line_regexp)
+        rolled_over_regexp = re.compile(ChangeLogEntry.rolled_over_regexp)
+
+        # The first line should be a date line.
+        revision, first_line = ChangeLog._separate_revision_and_line(changelog_file.readline())
+        assert(isinstance(first_line, unicode))
+        if not date_line_regexp.match(ChangeLog.svn_blame_regexp.sub('', first_line)):
+            raise StopIteration
+
+        entry_lines = [first_line]
+        revisions_in_entry = {revision: 1} if revision != None else None
+        for line in changelog_file:
+            if revisions_in_entry:
+                revision, line = ChangeLog._separate_revision_and_line(line)
+
+            if rolled_over_regexp.match(line):
+                break
+
+            if date_line_regexp.match(line):
+                most_probable_revision = max(revisions_in_entry, key=revisions_in_entry.__getitem__) if revisions_in_entry else None
+                # Remove the extra newline at the end
+                yield ChangeLogEntry(''.join(entry_lines[:-1]), revision=most_probable_revision)
+                entry_lines = []
+                revisions_in_entry = {revision: 0}
+
+            entry_lines.append(line)
+            if revisions_in_entry:
+                revisions_in_entry[revision] = revisions_in_entry.get(revision, 0) + 1
+
+        most_probable_revision = max(revisions_in_entry, key=revisions_in_entry.__getitem__) if revisions_in_entry else None
+        yield ChangeLogEntry(''.join(entry_lines[:-1]), revision=most_probable_revision)
+
+    def latest_entry(self):
+        # ChangeLog files are always UTF-8, we read them in as such to support Reviewers with unicode in their names.
+        changelog_file = codecs.open(self.path, "r", "utf-8")
+        try:
+            return self.parse_latest_entry_from_file(changelog_file)
+        finally:
+            changelog_file.close()
+
+    # _wrap_line and _wrap_lines exist to work around
+    # http://bugs.python.org/issue1859
+
+    def _wrap_line(self, line):
+        return textwrap.fill(line,
+                             width=70,
+                             initial_indent=self._changelog_indent,
+                             # Don't break urls which may be longer than width.
+                             break_long_words=False,
+                             subsequent_indent=self._changelog_indent)
+
+    # Workaround as suggested by guido in
+    # http://bugs.python.org/issue1859#msg60040
+
+    def _wrap_lines(self, message):
+        lines = [self._wrap_line(line) for line in message.splitlines()]
+        return "\n".join(lines)
+
+    def update_with_unreviewed_message(self, message):
+        first_boilerplate_line_regexp = re.compile(
+                "%sNeed a short description \(OOPS!\)\." % self._changelog_indent)
+        removing_boilerplate = False
+        # inplace=1 creates a backup file and re-directs stdout to the file
+        for line in fileinput.FileInput(self.path, inplace=1):
+            if first_boilerplate_line_regexp.search(line):
+                message_lines = self._wrap_lines(message)
+                print first_boilerplate_line_regexp.sub(message_lines, line),
+                # Remove all the ChangeLog boilerplate before the first changed
+                # file.
+                removing_boilerplate = True
+            elif removing_boilerplate:
+                if line.find('*') >= 0: # each changed file is preceded by a *
+                    removing_boilerplate = False
+
+            if not removing_boilerplate:
+                print line,
+
+    def set_reviewer(self, reviewer):
+        latest_entry = self.latest_entry()
+        latest_entry_contents = latest_entry.contents()
+        reviewer_text = latest_entry.reviewer()
+        found_nobody = re.search("NOBODY\s*\(OOPS!\)", latest_entry_contents, re.MULTILINE)
+
+        if not found_nobody and not reviewer_text:
+            bug_url_number_of_items = len(re.findall(config_urls.bug_url_long, latest_entry_contents, re.MULTILINE))
+            bug_url_number_of_items += len(re.findall(config_urls.bug_url_short, latest_entry_contents, re.MULTILINE))
+            for line in fileinput.FileInput(self.path, inplace=1):
+                found_bug_url = re.search(config_urls.bug_url_long, line)
+                if not found_bug_url:
+                    found_bug_url = re.search(config_urls.bug_url_short, line)
+                print line,
+                if found_bug_url:
+                    if bug_url_number_of_items == 1:
+                        print "\n        Reviewed by %s." % (reviewer.encode("utf-8"))
+                    bug_url_number_of_items -= 1
+        else:
+            # inplace=1 creates a backup file and re-directs stdout to the file
+            for line in fileinput.FileInput(self.path, inplace=1):
+                # Trailing comma suppresses printing newline
+                print line.replace("NOBODY (OOPS!)", reviewer.encode("utf-8")),
+
+    def set_short_description_and_bug_url(self, short_description, bug_url):
+        message = "%s\n%s%s" % (short_description, self._changelog_indent, bug_url)
+        bug_boilerplate = "%sNeed the bug URL (OOPS!).\n" % self._changelog_indent
+        for line in fileinput.FileInput(self.path, inplace=1):
+            line = line.replace("Need a short description (OOPS!).", message.encode("utf-8"))
+            if line != bug_boilerplate:
+                print line,
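
A small usage sketch of the ChangeLog class above, assuming an existing ChangeLog file at an illustrative path:

    # Hypothetical usage; the path is illustrative.
    from webkitpy.common.checkout.changelog import ChangeLog

    changelog = ChangeLog('Tools/ChangeLog')
    entry = changelog.latest_entry()
    if entry:
        print entry.author_name(), entry.reviewer_text(), entry.bug_id()
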
diff --git a/Tools/Scripts/webkitpy/common/checkout/changelog_unittest.py b/Tools/Scripts/webkitpy/common/checkout/changelog_unittest.py
new file mode 100644
index 0000000..9591744
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/checkout/changelog_unittest.py
@@ -0,0 +1,585 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import codecs
+import os
+import tempfile
+import unittest
+
+from StringIO import StringIO
+
+from webkitpy.common.checkout.changelog import *
+
+
+class ChangeLogTest(unittest.TestCase):
+
+    _example_entry = u'''2009-08-17  Peter Kasting  <pkasting@google.com>
+
+        Reviewed by Tor Arne Vestb\xf8.
+
+        https://bugs.webkit.org/show_bug.cgi?id=27323
+        Only add Cygwin to the path when it isn't already there.  This avoids
+        causing problems for people who purposefully have non-Cygwin versions of
+        executables like svn in front of the Cygwin ones in their paths.
+
+        * DumpRenderTree/win/DumpRenderTree.vcproj:
+        * DumpRenderTree/win/ImageDiff.vcproj:
+        * DumpRenderTree/win/TestNetscapePlugin/TestNetscapePlugin.vcproj:
+'''
+
+    _rolled_over_footer = '== Rolled over to ChangeLog-2009-06-16 =='
+
+    # More example text than we need.  Eventually we should support parsing all of this and write tests for that parsing.
+    _example_changelog = u"""2009-08-17  Tor Arne Vestb\xf8  <vestbo@webkit.org>
+
+        <http://webkit.org/b/28393> check-webkit-style: add check for use of std::max()/std::min() instead of MAX()/MIN()
+
+        Reviewed by David Levin.
+
+        * Scripts/modules/cpp_style.py:
+        (_ERROR_CATEGORIES): Added 'runtime/max_min_macros'.
+        (check_max_min_macros): Added.  Returns level 4 error when MAX()
+        and MIN() macros are used in header files and C++ source files.
+        (check_style): Added call to check_max_min_macros().
+        * Scripts/modules/cpp_style_unittest.py: Added unit tests.
+        (test_max_macro): Added.
+        (test_min_macro): Added.
+
+2009-08-16  David Kilzer  <ddkilzer@apple.com>
+
+        Backed out r47343 which was mistakenly committed
+
+        * Scripts/bugzilla-tool:
+        * Scripts/modules/scm.py:
+
+2009-06-18  Darin Adler  <darin@apple.com>
+
+        Rubber stamped by Mark Rowe.
+
+        * DumpRenderTree/mac/DumpRenderTreeWindow.mm:
+        (-[DumpRenderTreeWindow close]): Resolved crashes seen during regression
+        tests. The close method can be called on a window that's already closed
+        so we can't assert here.
+
+2011-11-04  Benjamin Poulain  <bpoulain@apple.com>
+
+        [Mac] ResourceRequest's nsURLRequest() does not differentiate null and empty URLs with CFNetwork
+        https://bugs.webkit.org/show_bug.cgi?id=71539
+
+        Reviewed by David Kilzer.
+
+        In order to have CFURL and NSURL to be consistent when both are used on Mac,
+        KURL::createCFURL() is changed to support empty URL values.
+
+        * This change log entry is made up to test _parse_entry:
+            * a list of things
+
+        * platform/cf/KURLCFNet.cpp:
+        (WebCore::createCFURLFromBuffer):
+        (WebCore::KURL::createCFURL):
+        * platform/mac/KURLMac.mm :
+        (WebCore::KURL::operator NSURL *):
+        (WebCore::KURL::createCFURL):
+        * WebCoreSupport/ChromeClientEfl.cpp:
+        (WebCore::ChromeClientEfl::closeWindowSoon): call new function and moves its
+        previous functionality there.
+        * ewk/ewk_private.h:
+        * ewk/ewk_view.cpp:
+
+2011-03-02  Carol Szabo  <carol.szabo@nokia.com>
+
+        Reviewed by David Hyatt  <hyatt@apple.com>
+
+        content property doesn't support quotes
+        https://bugs.webkit.org/show_bug.cgi?id=6503
+
+        Added full support for quotes as defined by CSS 2.1.
+
+        Tests: fast/css/content/content-quotes-01.html
+               fast/css/content/content-quotes-02.html
+               fast/css/content/content-quotes-03.html
+               fast/css/content/content-quotes-04.html
+               fast/css/content/content-quotes-05.html
+               fast/css/content/content-quotes-06.html
+
+2011-03-31  Brent Fulgham  <bfulgham@webkit.org>
+
+       Reviewed Adam Roben.
+
+       [WinCairo] Implement Missing drawWindowsBitmap method.
+       https://bugs.webkit.org/show_bug.cgi?id=57409
+
+2011-03-28  Dirk Pranke  <dpranke@chromium.org>
+
+       RS=Tony Chang.
+
+       r81977 moved FontPlatformData.h from
+       WebCore/platform/graphics/cocoa to platform/graphics. This
+       change updates the chromium build accordingly.
+
+       https://bugs.webkit.org/show_bug.cgi?id=57281
+
+       * platform/graphics/chromium/CrossProcessFontLoading.mm:
+
+2011-05-04  Alexis Menard  <alexis.menard@openbossa.org>
+
+       Unreviewed warning fix.
+
+       The variable is just used in the ASSERT macro. Let's use ASSERT_UNUSED to avoid
+       a warning in Release build.
+
+       * accessibility/AccessibilityRenderObject.cpp:
+       (WebCore::lastChildConsideringContinuation):
+
+2011-10-11  Antti Koivisto  <antti@apple.com>
+
+       Resolve regular and visited link style in a single pass
+       https://bugs.webkit.org/show_bug.cgi?id=69838
+
+       Reviewed by Darin Adler
+
+       We can simplify and speed up selector matching by removing the recursive matching done
+       to generate the style for the :visited pseudo selector. Both regular and visited link style
+       can be generated in a single pass through the style selector.
+
+== Rolled over to ChangeLog-2009-06-16 ==
+"""
+
+    def test_parse_bug_id_from_changelog(self):
+        commit_text = '''
+2011-03-23  Ojan Vafai  <ojan@chromium.org>
+
+        Add failing result for WebKit2. All tests that require
+        focus fail on WebKit2. See https://bugs.webkit.org/show_bug.cgi?id=56988.
+
+        * platform/mac-wk2/fast/css/pseudo-any-expected.txt: Added.
+
+        '''
+
+        self.assertEquals(56988, parse_bug_id_from_changelog(commit_text))
+
+        commit_text = '''
+2011-03-23  Ojan Vafai  <ojan@chromium.org>
+
+        Add failing result for WebKit2. All tests that require
+        focus fail on WebKit2. See https://bugs.webkit.org/show_bug.cgi?id=56988.
+        https://bugs.webkit.org/show_bug.cgi?id=12345
+
+        * platform/mac-wk2/fast/css/pseudo-any-expected.txt: Added.
+
+        '''
+
+        self.assertEquals(12345, parse_bug_id_from_changelog(commit_text))
+
+        commit_text = '''
+2011-03-31  Adam Roben  <aroben@apple.com>
+
+        Quote the executable path we pass to ::CreateProcessW
+
+        This will ensure that spaces in the path will be interpreted correctly.
+
+        Fixes <http://webkit.org/b/57569> Web process sometimes fails to launch when there are
+        spaces in its path
+
+        Reviewed by Steve Falkenburg.
+
+        * UIProcess/Launcher/win/ProcessLauncherWin.cpp:
+        (WebKit::ProcessLauncher::launchProcess): Surround the executable path in quotes.
+
+        '''
+
+        self.assertEquals(57569, parse_bug_id_from_changelog(commit_text))
+
+        commit_text = '''
+2011-03-29  Timothy Hatcher  <timothy@apple.com>
+
+        Update WebCore Localizable.strings to contain WebCore, WebKit/mac and WebKit2 strings.
+
+        https://webkit.org/b/57354
+
+        Reviewed by Sam Weinig.
+
+        * English.lproj/Localizable.strings: Updated.
+        * StringsNotToBeLocalized.txt: Removed. To hard to maintain in WebCore.
+        * platform/network/cf/LoaderRunLoopCF.h: Remove a single quote in an #error so
+        extract-localizable-strings does not complain about unbalanced single quotes.
+        '''
+
+        self.assertEquals(57354, parse_bug_id_from_changelog(commit_text))
+
+    def test_parse_log_entries_from_changelog(self):
+        changelog_file = StringIO(self._example_changelog)
+        parsed_entries = list(ChangeLog.parse_entries_from_file(changelog_file))
+        self.assertEquals(len(parsed_entries), 9)
+        self.assertEquals(parsed_entries[0].reviewer_text(), "David Levin")
+        self.assertEquals(parsed_entries[1].author_email(), "ddkilzer@apple.com")
+        self.assertEquals(parsed_entries[2].reviewer_text(), "Mark Rowe")
+        self.assertEquals(parsed_entries[2].touched_files(), ["DumpRenderTree/mac/DumpRenderTreeWindow.mm"])
+        self.assertEquals(parsed_entries[3].author_name(), "Benjamin Poulain")
+        self.assertEquals(parsed_entries[3].touched_files(), ["platform/cf/KURLCFNet.cpp", "platform/mac/KURLMac.mm",
+            "WebCoreSupport/ChromeClientEfl.cpp", "ewk/ewk_private.h", "ewk/ewk_view.cpp"])
+        self.assertEquals(parsed_entries[4].reviewer_text(), "David Hyatt")
+        self.assertEquals(parsed_entries[5].reviewer_text(), "Adam Roben")
+        self.assertEquals(parsed_entries[6].reviewer_text(), "Tony Chang")
+        self.assertEquals(parsed_entries[7].reviewer_text(), None)
+        self.assertEquals(parsed_entries[8].reviewer_text(), 'Darin Adler')
+
+    def test_parse_log_entries_from_annotated_file(self):
+        # Note that some of the lines below intentionally have trailing spaces.
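+        # Each line is prefixed with a revision number and an e-mail address (annotate/blame-style) ahead of the ChangeLog text.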
+        changelog_file = StringIO(u"100000 ossy@webkit.org 2011-11-11  Csaba Osztrogon\u00e1c  <ossy@webkit.org>\n"
+            u"100000 ossy@webkit.org\n"
+            u"100000 ossy@webkit.org         100,000 !!!\n"
+            u"100000 ossy@webkit.org \n"
+            u"100000 ossy@webkit.org         Reviewed by Zoltan Herczeg.\n"
+            u"100000 ossy@webkit.org \n"
+            u"100000 ossy@webkit.org         * ChangeLog: Point out revision 100,000.\n"
+            u"100000 ossy@webkit.org \n"
+            u"93798 ap@apple.com 2011-08-25  Alexey Proskuryakov  <ap@apple.com>\n"
+            u"93798 ap@apple.com \n"
+            u"93798 ap@apple.com         Fix build when GCC 4.2 is not installed.\n"
+            u"93798 ap@apple.com \n"
+            u"93798 ap@apple.com         * gtest/xcode/Config/CompilerVersion.xcconfig: Copied from Source/WebCore/Configurations/CompilerVersion.xcconfig.\n"
+            u"93798 ap@apple.com         * gtest/xcode/Config/General.xcconfig:\n"
+            u"93798 ap@apple.com         Use the same compiler version as other projects do.\n"
+            u"93798 ap@apple.com\n"
+            u"99491 andreas.kling@nokia.com 2011-11-03  Andreas Kling  <kling@webkit.org>\n"
+            u"99491 andreas.kling@nokia.com \n"
+            u"99190 andreas.kling@nokia.com         Unreviewed build fix, sigh.\n"
+            u"99190 andreas.kling@nokia.com \n"
+            u"99190 andreas.kling@nokia.com         * css/CSSFontFaceRule.h:\n"
+            u"99190 andreas.kling@nokia.com         * css/CSSMutableStyleDeclaration.h:\n"
+            u"99190 andreas.kling@nokia.com\n"
+            u"99190 andreas.kling@nokia.com 2011-11-03  Andreas Kling  <kling@webkit.org>\n"
+            u"99190 andreas.kling@nokia.com \n"
+            u"99187 andreas.kling@nokia.com         Unreviewed build fix, out-of-line StyleSheet::parentStyleSheet()\n"
+            u"99187 andreas.kling@nokia.com         again since there's a cycle in the includes between CSSRule/StyleSheet.\n"
+            u"99187 andreas.kling@nokia.com \n"
+            u"99187 andreas.kling@nokia.com         * css/StyleSheet.cpp:\n"
+            u"99187 andreas.kling@nokia.com         (WebCore::StyleSheet::parentStyleSheet):\n"
+            u"99187 andreas.kling@nokia.com         * css/StyleSheet.h:\n"
+            u"99187 andreas.kling@nokia.com \n")
+
+        parsed_entries = list(ChangeLog.parse_entries_from_file(changelog_file))
+        self.assertEquals(parsed_entries[0].revision(), 100000)
+        self.assertEquals(parsed_entries[0].reviewer_text(), "Zoltan Herczeg")
+        self.assertEquals(parsed_entries[0].author_name(), u"Csaba Osztrogon\u00e1c")
+        self.assertEquals(parsed_entries[0].author_email(), "ossy@webkit.org")
+        self.assertEquals(parsed_entries[1].revision(), 93798)
+        self.assertEquals(parsed_entries[1].author_name(), "Alexey Proskuryakov")
+        self.assertEquals(parsed_entries[2].revision(), 99190)
+        self.assertEquals(parsed_entries[2].author_name(), "Andreas Kling")
+        self.assertEquals(parsed_entries[3].revision(), 99187)
+        self.assertEquals(parsed_entries[3].author_name(), "Andreas Kling")
+
+    def _assert_parse_reviewer_text_and_list(self, text, expected_reviewer_text, expected_reviewer_text_list=None):
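+        # When no expected list is given, the parsed list should contain just the reviewer text itself.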
+        reviewer_text, reviewer_text_list = ChangeLogEntry._parse_reviewer_text(text)
+        self.assertEquals(reviewer_text, expected_reviewer_text)
+        if expected_reviewer_text_list:
+            self.assertEquals(reviewer_text_list, expected_reviewer_text_list)
+        else:
+            self.assertEquals(reviewer_text_list, [expected_reviewer_text])
+
+    def _assert_parse_reviewer_text_list(self, text, expected_reviewer_text_list):
+        reviewer_text, reviewer_text_list = ChangeLogEntry._parse_reviewer_text(text)
+        self.assertEquals(reviewer_text_list, expected_reviewer_text_list)
+
+    def test_parse_reviewer_text(self):
+        self._assert_parse_reviewer_text_and_list('  reviewed  by Ryosuke Niwa,   Oliver Hunt, and  Dimitri Glazkov',
+            'Ryosuke Niwa, Oliver Hunt, and Dimitri Glazkov', ['Ryosuke Niwa', 'Oliver Hunt', 'Dimitri Glazkov'])
+        self._assert_parse_reviewer_text_and_list('Reviewed by Brady Eidson and David Levin, landed by Brady Eidson',
+            'Brady Eidson and David Levin', ['Brady Eidson', 'David Levin'])
+
+        self._assert_parse_reviewer_text_and_list('Reviewed by Simon Fraser. Committed by Beth Dakin.', 'Simon Fraser')
+        self._assert_parse_reviewer_text_and_list('Reviewed by Geoff Garen. V8 fixes courtesy of Dmitry Titov.', 'Geoff Garen')
+        self._assert_parse_reviewer_text_and_list('Reviewed by Adam Roben&Dirk Schulze', 'Adam Roben&Dirk Schulze', ['Adam Roben', 'Dirk Schulze'])
+        self._assert_parse_reviewer_text_and_list('Rubber stamps by Darin Adler & Sam Weinig.', 'Darin Adler & Sam Weinig', ['Darin Adler', 'Sam Weinig'])
+
+        self._assert_parse_reviewer_text_and_list('Reviewed by adam,andy and andy adam, andy smith',
+            'adam,andy and andy adam, andy smith', ['adam', 'andy', 'andy adam', 'andy smith'])
+
+        self._assert_parse_reviewer_text_and_list('rubber stamped by Oliver Hunt (oliver@apple.com) and Darin Adler (darin@apple.com)',
+            'Oliver Hunt and Darin Adler', ['Oliver Hunt', 'Darin Adler'])
+
+        self._assert_parse_reviewer_text_and_list('rubber  Stamped by David Hyatt  <hyatt@apple.com>', 'David Hyatt')
+        self._assert_parse_reviewer_text_and_list('Rubber-stamped by Antti Koivisto.', 'Antti Koivisto')
+        self._assert_parse_reviewer_text_and_list('Rubberstamped by Dan Bernstein.', 'Dan Bernstein')
+        self._assert_parse_reviewer_text_and_list('Reviews by Ryosuke Niwa', 'Ryosuke Niwa')
+        self._assert_parse_reviewer_text_and_list('Reviews Ryosuke Niwa', 'Ryosuke Niwa')
+        self._assert_parse_reviewer_text_and_list('Rubberstamp Ryosuke Niwa', 'Ryosuke Niwa')
+        self._assert_parse_reviewer_text_and_list('Typed and reviewed by Alexey Proskuryakov.', 'Alexey Proskuryakov')
+        self._assert_parse_reviewer_text_and_list('Reviewed and landed by Brady Eidson', 'Brady Eidson')
+        self._assert_parse_reviewer_text_and_list('Reviewed by rniwa@webkit.org.', 'rniwa@webkit.org')
+        self._assert_parse_reviewer_text_and_list('Reviewed by Dirk Schulze / Darin Adler.', 'Dirk Schulze / Darin Adler', ['Dirk Schulze', 'Darin Adler'])
+        self._assert_parse_reviewer_text_and_list('Reviewed by Sam Weinig + Oliver Hunt.', 'Sam Weinig + Oliver Hunt', ['Sam Weinig', 'Oliver Hunt'])
+
+        self._assert_parse_reviewer_text_list('Reviewed by Sam Weinig, and given a good once-over by Jeff Miller.', ['Sam Weinig', 'Jeff Miller'])
+        self._assert_parse_reviewer_text_list(' Reviewed by Sam Weinig, even though this is just a...', ['Sam Weinig'])
+        self._assert_parse_reviewer_text_list('Rubber stamped by by Gustavo Noronha Silva', ['Gustavo Noronha Silva'])
+        self._assert_parse_reviewer_text_list('Rubberstamped by Noam Rosenthal, who wrote the original code.', ['Noam Rosenthal'])
+        self._assert_parse_reviewer_text_list('Reviewed by Dan Bernstein (relanding of r47157)', ['Dan Bernstein'])
+        self._assert_parse_reviewer_text_list('Reviewed by Geoffrey "Sean/Shawn/Shaun" Garen', ['Geoffrey Garen'])
+        self._assert_parse_reviewer_text_list('Reviewed by Dave "Messy" Hyatt.', ['Dave Hyatt'])
+        self._assert_parse_reviewer_text_list('Reviewed by Sam \'The Belly\' Weinig', ['Sam Weinig'])
+        self._assert_parse_reviewer_text_list('Rubber-stamped by David "I\'d prefer not" Hyatt.', ['David Hyatt'])
+        self._assert_parse_reviewer_text_list('Reviewed by Mr. Geoffrey Garen.', ['Geoffrey Garen'])
+        self._assert_parse_reviewer_text_list('Reviewed by Darin (ages ago)', ['Darin'])
+        self._assert_parse_reviewer_text_list('Reviewed by Sam Weinig (except for a few comment and header tweaks).', ['Sam Weinig'])
+        self._assert_parse_reviewer_text_list('Reviewed by Sam Weinig (all but the FormDataListItem rename)', ['Sam Weinig'])
+        self._assert_parse_reviewer_text_list('Reviewed by Darin Adler, tweaked and landed by Beth.', ['Darin Adler'])
+        self._assert_parse_reviewer_text_list('Reviewed by Sam Weinig with no hesitation', ['Sam Weinig'])
+        self._assert_parse_reviewer_text_list('Reviewed by Oliver Hunt, okayed by Darin Adler.', ['Oliver Hunt'])
+        self._assert_parse_reviewer_text_list('Reviewed by Darin Adler).', ['Darin Adler'])
+
+        # For now, we let unofficial reviewers be recognized as reviewers.
+        self._assert_parse_reviewer_text_list('Reviewed by Sam Weinig, Anders Carlsson, and (unofficially) Adam Barth.',
+            ['Sam Weinig', 'Anders Carlsson', 'Adam Barth'])
+
+        self._assert_parse_reviewer_text_list('Reviewed by NOBODY.', None)
+        self._assert_parse_reviewer_text_list('Reviewed by NOBODY - Build Fix.', None)
+        self._assert_parse_reviewer_text_list('Reviewed by NOBODY, layout tests fix.', None)
+        self._assert_parse_reviewer_text_list('Reviewed by NOBODY (Qt build fix pt 2).', None)
+        self._assert_parse_reviewer_text_list('Reviewed by NOBODY(rollout)', None)
+        self._assert_parse_reviewer_text_list('Reviewed by NOBODY (Build fix, forgot to svn add this file)', None)
+        self._assert_parse_reviewer_text_list('Reviewed by nobody (trivial follow up fix), Joseph Pecoraro LGTM-ed.', None)
+
+    def _entry_with_author(self, author_text):
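+        # Build a minimal ChangeLog entry with AUTHOR_TEXT replaced by the given author line.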
+        return ChangeLogEntry('''2009-08-19  AUTHOR_TEXT
+
+            Reviewed by Ryosuke Niwa
+
+            * Scripts/bugzilla-tool:
+'''.replace("AUTHOR_TEXT", author_text))
+
+    def _entry_with_reviewer(self, reviewer_line):
+        return ChangeLogEntry('''2009-08-19  Eric Seidel  <eric@webkit.org>
+
+            REVIEW_LINE
+
+            * Scripts/bugzilla-tool:
+'''.replace("REVIEW_LINE", reviewer_line))
+
+    def _contributors(self, names):
+        return [CommitterList().contributor_by_name(name) for name in names]
+
+    def _assert_fuzzy_reviewer_match(self, reviewer_text, expected_text_list, expected_contributors):
+        unused, reviewer_text_list = ChangeLogEntry._parse_reviewer_text(reviewer_text)
+        self.assertEquals(reviewer_text_list, expected_text_list)
+        self.assertEquals(self._entry_with_reviewer(reviewer_text).reviewers(), self._contributors(expected_contributors))
+
+    def test_fuzzy_reviewer_match__none(self):
+        self._assert_fuzzy_reviewer_match('Reviewed by BUILD FIX', ['BUILD FIX'], [])
+        self._assert_fuzzy_reviewer_match('Reviewed by Mac build fix', ['Mac build fix'], [])
+
+    def test_fuzzy_reviewer_match_adam_barth(self):
+        self._assert_fuzzy_reviewer_match('Reviewed by Adam Barth.:w', ['Adam Barth.:w'], ['Adam Barth'])
+
+    def test_fuzzy_reviewer_match_darin_adler_et_al(self):
+        self._assert_fuzzy_reviewer_match('Reviewed by Darin Adler in <https://bugs.webkit.org/show_bug.cgi?id=47736>.', ['Darin Adler in'], ['Darin Adler'])
+        self._assert_fuzzy_reviewer_match('Reviewed by Darin Adler, Dan Bernstein, Adele Peterson, and others.',
+            ['Darin Adler', 'Dan Bernstein', 'Adele Peterson', 'others'], ['Darin Adler', 'Dan Bernstein', 'Adele Peterson'])
+
+    def test_fuzzy_reviewer_match_dimitri_glazkov(self):
+        self._assert_fuzzy_reviewer_match('Reviewed by Dimitri Glazkov, build fix', ['Dimitri Glazkov', 'build fix'], ['Dimitri Glazkov'])
+
+    def test_fuzzy_reviewer_match_george_staikos(self):
+        self._assert_fuzzy_reviewer_match('Reviewed by George Staikos (and others)', ['George Staikos', 'others'], ['George Staikos'])
+
+    def test_fuzzy_reviewer_match_mark_rowe(self):
+        self._assert_fuzzy_reviewer_match('Reviewed by Mark Rowe, but Dan Bernstein also reviewed and asked thoughtful questions.',
+            ['Mark Rowe', 'but Dan Bernstein also reviewed', 'asked thoughtful questions'], ['Mark Rowe'])
+
+    def test_fuzzy_reviewer_match_initial(self):
+        self._assert_fuzzy_reviewer_match('Reviewed by Alejandro G. Castro.',
+            ['Alejandro G. Castro'], ['Alejandro G. Castro'])
+        self._assert_fuzzy_reviewer_match('Reviewed by G. Alejandro G. Castro and others.',
+            ['G. Alejandro G. Castro', 'others'], ['Alejandro G. Castro'])
+
+        # If a reviewer's name ends with an initial, the regular expression
+        # will incorrectly trim the last period, but it will still match fuzzily to
+        # the full reviewer name.
+        self._assert_fuzzy_reviewer_match('Reviewed by G. Alejandro G. G. Castro G.',
+            ['G. Alejandro G. G. Castro G'], ['Alejandro G. Castro'])
+
+    def _assert_parse_authors(self, author_text, expected_contributors):
+        parsed_authors = [(author['name'], author['email']) for author in self._entry_with_author(author_text).authors()]
+        self.assertEquals(parsed_authors, expected_contributors)
+
+    def test_parse_authors(self):
+        self._assert_parse_authors(u'Aaron Colwell  <acolwell@chromium.org>', [(u'Aaron Colwell', u'acolwell@chromium.org')])
+        self._assert_parse_authors('Eric Seidel  <eric@webkit.org>, Ryosuke Niwa  <rniwa@webkit.org>',
+            [('Eric Seidel', 'eric@webkit.org'), ('Ryosuke Niwa', 'rniwa@webkit.org')])
+        self._assert_parse_authors('Zan Dobersek  <zandobersek@gmail.com> and Philippe Normand  <pnormand@igalia.com>',
+            [('Zan Dobersek', 'zandobersek@gmail.com'), ('Philippe Normand', 'pnormand@igalia.com')])
+        self._assert_parse_authors('New Contributor  <new@webkit.org> and Noob  <noob@webkit.org>',
+            [('New Contributor', 'new@webkit.org'), ('Noob', 'noob@webkit.org')])
+        self._assert_parse_authors('Adam Barth  <abarth@webkit.org> && Benjamin Poulain  <bpoulain@apple.com>',
+            [('Adam Barth', 'abarth@webkit.org'), ('Benjamin Poulain', 'bpoulain@apple.com')])
+
+    def _assert_has_valid_reviewer(self, reviewer_line, expected):
+        self.assertEqual(self._entry_with_reviewer(reviewer_line).has_valid_reviewer(), expected)
+
+    def test_has_valid_reviewer(self):
+        self._assert_has_valid_reviewer("Reviewed by Eric Seidel.", True)
+        self._assert_has_valid_reviewer("Reviewed by Eric Seidel", True)  # Not picky about the '.'
+        self._assert_has_valid_reviewer("Reviewed by Eric.", False)
+        self._assert_has_valid_reviewer("Reviewed by Eric C Seidel.", False)
+        self._assert_has_valid_reviewer("Rubber-stamped by Eric.", False)
+        self._assert_has_valid_reviewer("Rubber-stamped by Eric Seidel.", True)
+        self._assert_has_valid_reviewer("Rubber stamped by Eric.", False)
+        self._assert_has_valid_reviewer("Rubber stamped by Eric Seidel.", True)
+        self._assert_has_valid_reviewer("Unreviewed build fix.", True)
+
+    def test_latest_entry_parse(self):
+        changelog_contents = u"%s\n%s" % (self._example_entry, self._example_changelog)
+        changelog_file = StringIO(changelog_contents)
+        latest_entry = ChangeLog.parse_latest_entry_from_file(changelog_file)
+        self.assertEquals(latest_entry.contents(), self._example_entry)
+        self.assertEquals(latest_entry.author_name(), "Peter Kasting")
+        self.assertEquals(latest_entry.author_email(), "pkasting@google.com")
+        self.assertEquals(latest_entry.reviewer_text(), u"Tor Arne Vestb\xf8")
+        self.assertEquals(latest_entry.touched_files(), ["DumpRenderTree/win/DumpRenderTree.vcproj", "DumpRenderTree/win/ImageDiff.vcproj", "DumpRenderTree/win/TestNetscapePlugin/TestNetscapePlugin.vcproj"])
+
+        self.assertTrue(latest_entry.reviewer())  # Make sure that our UTF8-based lookup of Tor works.
+
+    def test_latest_entry_parse_single_entry(self):
+        changelog_contents = u"%s\n%s" % (self._example_entry, self._rolled_over_footer)
+        changelog_file = StringIO(changelog_contents)
+        latest_entry = ChangeLog.parse_latest_entry_from_file(changelog_file)
+        self.assertEquals(latest_entry.contents(), self._example_entry)
+        self.assertEquals(latest_entry.author_name(), "Peter Kasting")
+
+    @staticmethod
+    def _write_tmp_file_with_contents(byte_array):
+        assert(isinstance(byte_array, str))
+        (file_descriptor, file_path) = tempfile.mkstemp() # NamedTemporaryFile always deletes the file on close in python < 2.6
+        with os.fdopen(file_descriptor, "w") as file:
+            file.write(byte_array)
+        return file_path
+
+    @staticmethod
+    def _read_file_contents(file_path, encoding):
+        with codecs.open(file_path, "r", encoding) as file:
+            return file.read()
+
+    # FIXME: We really should be getting this from prepare-ChangeLog itself.
+    _new_entry_boilerplate = '''2009-08-19  Eric Seidel  <eric@webkit.org>
+
+        Need a short description (OOPS!).
+        Need the bug URL (OOPS!).
+
+        Reviewed by NOBODY (OOPS!).
+
+        * Scripts/bugzilla-tool:
+'''
+
+    _new_entry_boilerplate_with_bugurl = '''2009-08-19  Eric Seidel  <eric@webkit.org>
+
+        Need a short description (OOPS!).
+        https://bugs.webkit.org/show_bug.cgi?id=12345
+
+        Reviewed by NOBODY (OOPS!).
+
+        * Scripts/bugzilla-tool:
+'''
+
+    _new_entry_boilerplate_with_multiple_bugurl = '''2009-08-19  Eric Seidel  <eric@webkit.org>
+
+        Need a short description (OOPS!).
+        https://bugs.webkit.org/show_bug.cgi?id=12345
+        http://webkit.org/b/12345
+
+        Reviewed by NOBODY (OOPS!).
+
+        * Scripts/bugzilla-tool:
+'''
+
+    _new_entry_boilerplate_without_reviewer_line = '''2009-08-19  Eric Seidel  <eric@webkit.org>
+
+        Need a short description (OOPS!).
+        https://bugs.webkit.org/show_bug.cgi?id=12345
+
+        * Scripts/bugzilla-tool:
+'''
+
+    _new_entry_boilerplate_without_reviewer_multiple_bugurl = '''2009-08-19  Eric Seidel  <eric@webkit.org>
+
+        Need a short description (OOPS!).
+        https://bugs.webkit.org/show_bug.cgi?id=12345
+        http://webkit.org/b/12345
+
+        * Scripts/bugzilla-tool:
+'''
+
+    def test_set_reviewer(self):
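+        # set_reviewer() should replace the 'NOBODY (OOPS!)' placeholder and add a "Reviewed by" line when the boilerplate lacks one.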
+        changelog_contents = u"%s\n%s" % (self._new_entry_boilerplate_with_bugurl, self._example_changelog)
+        changelog_path = self._write_tmp_file_with_contents(changelog_contents.encode("utf-8"))
+        reviewer_name = 'Test Reviewer'
+        ChangeLog(changelog_path).set_reviewer(reviewer_name)
+        actual_contents = self._read_file_contents(changelog_path, "utf-8")
+        expected_contents = changelog_contents.replace('NOBODY (OOPS!)', reviewer_name)
+        os.remove(changelog_path)
+        self.assertEquals(actual_contents.splitlines(), expected_contents.splitlines())
+
+        changelog_contents_without_reviewer_line = u"%s\n%s" % (self._new_entry_boilerplate_without_reviewer_line, self._example_changelog)
+        changelog_path = self._write_tmp_file_with_contents(changelog_contents_without_reviewer_line.encode("utf-8"))
+        ChangeLog(changelog_path).set_reviewer(reviewer_name)
+        actual_contents = self._read_file_contents(changelog_path, "utf-8")
+        os.remove(changelog_path)
+        self.assertEquals(actual_contents.splitlines(), expected_contents.splitlines())
+
+        changelog_contents_without_reviewer_line = u"%s\n%s" % (self._new_entry_boilerplate_without_reviewer_multiple_bugurl, self._example_changelog)
+        changelog_path = self._write_tmp_file_with_contents(changelog_contents_without_reviewer_line.encode("utf-8"))
+        ChangeLog(changelog_path).set_reviewer(reviewer_name)
+        actual_contents = self._read_file_contents(changelog_path, "utf-8")
+        changelog_contents = u"%s\n%s" % (self._new_entry_boilerplate_with_multiple_bugurl, self._example_changelog)
+        expected_contents = changelog_contents.replace('NOBODY (OOPS!)', reviewer_name)
+        os.remove(changelog_path)
+        self.assertEquals(actual_contents.splitlines(), expected_contents.splitlines())
+
+    def test_set_short_description_and_bug_url(self):
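+        # set_short_description_and_bug_url() should replace the description and bug-URL placeholders with the supplied values.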
+        changelog_contents = u"%s\n%s" % (self._new_entry_boilerplate_with_bugurl, self._example_changelog)
+        changelog_path = self._write_tmp_file_with_contents(changelog_contents.encode("utf-8"))
+        short_description = "A short description"
+        bug_url = "http://example.com/b/2344"
+        ChangeLog(changelog_path).set_short_description_and_bug_url(short_description, bug_url)
+        actual_contents = self._read_file_contents(changelog_path, "utf-8")
+        expected_message = "%s\n        %s" % (short_description, bug_url)
+        expected_contents = changelog_contents.replace("Need a short description (OOPS!).", expected_message)
+        os.remove(changelog_path)
+        self.assertEquals(actual_contents.splitlines(), expected_contents.splitlines())
+
+        changelog_contents = u"%s\n%s" % (self._new_entry_boilerplate, self._example_changelog)
+        changelog_path = self._write_tmp_file_with_contents(changelog_contents.encode("utf-8"))
+        short_description = "A short description 2"
+        bug_url = "http://example.com/b/2345"
+        ChangeLog(changelog_path).set_short_description_and_bug_url(short_description, bug_url)
+        actual_contents = self._read_file_contents(changelog_path, "utf-8")
+        expected_message = "%s\n        %s" % (short_description, bug_url)
+        expected_contents = changelog_contents.replace("Need a short description (OOPS!).\n        Need the bug URL (OOPS!).", expected_message)
+        os.remove(changelog_path)
+        self.assertEquals(actual_contents.splitlines(), expected_contents.splitlines())
diff --git a/Tools/Scripts/webkitpy/common/checkout/checkout.py b/Tools/Scripts/webkitpy/common/checkout/checkout.py
new file mode 100644
index 0000000..8f45024
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/checkout/checkout.py
@@ -0,0 +1,178 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import StringIO
+
+from webkitpy.common.config import urls
+from webkitpy.common.checkout.changelog import ChangeLog, parse_bug_id_from_changelog
+from webkitpy.common.checkout.commitinfo import CommitInfo
+from webkitpy.common.checkout.scm import CommitMessage
+from webkitpy.common.checkout.deps import DEPS
+from webkitpy.common.memoized import memoized
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.common.system.deprecated_logging import log
+
+
+# This class represents the WebKit-specific parts of the checkout (like ChangeLogs).
+# FIXME: Move a bunch of ChangeLog-specific processing from SCM to this object.
+# NOTE: All paths returned from this class should be absolute.
+class Checkout(object):
+    def __init__(self, scm, executive=None, filesystem=None):
+        self._scm = scm
+        # FIXME: We shouldn't be grabbing at private members on scm.
+        self._executive = executive or self._scm._executive
+        self._filesystem = filesystem or self._scm._filesystem
+
+    def is_path_to_changelog(self, path):
+        return self._filesystem.basename(path) == "ChangeLog"
+
+    def _latest_entry_for_changelog_at_revision(self, changelog_path, revision):
+        changelog_contents = self._scm.contents_at_revision(changelog_path, revision)
+        # contents_at_revision returns a byte array (str()), but we know
+        # that ChangeLog files are utf-8.  parse_latest_entry_from_file
+        # expects a file-like object which vends unicode(), so we decode here.
+        # Old revisions of Sources/WebKit/wx/ChangeLog have some invalid utf8 characters.
+        changelog_file = StringIO.StringIO(changelog_contents.decode("utf-8", "ignore"))
+        return ChangeLog.parse_latest_entry_from_file(changelog_file)
+
+    def changelog_entries_for_revision(self, revision, changed_files=None):
+        if not changed_files:
+            changed_files = self._scm.changed_files_for_revision(revision)
+        # FIXME: This gets confused if ChangeLog files are moved, as
+        # deletes are still "changed files" per changed_files_for_revision.
+        # FIXME: For now we hack around this by catching any exceptions
+        # which result from having deleted files included in the changed_files list.
+        changelog_entries = []
+        for path in changed_files:
+            if not self.is_path_to_changelog(path):
+                continue
+            try:
+                changelog_entries.append(self._latest_entry_for_changelog_at_revision(path, revision))
+            except ScriptError:
+                pass
+        return changelog_entries
+
+    def _changelog_data_for_revision(self, revision):
+        changed_files = self._scm.changed_files_for_revision(revision)
+        changelog_entries = self.changelog_entries_for_revision(revision, changed_files=changed_files)
+        # Assume for now that the first entry has everything we need:
+        # FIXME: This returns None if there were no ChangeLogs at all; callers must handle that.
+        if not len(changelog_entries):
+            return None
+        changelog_entry = changelog_entries[0]
+        return {
+            "bug_id": parse_bug_id_from_changelog(changelog_entry.contents()),
+            "author_name": changelog_entry.author_name(),
+            "author_email": changelog_entry.author_email(),
+            "author": changelog_entry.author(),
+            "reviewer_text": changelog_entry.reviewer_text(),
+            "reviewer": changelog_entry.reviewer(),
+            "contents": changelog_entry.contents(),
+            "changed_files": changed_files,
+        }
+
+    @memoized
+    def commit_info_for_revision(self, revision):
+        committer_email = self._scm.committer_email_for_revision(revision)
+        changelog_data = self._changelog_data_for_revision(revision)
+        if not changelog_data:
+            return None
+        return CommitInfo(revision, committer_email, changelog_data)
+
+    def bug_id_for_revision(self, revision):
+        return self.commit_info_for_revision(revision).bug_id()
+
+    def _modified_files_matching_predicate(self, git_commit, predicate, changed_files=None):
+        # SCM returns paths relative to scm.checkout_root
+        # Callers (especially those using the ChangeLog class) may
+        # expect absolute paths, so this method returns absolute paths.
+        if not changed_files:
+            changed_files = self._scm.changed_files(git_commit)
+        return filter(predicate, map(self._scm.absolute_path, changed_files))
+
+    def modified_changelogs(self, git_commit, changed_files=None):
+        return self._modified_files_matching_predicate(git_commit, self.is_path_to_changelog, changed_files=changed_files)
+
+    def modified_non_changelogs(self, git_commit, changed_files=None):
+        return self._modified_files_matching_predicate(git_commit, lambda path: not self.is_path_to_changelog(path), changed_files=changed_files)
+
+    def commit_message_for_this_commit(self, git_commit, changed_files=None, return_stderr=False):
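+        # Build the commit message by running commit-log-editor over the modified ChangeLogs.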
+        changelog_paths = self.modified_changelogs(git_commit, changed_files)
+        if not len(changelog_paths):
+            raise ScriptError(message="Found no modified ChangeLogs, cannot create a commit message.\n"
+                              "All changes require a ChangeLog.  See:\n %s" % urls.contribution_guidelines)
+
+        message_text = self._scm.run([self._scm.script_path('commit-log-editor'), '--print-log'] + changelog_paths, return_stderr=return_stderr)
+        return CommitMessage(message_text.splitlines())
+
+    def recent_commit_infos_for_files(self, paths):
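+        # Flatten the per-path revision lists into a set so each revision's commit info is looked up only once.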
+        revisions = set(sum(map(self._scm.revisions_changing_file, paths), []))
+        return set(map(self.commit_info_for_revision, revisions))
+
+    def suggested_reviewers(self, git_commit, changed_files=None):
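+        # Suggest reviewers and review-capable authors of recent commits that touched the same non-ChangeLog files.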
+        changed_files = self.modified_non_changelogs(git_commit, changed_files)
+        commit_infos = self.recent_commit_infos_for_files(changed_files)
+        reviewers = [commit_info.reviewer() for commit_info in commit_infos if commit_info.reviewer()]
+        reviewers.extend([commit_info.author() for commit_info in commit_infos if commit_info.author() and commit_info.author().can_review])
+        return sorted(set(reviewers))
+
+    def bug_id_for_this_commit(self, git_commit, changed_files=None):
+        try:
+            return parse_bug_id_from_changelog(self.commit_message_for_this_commit(git_commit, changed_files).message())
+        except ScriptError, e:
+            pass # We might not have ChangeLogs.
+
+    def chromium_deps(self):
+        return DEPS(self._scm.absolute_path(self._filesystem.join("Source", "WebKit", "chromium", "DEPS")))
+
+    def apply_patch(self, patch):
+        # It's possible that the patch was not made from the root directory.
+        # We should detect and handle that case.
+        # FIXME: Move _scm.script_path here once we get rid of all the dependencies.
+        # --force (continue after errors) is the common case, so we always use it.
+        args = [self._scm.script_path('svn-apply'), "--force"]
+        if patch.reviewer():
+            args += ['--reviewer', patch.reviewer().full_name]
+        self._executive.run_command(args, input=patch.contents(), cwd=self._scm.checkout_root)
+
+    def apply_reverse_diff(self, revision):
+        self._scm.apply_reverse_diff(revision)
+
+        # We revert the ChangeLogs because removing lines from a ChangeLog
+        # doesn't make sense.  ChangeLogs are append only.
+        changelog_paths = self.modified_changelogs(git_commit=None)
+        if len(changelog_paths):
+            self._scm.revert_files(changelog_paths)
+
+        conflicts = self._scm.conflicted_files()
+        if len(conflicts):
+            raise ScriptError(message="Failed to apply reverse diff for revision %s because of the following conflicts:\n%s" % (revision, "\n".join(conflicts)))
+
+    def apply_reverse_diffs(self, revision_list):
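+        # Revert in descending revision order so newer changes are undone before older ones.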
+        for revision in sorted(revision_list, reverse=True):
+            self.apply_reverse_diff(revision)
diff --git a/Tools/Scripts/webkitpy/common/checkout/checkout_mock.py b/Tools/Scripts/webkitpy/common/checkout/checkout_mock.py
new file mode 100644
index 0000000..3c050ae
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/checkout/checkout_mock.py
@@ -0,0 +1,117 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from .deps_mock import MockDEPS
+from .commitinfo import CommitInfo
+
+# FIXME: These imports are wrong, we should use a shared MockCommittersList.
+from webkitpy.common.config.committers import CommitterList
+from webkitpy.common.net.bugzilla.bugzilla_mock import _mock_reviewers
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+
+
+class MockCommitMessage(object):
+    def message(self):
+        return "This is a fake commit message that is at least 50 characters."
+
+
+committer_list = CommitterList()
+
+mock_revisions = {
+    1: CommitInfo(852, "eric@webkit.org", {
+        "bug_id": 50000,
+        "author_name": "Adam Barth",
+        "author_email": "abarth@webkit.org",
+        "author": committer_list.contributor_by_email("abarth@webkit.org"),
+        "reviewer_text": "Darin Adler",
+        "reviewer": committer_list.committer_by_name("Darin Adler"),
+        "changed_files": [
+            "path/to/file",
+            "another/file",
+        ],
+    }),
+    3001: CommitInfo(3001, "tomz@codeaurora.org", {
+        "bug_id": 50004,
+        "author_name": "Tom Zakrajsek",
+        "author_email": "tomz@codeaurora.org",
+        "author": committer_list.contributor_by_email("tomz@codeaurora.org"),
+        "reviewer_text": "Darin Adler",
+        "reviewer": committer_list.committer_by_name("Darin Adler"),
+        "changed_files": [
+            "path/to/file",
+            "another/file",
+        ],
+    })
+}
+
+class MockCheckout(object):
+    def __init__(self):
+        # FIXME: It's unclear if a MockCheckout is very useful.  A normal Checkout
+        # with a MockSCM/MockFileSystem/MockExecutive is probably better.
+        self._filesystem = MockFileSystem()
+
+    def commit_info_for_revision(self, svn_revision):
+        # There are legacy tests that all expect these revision numbers to map
+        # to the same commit description (now mock_revisions[1]).
+        if svn_revision in [32, 123, 852, 853, 854, 1234, 21654, 21655, 21656]:
+            return mock_revisions[1]
+
+        if svn_revision in mock_revisions:
+            return mock_revisions[svn_revision]
+
+        # Any "unrecognized" svn_revision falls through and returns None.
+
+    def is_path_to_changelog(self, path):
+        return self._filesystem.basename(path) == "ChangeLog"
+
+    def bug_id_for_revision(self, svn_revision):
+        return 12345
+
+    def recent_commit_infos_for_files(self, paths):
+        return [self.commit_info_for_revision(32)]
+
+    def modified_changelogs(self, git_commit, changed_files=None):
+        # Ideally we'd return something more interesting here.  The problem is
+        # that LandDiff will try to actually read the patch from disk!
+        return []
+
+    def commit_message_for_this_commit(self, git_commit, changed_files=None):
+        return MockCommitMessage()
+
+    def chromium_deps(self):
+        return MockDEPS()
+
+    def apply_patch(self, patch):
+        pass
+
+    def apply_reverse_diffs(self, revision):
+        pass
+
+    def suggested_reviewers(self, git_commit, changed_files=None):
+        # FIXME: We should use a shared mock committer list.
+        return [_mock_reviewers[0]]
diff --git a/Tools/Scripts/webkitpy/common/checkout/checkout_unittest.py b/Tools/Scripts/webkitpy/common/checkout/checkout_unittest.py
new file mode 100644
index 0000000..e9c2cdd
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/checkout/checkout_unittest.py
@@ -0,0 +1,263 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import codecs
+import os
+import shutil
+import tempfile
+import unittest
+
+from .checkout import Checkout
+from .changelog import ChangeLogEntry
+from .scm import CommitMessage, SCMDetector
+from .scm.scm_mock import MockSCM
+from webkitpy.common.system.executive import Executive, ScriptError
+from webkitpy.common.system.filesystem import FileSystem  # FIXME: This should not be needed.
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.executive_mock import MockExecutive
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.thirdparty.mock import Mock
+
+
+_changelog1entry1 = u"""2010-03-25  Tor Arne Vestb\u00f8  <vestbo@webkit.org>
+
+        Unreviewed build fix to un-break webkit-patch land.
+
+        Move commit_message_for_this_commit from scm to checkout
+        https://bugs.webkit.org/show_bug.cgi?id=36629
+
+        * Scripts/webkitpy/common/checkout/api.py: import scm.CommitMessage
+"""
+_changelog1entry2 = u"""2010-03-25  Adam Barth  <abarth@webkit.org>
+
+        Reviewed by Eric Seidel.
+
+        Move commit_message_for_this_commit from scm to checkout
+        https://bugs.webkit.org/show_bug.cgi?id=36629
+
+        * Scripts/webkitpy/common/checkout/api.py:
+"""
+_changelog1 = u"\n".join([_changelog1entry1, _changelog1entry2])
+_changelog2 = u"""2010-03-25  Tor Arne Vestb\u00f8  <vestbo@webkit.org>
+
+        Unreviewed build fix to un-break webkit-patch land.
+
+        Second part of this complicated change by me, Tor Arne Vestb\u00f8!
+
+        * Path/To/Complicated/File: Added.
+
+2010-03-25  Adam Barth  <abarth@webkit.org>
+
+        Reviewed by Eric Seidel.
+
+        Filler change.
+"""
+
+class CommitMessageForThisCommitTest(unittest.TestCase):
+    expected_commit_message = u"""Unreviewed build fix to un-break webkit-patch land.
+
+Tools: 
+
+Move commit_message_for_this_commit from scm to checkout
+https://bugs.webkit.org/show_bug.cgi?id=36629
+
+* Scripts/webkitpy/common/checkout/api.py: import scm.CommitMessage
+
+LayoutTests: 
+
+Second part of this complicated change by me, Tor Arne Vestb\u00f8!
+
+* Path/To/Complicated/File: Added.
+"""
+
+    def setUp(self):
+        # FIXME: This should not need to touch the filesystem; however,
+        # ChangeLog is difficult to mock at present.
+        self.filesystem = FileSystem()
+        self.temp_dir = str(self.filesystem.mkdtemp(suffix="changelogs"))
+        self.old_cwd = self.filesystem.getcwd()
+        self.filesystem.chdir(self.temp_dir)
+
+        # Trick commit-log-editor into thinking we're in a Subversion working copy so it won't
+        # complain about not being able to figure out what SCM is in use.
+        # FIXME: VCSTools.pm is no longer so easily fooled.  It logs a warning because "svn info"
+        # doesn't treat a bare .svn directory as being part of an svn checkout.
+        self.filesystem.maybe_make_directory(".svn")
+
+        self.changelogs = map(self.filesystem.abspath, (self.filesystem.join("Tools", "ChangeLog"), self.filesystem.join("LayoutTests", "ChangeLog")))
+        for path, contents in zip(self.changelogs, (_changelog1, _changelog2)):
+            self.filesystem.maybe_make_directory(self.filesystem.dirname(path))
+            self.filesystem.write_text_file(path, contents)
+
+    def tearDown(self):
+        self.filesystem.rmtree(self.temp_dir)
+        self.filesystem.chdir(self.old_cwd)
+
+    def test_commit_message_for_this_commit(self):
+        executive = Executive()
+
+        def mock_run(*args, **kwargs):
+            # Note that we use a real Executive here, not a MockExecutive, so we can test that we're
+            # invoking commit-log-editor correctly.
+            env = os.environ.copy()
+            env['CHANGE_LOG_EMAIL_ADDRESS'] = 'vestbo@webkit.org'
+            kwargs['env'] = env
+            return executive.run_command(*args, **kwargs)
+
+        detector = SCMDetector(self.filesystem, executive)
+        real_scm = detector.detect_scm_system(self.old_cwd)
+
+        mock_scm = MockSCM()
+        mock_scm.run = mock_run
+        mock_scm.script_path = real_scm.script_path
+
+        checkout = Checkout(mock_scm)
+        checkout.modified_changelogs = lambda git_commit, changed_files=None: self.changelogs
+        commit_message = checkout.commit_message_for_this_commit(git_commit=None, return_stderr=True)
+        # Throw away the first line - a warning about unknown VCS root.
+        commit_message.message_lines = commit_message.message_lines[1:]
+        self.assertEqual(commit_message.message(), self.expected_commit_message)
+
+
+class CheckoutTest(unittest.TestCase):
+    def _make_checkout(self):
+        return Checkout(scm=MockSCM(), filesystem=MockFileSystem(), executive=MockExecutive())
+
+    def test_latest_entry_for_changelog_at_revision(self):
+        def mock_contents_at_revision(changelog_path, revision):
+            self.assertEqual(changelog_path, "foo")
+            self.assertEqual(revision, "bar")
+            # contents_at_revision is expected to return a byte array (str)
+            # so we encode our unicode ChangeLog down to a utf-8 stream.
+            # The ChangeLog utf-8 decoding should ignore invalid codepoints.
+            invalid_utf8 = "\255"
+            return _changelog1.encode("utf-8") + invalid_utf8
+        checkout = self._make_checkout()
+        checkout._scm.contents_at_revision = mock_contents_at_revision
+        entry = checkout._latest_entry_for_changelog_at_revision("foo", "bar")
+        self.assertEqual(entry.contents(), _changelog1entry1)
+
+    # FIXME: This tests a hack around our current changed_files handling.
+    # Right now changelog_entries_for_revision tries to fetch deleted files
+    # from revisions, resulting in a ScriptError exception.  Test that we
+    # recover from those and still return the other ChangeLog entries.
+    def test_changelog_entries_for_revision(self):
+        checkout = self._make_checkout()
+        checkout._scm.changed_files_for_revision = lambda revision: ['foo/ChangeLog', 'bar/ChangeLog']
+
+        def mock_latest_entry_for_changelog_at_revision(path, revision):
+            if path == "foo/ChangeLog":
+                return 'foo'
+            raise ScriptError()
+
+        checkout._latest_entry_for_changelog_at_revision = mock_latest_entry_for_changelog_at_revision
+
+        # Even though fetching one of the entries failed, the other should succeed.
+        entries = checkout.changelog_entries_for_revision(1)
+        self.assertEqual(len(entries), 1)
+        self.assertEqual(entries[0], 'foo')
+
+    def test_commit_info_for_revision(self):
+        checkout = self._make_checkout()
+        checkout._scm.changed_files_for_revision = lambda revision: ['path/to/file', 'another/file']
+        checkout._scm.committer_email_for_revision = lambda revision, changed_files=None: "committer@example.com"
+        checkout.changelog_entries_for_revision = lambda revision, changed_files=None: [ChangeLogEntry(_changelog1entry1)]
+        commitinfo = checkout.commit_info_for_revision(4)
+        self.assertEqual(commitinfo.bug_id(), 36629)
+        self.assertEqual(commitinfo.author_name(), u"Tor Arne Vestb\u00f8")
+        self.assertEqual(commitinfo.author_email(), "vestbo@webkit.org")
+        self.assertEqual(commitinfo.reviewer_text(), None)
+        self.assertEqual(commitinfo.reviewer(), None)
+        self.assertEqual(commitinfo.committer_email(), "committer@example.com")
+        self.assertEqual(commitinfo.committer(), None)
+        self.assertEqual(commitinfo.to_json(), {
+            'bug_id': 36629,
+            'author_email': 'vestbo@webkit.org',
+            'changed_files': [
+                'path/to/file',
+                'another/file',
+            ],
+            'reviewer_text': None,
+            'author_name': u'Tor Arne Vestb\xf8',
+        })
+
+        checkout.changelog_entries_for_revision = lambda revision, changed_files=None: []
+        self.assertEqual(checkout.commit_info_for_revision(1), None)
+
+    def test_bug_id_for_revision(self):
+        checkout = self._make_checkout()
+        checkout._scm.committer_email_for_revision = lambda revision: "committer@example.com"
+        checkout.changelog_entries_for_revision = lambda revision, changed_files=None: [ChangeLogEntry(_changelog1entry1)]
+        self.assertEqual(checkout.bug_id_for_revision(4), 36629)
+
+    def test_bug_id_for_this_commit(self):
+        checkout = self._make_checkout()
+        checkout.commit_message_for_this_commit = lambda git_commit, changed_files=None: CommitMessage(ChangeLogEntry(_changelog1entry1).contents().splitlines())
+        self.assertEqual(checkout.bug_id_for_this_commit(git_commit=None), 36629)
+
+    def test_modified_changelogs(self):
+        checkout = self._make_checkout()
+        checkout._scm.checkout_root = "/foo/bar"
+        checkout._scm.changed_files = lambda git_commit: ["file1", "ChangeLog", "relative/path/ChangeLog"]
+        expected_changelogs = ["/foo/bar/ChangeLog", "/foo/bar/relative/path/ChangeLog"]
+        self.assertEqual(checkout.modified_changelogs(git_commit=None), expected_changelogs)
+
+    def test_suggested_reviewers(self):
+        def mock_changelog_entries_for_revision(revision, changed_files=None):
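+            # Even revisions map to the unreviewed entry, odd revisions to the entry reviewed by Eric Seidel.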
+            if revision % 2 == 0:
+                return [ChangeLogEntry(_changelog1entry1)]
+            return [ChangeLogEntry(_changelog1entry2)]
+
+        def mock_revisions_changing_file(path, limit=5):
+            if path.endswith("ChangeLog"):
+                return [3]
+            return [4, 8]
+
+        checkout = self._make_checkout()
+        checkout._scm.checkout_root = "/foo/bar"
+        checkout._scm.changed_files = lambda git_commit: ["file1", "file2", "relative/path/ChangeLog"]
+        checkout._scm.revisions_changing_file = mock_revisions_changing_file
+        checkout.changelog_entries_for_revision = mock_changelog_entries_for_revision
+        reviewers = checkout.suggested_reviewers(git_commit=None)
+        reviewer_names = [reviewer.full_name for reviewer in reviewers]
+        self.assertEqual(reviewer_names, [u'Tor Arne Vestb\xf8'])
+
+    def test_chromium_deps(self):
+        checkout = self._make_checkout()
+        checkout._scm.checkout_root = "/foo/bar"
+        self.assertEqual(checkout.chromium_deps()._path, '/foo/bar/Source/WebKit/chromium/DEPS')
+
+    def test_apply_patch(self):
+        checkout = self._make_checkout()
+        checkout._executive = MockExecutive(should_log=True)
+        checkout._scm.script_path = lambda script: script
+        mock_patch = Mock()
+        mock_patch.contents = lambda: "foo"
+        mock_patch.reviewer = lambda: None
+        expected_stderr = "MOCK run_command: ['svn-apply', '--force'], cwd=/mock-checkout, input=foo\n"
+        OutputCapture().assert_outputs(self, checkout.apply_patch, [mock_patch], expected_stderr=expected_stderr)
diff --git a/Tools/Scripts/webkitpy/common/checkout/commitinfo.py b/Tools/Scripts/webkitpy/common/checkout/commitinfo.py
new file mode 100644
index 0000000..cba3fdd
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/checkout/commitinfo.py
@@ -0,0 +1,100 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# WebKit's python module for holding information on a commit
+
+from webkitpy.common.config import urls
+from webkitpy.common.config.committers import CommitterList
+
+
+class CommitInfo(object):
+    def __init__(self, revision, committer_email, changelog_data, committer_list=CommitterList()):
+        self._revision = revision
+        self._committer_email = committer_email
+        self._changelog_data = changelog_data
+
+        # Derived values:
+        self._committer = committer_list.committer_by_email(committer_email)
+
+    def revision(self):
+        return self._revision
+
+    def committer(self):
+        return self._committer  # None if committer isn't in committers.py
+
+    def committer_email(self):
+        return self._committer_email
+
+    def bug_id(self):
+        return self._changelog_data["bug_id"]  # May be None
+
+    def author(self):
+        return self._changelog_data["author"]  # May be None
+
+    def author_name(self):
+        return self._changelog_data["author_name"]
+
+    def author_email(self):
+        return self._changelog_data["author_email"]
+
+    def reviewer(self):
+        return self._changelog_data["reviewer"]  # May be None
+
+    def reviewer_text(self):
+        return self._changelog_data["reviewer_text"]  # May be None
+
+    def changed_files(self):
+        return self._changelog_data["changed_files"]
+
+    def to_json(self):
+        return {
+            "bug_id": self.bug_id(),
+            "author_name": self.author_name(),
+            "author_email": self.author_email(),
+            "reviewer_text": self.reviewer_text(),
+            "changed_files": self.changed_files(),
+        }
+
+    def responsible_parties(self):
+        responsible_parties = [
+            self.committer(),
+            self.author(),
+            self.reviewer(),
+        ]
+        return set([party for party in responsible_parties if party]) # Filter out None
+
+    # FIXME: It is slightly lame that this "view" method is on this "model" class (in MVC terms)
+    def blame_string(self, bugs):
+        string = "r%s:\n" % self.revision()
+        string += "  %s\n" % urls.view_revision_url(self.revision())
+        string += "  Bug: %s (%s)\n" % (self.bug_id(), bugs.bug_url_for_bug_id(self.bug_id()))
+        author_line = "\"%s\" <%s>" % (self.author_name(), self.author_email())
+        string += "  Author: %s\n" % (self.author() or author_line)
+        string += "  Reviewer: %s\n" % (self.reviewer() or self.reviewer_text())
+        string += "  Committer: %s" % self.committer()
+        return string
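As an informal sketch of how the blame_string() view above renders a commit (not part of the patch), the snippet below builds a CommitInfo from hand-written changelog data and a hypothetical stub standing in for the bugs argument; the revision, bug id, and names are made up:

from webkitpy.common.checkout.commitinfo import CommitInfo

class StubBugs(object):
    # Hypothetical stand-in: blame_string() only needs bug_url_for_bug_id().
    def bug_url_for_bug_id(self, bug_id):
        return "https://bugs.webkit.org/show_bug.cgi?id=%s" % bug_id

changelog_data = {
    "bug_id": 36629,
    "author_name": "Example Author",
    "author_email": "author@example.com",
    "author": None,        # blame_string() falls back to the "name" <email> form.
    "reviewer_text": "Example Reviewer",
    "reviewer": None,      # Falls back to reviewer_text().
    "changed_files": ["ChangeLog"],
}
info = CommitInfo(12345, "committer@example.com", changelog_data)
print(info.blame_string(StubBugs()))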
diff --git a/Tools/Scripts/webkitpy/common/checkout/commitinfo_unittest.py b/Tools/Scripts/webkitpy/common/checkout/commitinfo_unittest.py
new file mode 100644
index 0000000..f58e6f1
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/checkout/commitinfo_unittest.py
@@ -0,0 +1,61 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.checkout.commitinfo import CommitInfo
+from webkitpy.common.config.committers import CommitterList, Committer, Reviewer
+
+class CommitInfoTest(unittest.TestCase):
+
+    def test_commit_info_creation(self):
+        author = Committer("Author", "author@example.com")
+        committer = Committer("Committer", "committer@example.com")
+        reviewer = Reviewer("Reviewer", "reviewer@example.com")
+        committer_list = CommitterList(committers=[author, committer], reviewers=[reviewer])
+
+        changelog_data = {
+            "bug_id": 1234,
+            "author_name": "Committer",
+            "author_email": "author@example.com",
+            "author": author,
+            "reviewer_text": "Reviewer",
+            "reviewer": reviewer,
+        }
+        commit = CommitInfo(123, "committer@example.com", changelog_data, committer_list)
+
+        self.assertEqual(commit.revision(), 123)
+        self.assertEqual(commit.bug_id(), 1234)
+        self.assertEqual(commit.author_name(), "Committer")
+        self.assertEqual(commit.author_email(), "author@example.com")
+        self.assertEqual(commit.author(), author)
+        self.assertEqual(commit.reviewer_text(), "Reviewer")
+        self.assertEqual(commit.reviewer(), reviewer)
+        self.assertEqual(commit.committer(), committer)
+        self.assertEqual(commit.committer_email(), "committer@example.com")
+        self.assertEqual(commit.responsible_parties(), set([author, committer, reviewer]))
diff --git a/Tools/Scripts/webkitpy/common/checkout/deps.py b/Tools/Scripts/webkitpy/common/checkout/deps.py
new file mode 100644
index 0000000..2f3a873
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/checkout/deps.py
@@ -0,0 +1,61 @@
+# Copyright (C) 2011, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# WebKit's Python module for reading and modifying DEPS files
+
+import codecs
+import fileinput
+import re
+import textwrap
+
+
+class DEPS(object):
+
+    _variable_regexp = r"\s+'%s':\s+'(?P<value>\d+)'"
+
+    def __init__(self, path):
+        # FIXME: This should take a FileSystem object.
+        self._path = path
+
+    def read_variable(self, name):
+        pattern = re.compile(self._variable_regexp % name)
+        for line in fileinput.FileInput(self._path):
+            match = pattern.match(line)
+            if match:
+                return int(match.group("value"))
+
+    def write_variable(self, name, value):
+        pattern = re.compile(self._variable_regexp % name)
+        replacement_line = "  '%s': '%s'" % (name, value)
+        # inplace=1 creates a backup file and re-directs stdout to the file
+        for line in fileinput.FileInput(self._path, inplace=1):
+            if pattern.match(line):
+                print replacement_line
+                continue
+            # Trailing comma suppresses printing newline
+            print line,
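A minimal usage sketch for the DEPS helper above (not part of the patch); the path and the 'webkit_revision' variable name are assumptions about a typical Chromium DEPS file:

from webkitpy.common.checkout.deps import DEPS

# Hypothetical location; in this tree Checkout.chromium_deps() points at
# Source/WebKit/chromium/DEPS.
deps = DEPS("/path/to/Source/WebKit/chromium/DEPS")

current = deps.read_variable("webkit_revision")  # int, or None if the variable is absent
if current is not None:
    # Rewrites the file in place (fileinput redirects stdout to the file).
    deps.write_variable("webkit_revision", current + 1)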
diff --git a/Tools/Scripts/webkitpy/common/checkout/deps_mock.py b/Tools/Scripts/webkitpy/common/checkout/deps_mock.py
new file mode 100644
index 0000000..cb57e8b
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/checkout/deps_mock.py
@@ -0,0 +1,38 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+from webkitpy.common.system.deprecated_logging import log
+
+
+class MockDEPS(object):
+    def read_variable(self, name):
+        return 6564
+
+    def write_variable(self, name, value):
+        log("MOCK: MockDEPS.write_variable(%s, %s)" % (name, value))
diff --git a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
new file mode 100644
index 0000000..2ed552c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
@@ -0,0 +1,186 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""WebKit's Python module for interacting with patches."""
+
+import logging
+import re
+
+_log = logging.getLogger(__name__)
+
+
+# FIXME: This is broken. We should compile our regexps up-front
+# instead of using a custom cache.
+_regexp_compile_cache = {}
+
+
+# FIXME: This function should be removed.
+def match(pattern, string):
+    """Matches the string with the pattern, caching the compiled regexp."""
+    if pattern not in _regexp_compile_cache:
+        _regexp_compile_cache[pattern] = re.compile(pattern)
+    return _regexp_compile_cache[pattern].match(string)
+
+
+# FIXME: This belongs on DiffParser (e.g. as to_svn_diff()).
+def git_diff_to_svn_diff(line):
+    """Converts a git formatted diff line to a svn formatted line.
+
+    Args:
+      line: A string representing a line of the diff.
+    """
+    # FIXME: This list should be a class member on DiffParser.
+    # These regexp patterns should be compiled once instead of every time.
+    conversion_patterns = (("^diff --git \w/(.+) \w/(?P<FilePath>.+)", lambda matched: "Index: " + matched.group('FilePath') + "\n"),
+                           ("^new file.*", lambda matched: "\n"),
+                           ("^index [0-9a-f]{7}\.\.[0-9a-f]{7} [0-9]{6}", lambda matched: "===================================================================\n"),
+                           ("^--- \w/(?P<FilePath>.+)", lambda matched: "--- " + matched.group('FilePath') + "\n"),
+                           ("^\+\+\+ \w/(?P<FilePath>.+)", lambda matched: "+++ " + matched.group('FilePath') + "\n"))
+
+    for pattern, conversion in conversion_patterns:
+        matched = match(pattern, line)
+        if matched:
+            return conversion(matched)
+    return line
+
+
+# FIXME: This method belongs on DiffParser
+def get_diff_converter(first_diff_line):
+    """Gets a converter function of diff lines.
+
+    Args:
+      first_diff_line: The first filename line of a diff file.
+                       If this line is git formatted, we'll return a
+                       converter from git to SVN.
+    """
+    if match(r"^diff --git \w/", first_diff_line):
+        return git_diff_to_svn_diff
+    return lambda input: input
+
+
+_INITIAL_STATE = 1
+_DECLARED_FILE_PATH = 2
+_PROCESSING_CHUNK = 3
+
+
+class DiffFile(object):
+    """Contains the information for one file in a patch.
+
+    The field "lines" is a list which contains tuples in this format:
+       (deleted_line_number, new_line_number, line_string)
+    If deleted_line_number is zero, it means this line is newly added.
+    If new_line_number is zero, it means this line is deleted.
+    """
+    # FIXME: Tuples generally grow into classes.  We should consider
+    # adding a DiffLine object.
+
+    def added_or_modified_line_numbers(self):
+        # This logic was moved from patchreader.py, but may not be
+        # the right API for this object long-term.
+        return [line[1] for line in self.lines if not line[0]]
+
+    def __init__(self, filename):
+        self.filename = filename
+        self.lines = []
+
+    def add_new_line(self, line_number, line):
+        self.lines.append((0, line_number, line))
+
+    def add_deleted_line(self, line_number, line):
+        self.lines.append((line_number, 0, line))
+
+    def add_unchanged_line(self, deleted_line_number, new_line_number, line):
+        self.lines.append((deleted_line_number, new_line_number, line))
+
+
+# If this is going to be called DiffParser, it should be a reusable parser.
+# Otherwise we should rename it to ParsedDiff or just Diff.
+class DiffParser(object):
+    """A parser for a patch file.
+
+    The field "files" is a dict whose key is the filename and value is
+    a DiffFile object.
+    """
+
+    def __init__(self, diff_input):
+        """Parses a diff.
+
+        Args:
+          diff_input: An iterable object.
+        """
+        self.files = self._parse_into_diff_files(diff_input)
+
+    # FIXME: This function is way too long and needs to be broken up.
+    def _parse_into_diff_files(self, diff_input):
+        files = {}
+        state = _INITIAL_STATE
+        current_file = None
+        old_diff_line = None
+        new_diff_line = None
+        for line in diff_input:
+            line = line.rstrip("\n")
+            if state == _INITIAL_STATE:
+                transform_line = get_diff_converter(line)
+            line = transform_line(line)
+
+            file_declaration = match(r"^Index: (?P<FilePath>.+)", line)
+            if file_declaration:
+                filename = file_declaration.group('FilePath')
+                current_file = DiffFile(filename)
+                files[filename] = current_file
+                state = _DECLARED_FILE_PATH
+                continue
+
+            lines_changed = match(r"^@@ -(?P<OldStartLine>\d+)(,\d+)? \+(?P<NewStartLine>\d+)(,\d+)? @@", line)
+            if lines_changed:
+                if state != _DECLARED_FILE_PATH and state != _PROCESSING_CHUNK:
+                    _log.error('Unexpected line change without file path '
+                               'declaration: %r' % line)
+                old_diff_line = int(lines_changed.group('OldStartLine'))
+                new_diff_line = int(lines_changed.group('NewStartLine'))
+                state = _PROCESSING_CHUNK
+                continue
+
+            if state == _PROCESSING_CHUNK:
+                if line.startswith('+'):
+                    current_file.add_new_line(new_diff_line, line[1:])
+                    new_diff_line += 1
+                elif line.startswith('-'):
+                    current_file.add_deleted_line(old_diff_line, line[1:])
+                    old_diff_line += 1
+                elif line.startswith(' '):
+                    current_file.add_unchanged_line(old_diff_line, new_diff_line, line[1:])
+                    old_diff_line += 1
+                    new_diff_line += 1
+                elif line == '\\ No newline at end of file':
+                    # Nothing to do.  We may still have some added lines.
+                    pass
+                else:
+                    _log.error('Unexpected diff format when parsing a '
+                               'chunk: %r' % line)
+        return files
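A short sketch of how the parser above is typically driven (not part of the patch): feed it an iterable of diff lines and walk the (deleted_line_number, new_line_number, text) tuples it records per file. The diff text here is a tiny made-up example:

from webkitpy.common.checkout.diff_parser import DiffParser

sample_diff = """Index: foo.txt
===================================================================
--- foo.txt
+++ foo.txt
@@ -1,2 +1,2 @@
 unchanged line
-old line
+new line
"""

parser = DiffParser(sample_diff.splitlines())
for filename, diff_file in parser.files.items():
    # Each entry in diff_file.lines is (deleted_line_number, new_line_number, text);
    # a deleted_line_number of 0 marks a newly added line.
    added = diff_file.added_or_modified_line_numbers()
    print("%s: added/modified lines %s" % (filename, added))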
diff --git a/Tools/Scripts/webkitpy/common/checkout/diff_parser_unittest.py b/Tools/Scripts/webkitpy/common/checkout/diff_parser_unittest.py
new file mode 100644
index 0000000..d61a098
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/checkout/diff_parser_unittest.py
@@ -0,0 +1,94 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+import diff_parser
+import re
+
+from webkitpy.common.checkout.diff_test_data import DIFF_TEST_DATA
+
+class DiffParserTest(unittest.TestCase):
+    def test_diff_parser(self, parser = None):
+        if not parser:
+            parser = diff_parser.DiffParser(DIFF_TEST_DATA.splitlines())
+        self.assertEquals(3, len(parser.files))
+
+        self.assertTrue('WebCore/rendering/style/StyleFlexibleBoxData.h' in parser.files)
+        diff = parser.files['WebCore/rendering/style/StyleFlexibleBoxData.h']
+        self.assertEquals(7, len(diff.lines))
+        # The first two unchanged lines.
+        self.assertEquals((47, 47), diff.lines[0][0:2])
+        self.assertEquals('', diff.lines[0][2])
+        self.assertEquals((48, 48), diff.lines[1][0:2])
+        self.assertEquals('    unsigned align : 3; // EBoxAlignment', diff.lines[1][2])
+        # The deleted line
+        self.assertEquals((50, 0), diff.lines[3][0:2])
+        self.assertEquals('    unsigned orient: 1; // EBoxOrient', diff.lines[3][2])
+
+        # The first file looks OK. Let's check the next, more complicated file.
+        self.assertTrue('WebCore/rendering/style/StyleRareInheritedData.cpp' in parser.files)
+        diff = parser.files['WebCore/rendering/style/StyleRareInheritedData.cpp']
+        # There are 3 chunks.
+        self.assertEquals(7 + 7 + 9, len(diff.lines))
+        # Around an added line.
+        self.assertEquals((60, 61), diff.lines[9][0:2])
+        self.assertEquals((0, 62), diff.lines[10][0:2])
+        self.assertEquals((61, 63), diff.lines[11][0:2])
+        # Look through the last chunk, which contains both additions and deletions.
+        self.assertEquals((81, 83), diff.lines[14][0:2])
+        self.assertEquals((82, 84), diff.lines[15][0:2])
+        self.assertEquals((83, 85), diff.lines[16][0:2])
+        self.assertEquals((84, 0), diff.lines[17][0:2])
+        self.assertEquals((0, 86), diff.lines[18][0:2])
+        self.assertEquals((0, 87), diff.lines[19][0:2])
+        self.assertEquals((85, 88), diff.lines[20][0:2])
+        self.assertEquals((86, 89), diff.lines[21][0:2])
+        self.assertEquals((87, 90), diff.lines[22][0:2])
+
+        # Check if a newly added file is correctly handled.
+        diff = parser.files['LayoutTests/platform/mac/fast/flexbox/box-orient-button-expected.checksum']
+        self.assertEquals(1, len(diff.lines))
+        self.assertEquals((0, 1), diff.lines[0][0:2])
+
+    def test_git_mnemonicprefix(self):
+        p = re.compile(r' ([a|b])/')
+
+        prefixes = [
+            { 'a' : 'i', 'b' : 'w' }, # git-diff (compares the (i)ndex and the (w)ork tree)
+            { 'a' : 'c', 'b' : 'w' }, # git-diff HEAD (compares a (c)ommit and the (w)ork tree)
+            { 'a' : 'c', 'b' : 'i' }, # git diff --cached (compares a (c)ommit and the (i)ndex)
+            { 'a' : 'o', 'b' : 'w' }, # git-diff HEAD:file1 file2 (compares an (o)bject and a (w)ork tree entity)
+            { 'a' : '1', 'b' : '2' }, # git diff --no-index a b (compares two non-git things (1) and (2))
+        ]
+
+        for prefix in prefixes:
+            patch = p.sub(lambda x: " %s/" % prefix[x.group(1)], DIFF_TEST_DATA)
+            self.test_diff_parser(diff_parser.DiffParser(patch.splitlines()))
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/checkout/diff_test_data.py b/Tools/Scripts/webkitpy/common/checkout/diff_test_data.py
new file mode 100644
index 0000000..5f1719d
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/checkout/diff_test_data.py
@@ -0,0 +1,80 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#  FIXME: Store this as a .patch file in some new fixtures directory or similar.
+DIFF_TEST_DATA = '''diff --git a/WebCore/rendering/style/StyleFlexibleBoxData.h b/WebCore/rendering/style/StyleFlexibleBoxData.h
+index f5d5e74..3b6aa92 100644
+--- a/WebCore/rendering/style/StyleFlexibleBoxData.h
++++ b/WebCore/rendering/style/StyleFlexibleBoxData.h
+@@ -47,7 +47,6 @@ public:
+ 
+     unsigned align : 3; // EBoxAlignment
+     unsigned pack: 3; // EBoxAlignment
+-    unsigned orient: 1; // EBoxOrient
+     unsigned lines : 1; // EBoxLines
+ 
+ private:
+diff --git a/WebCore/rendering/style/StyleRareInheritedData.cpp b/WebCore/rendering/style/StyleRareInheritedData.cpp
+index ce21720..324929e 100644
+--- a/WebCore/rendering/style/StyleRareInheritedData.cpp
++++ b/WebCore/rendering/style/StyleRareInheritedData.cpp
+@@ -39,6 +39,7 @@ StyleRareInheritedData::StyleRareInheritedData()
+     , textSizeAdjust(RenderStyle::initialTextSizeAdjust())
+     , resize(RenderStyle::initialResize())
+     , userSelect(RenderStyle::initialUserSelect())
++    , boxOrient(RenderStyle::initialBoxOrient())
+ {
+ }
+ 
+@@ -58,6 +59,7 @@ StyleRareInheritedData::StyleRareInheritedData(const StyleRareInheritedData& o)
+     , textSizeAdjust(o.textSizeAdjust)
+     , resize(o.resize)
+     , userSelect(o.userSelect)
++    , boxOrient(o.boxOrient)
+ {
+ }
+ 
+@@ -81,7 +83,8 @@ bool StyleRareInheritedData::operator==(const StyleRareInheritedData& o) const
+         && khtmlLineBreak == o.khtmlLineBreak
+         && textSizeAdjust == o.textSizeAdjust
+         && resize == o.resize
+-        && userSelect == o.userSelect;
++        && userSelect == o.userSelect
++        && boxOrient == o.boxOrient;
+ }
+ 
+ bool StyleRareInheritedData::shadowDataEquivalent(const StyleRareInheritedData& o) const
+diff --git a/LayoutTests/platform/mac/fast/flexbox/box-orient-button-expected.checksum b/LayoutTests/platform/mac/fast/flexbox/box-orient-button-expected.checksum
+new file mode 100644
+index 0000000..6db26bd
+--- /dev/null
++++ b/LayoutTests/platform/mac/fast/flexbox/box-orient-button-expected.checksum
+@@ -0,0 +1 @@
++61a373ee739673a9dcd7bac62b9f182e
+\ No newline at end of file
+'''
diff --git a/Tools/Scripts/webkitpy/common/checkout/scm/__init__.py b/Tools/Scripts/webkitpy/common/checkout/scm/__init__.py
new file mode 100644
index 0000000..f691f58
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/checkout/scm/__init__.py
@@ -0,0 +1,8 @@
+# Required for Python to search this directory for module files
+
+# We only export public API here.
+from .commitmessage import CommitMessage
+from .detection import SCMDetector
+from .git import Git, AmbiguousCommitError
+from .scm import SCM, AuthenticationError, CheckoutNeedsUpdate
+from .svn import SVN
diff --git a/Tools/Scripts/webkitpy/common/checkout/scm/commitmessage.py b/Tools/Scripts/webkitpy/common/checkout/scm/commitmessage.py
new file mode 100644
index 0000000..be0d431
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/checkout/scm/commitmessage.py
@@ -0,0 +1,62 @@
+# Copyright (c) 2009, 2010, 2011 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import re
+
+
+def _first_non_empty_line_after_index(lines, index=0):
+    first_non_empty_line = index
+    for line in lines[index:]:
+        if re.match("^\s*$", line):
+            first_non_empty_line += 1
+        else:
+            break
+    return first_non_empty_line
+
+
+class CommitMessage:
+    def __init__(self, message):
+        self.message_lines = message[_first_non_empty_line_after_index(message, 0):]
+
+    def body(self, lstrip=False):
+        lines = self.message_lines[_first_non_empty_line_after_index(self.message_lines, 1):]
+        if lstrip:
+            lines = [line.lstrip() for line in lines]
+        return "\n".join(lines) + "\n"
+
+    def description(self, lstrip=False, strip_url=False):
+        line = self.message_lines[0]
+        if lstrip:
+            line = line.lstrip()
+        if strip_url:
+            line = re.sub(r"^(\s*)<.+> ", r"\1", line)
+        return line
+
+    def message(self):
+        return "\n".join(self.message_lines) + "\n"
diff --git a/Tools/Scripts/webkitpy/common/checkout/scm/detection.py b/Tools/Scripts/webkitpy/common/checkout/scm/detection.py
new file mode 100644
index 0000000..44bc926
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/checkout/scm/detection.py
@@ -0,0 +1,81 @@
+# Copyright (c) 2009, 2010, 2011 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.system.filesystem import FileSystem
+from webkitpy.common.system.executive import Executive
+
+from webkitpy.common.system.deprecated_logging import log
+
+from .svn import SVN
+from .git import Git
+
+
+class SCMDetector(object):
+    def __init__(self, filesystem, executive):
+        self._filesystem = filesystem
+        self._executive = executive
+
+    def default_scm(self, patch_directories=None):
+        """Return the default SCM object as determined by the CWD and running code.
+
+        Returns the default SCM object for the current working directory; if the
+        CWD is not in a checkout, then we attempt to figure out if the SCM module
+        itself is part of a checkout, and return that one. If neither is part of
+        a checkout, an exception is raised.
+        """
+        cwd = self._filesystem.getcwd()
+        scm_system = self.detect_scm_system(cwd, patch_directories)
+        if not scm_system:
+            script_directory = self._filesystem.dirname(self._filesystem.path_to_module(self.__module__))
+            scm_system = self.detect_scm_system(script_directory, patch_directories)
+            if scm_system:
+                log("The current directory (%s) is not a WebKit checkout, using %s" % (cwd, scm_system.checkout_root))
+            else:
+                raise Exception("FATAL: Failed to determine the SCM system for either %s or %s" % (cwd, script_directory))
+        return scm_system
+
+    def detect_scm_system(self, path, patch_directories=None):
+        absolute_path = self._filesystem.abspath(path)
+
+        if patch_directories == []:
+            patch_directories = None
+
+        if SVN.in_working_directory(absolute_path, executive=self._executive):
+            return SVN(cwd=absolute_path, patch_directories=patch_directories, filesystem=self._filesystem, executive=self._executive)
+
+        if Git.in_working_directory(absolute_path, executive=self._executive):
+            return Git(cwd=absolute_path, filesystem=self._filesystem, executive=self._executive)
+
+        return None
+
+
+# FIXME: These free functions are all deprecated:
+
+def detect_scm_system(path, patch_directories=None):
+    return SCMDetector(FileSystem(), Executive()).detect_scm_system(path, patch_directories)
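A brief sketch of the intended call pattern (not part of the patch): construct SCMDetector with real FileSystem and Executive objects and let it probe a path; the deprecated free function remains for older callers. The path below is hypothetical:

from webkitpy.common.system.filesystem import FileSystem
from webkitpy.common.system.executive import Executive
from webkitpy.common.checkout.scm.detection import SCMDetector, detect_scm_system

detector = SCMDetector(FileSystem(), Executive())
scm = detector.detect_scm_system("/path/to/checkout")  # SVN, Git, or None.
if scm:
    print("%s checkout rooted at %s" % (scm.display_name(), scm.checkout_root))

# Deprecated module-level helper; equivalent to the call above.
legacy_scm = detect_scm_system("/path/to/checkout")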
diff --git a/Tools/Scripts/webkitpy/common/checkout/scm/detection_unittest.py b/Tools/Scripts/webkitpy/common/checkout/scm/detection_unittest.py
new file mode 100644
index 0000000..ecd9125
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/checkout/scm/detection_unittest.py
@@ -0,0 +1,48 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+# Copyright (C) 2009 Apple Inc. All rights reserved.
+# Copyright (C) 2011 Daniel Bates (dbates@intudata.com). All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from .detection import SCMDetector
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.executive_mock import MockExecutive
+from webkitpy.common.system.outputcapture import OutputCapture
+
+
+class SCMDetectorTest(unittest.TestCase):
+    def test_detect_scm_system(self):
+        filesystem = MockFileSystem()
+        executive = MockExecutive(should_log=True)
+        detector = SCMDetector(filesystem, executive)
+
+        expected_stderr = "MOCK run_command: ['svn', 'info'], cwd=/\nMOCK run_command: ['git', 'rev-parse', '--is-inside-work-tree'], cwd=/\n"
+        scm = OutputCapture().assert_outputs(self, detector.detect_scm_system, ["/"], expected_stderr=expected_stderr)
+        self.assertEqual(scm, None)
+        # FIXME: This should make a synthetic tree and test SVN and Git detection in that tree.
diff --git a/Tools/Scripts/webkitpy/common/checkout/scm/git.py b/Tools/Scripts/webkitpy/common/checkout/scm/git.py
new file mode 100644
index 0000000..f688238
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/checkout/scm/git.py
@@ -0,0 +1,496 @@
+# Copyright (c) 2009, 2010, 2011 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import os
+import re
+
+from webkitpy.common.memoized import memoized
+from webkitpy.common.system.deprecated_logging import log
+from webkitpy.common.system.executive import Executive, ScriptError
+
+from .commitmessage import CommitMessage
+from .scm import AuthenticationError, SCM, commit_error_handler
+from .svn import SVN, SVNRepository
+
+
+_log = logging.getLogger(__name__)
+
+
+def run_command(*args, **kwargs):
+    # FIXME: This should not be a global static.
+    # New code should use Executive.run_command directly instead
+    return Executive().run_command(*args, **kwargs)
+
+
+class AmbiguousCommitError(Exception):
+    def __init__(self, num_local_commits, working_directory_is_clean):
+        self.num_local_commits = num_local_commits
+        self.working_directory_is_clean = working_directory_is_clean
+
+
+class Git(SCM, SVNRepository):
+
+    # Git doesn't appear to document error codes, but seems to return
+    # 1 or 128, mostly.
+    ERROR_FILE_IS_MISSING = 128
+
+    executable_name = 'git'
+
+    def __init__(self, cwd, **kwargs):
+        SCM.__init__(self, cwd, **kwargs)
+        self._check_git_architecture()
+
+    def _machine_is_64bit(self):
+        import platform
+        # This is only tested on Mac.
+        if not platform.mac_ver()[0]:
+            return False
+
+        # platform.architecture()[0] can be '64bit' even if the machine is 32bit:
+        # http://mail.python.org/pipermail/pythonmac-sig/2009-September/021648.html
+        # Use the sysctl command to find out what the processor actually supports.
+        return self.run(['sysctl', '-n', 'hw.cpu64bit_capable']).rstrip() == '1'
+
+    def _executable_is_64bit(self, path):
+        # Again, platform.architecture() fails us.  On my machine
+        # git_bits = platform.architecture(executable=git_path, bits='default')[0]
+        # git_bits is just 'default', meaning the call failed.
+        file_output = self.run(['file', path])
+        return re.search('x86_64', file_output)
+
+    def _check_git_architecture(self):
+        if not self._machine_is_64bit():
+            return
+
+        # We could path-search entirely in python or with
+        # which.py (http://code.google.com/p/which), but this is easier:
+        git_path = self.run(['which', self.executable_name]).rstrip()
+        if self._executable_is_64bit(git_path):
+            return
+
+        webkit_dev_thread_url = "https://lists.webkit.org/pipermail/webkit-dev/2010-December/015287.html"
+        log("Warning: This machine is 64-bit, but the git binary (%s) does not support 64-bit.\nInstall a 64-bit git for better performance, see:\n%s\n" % (git_path, webkit_dev_thread_url))
+
+    def _run_git(self, command_args, **kwargs):
+        full_command_args = [self.executable_name] + command_args
+        full_kwargs = kwargs
+        if not 'cwd' in full_kwargs:
+            full_kwargs['cwd'] = self.checkout_root
+        return self.run(full_command_args, **full_kwargs)
+
+    @classmethod
+    def in_working_directory(cls, path, executive=None):
+        try:
+            executive = executive or Executive()
+            return executive.run_command([cls.executable_name, 'rev-parse', '--is-inside-work-tree'], cwd=path, error_handler=Executive.ignore_error).rstrip() == "true"
+        except OSError, e:
+            # The Windows bots seem to throw a WindowsError when git isn't installed.
+            return False
+
+    def find_checkout_root(self, path):
+        # "git rev-parse --show-cdup" would be another way to get to the root
+        checkout_root = self._run_git(['rev-parse', '--show-toplevel'], cwd=(path or "./")).strip()
+        if not self._filesystem.isabs(checkout_root):  # Sometimes git returns relative paths
+            checkout_root = self._filesystem.join(path, checkout_root)
+        return checkout_root
+
+    def to_object_name(self, filepath):
+        # FIXME: This can't be the right way to append a slash.
+        root_end_with_slash = self._filesystem.join(self.find_checkout_root(self._filesystem.dirname(filepath)), '')
+        # FIXME: This seems to want some sort of rel_path instead?
+        return filepath.replace(root_end_with_slash, '')
+
+    @classmethod
+    def read_git_config(cls, key, cwd=None):
+        # FIXME: This should probably use cwd=self.checkout_root.
+        # Pass --get-all for cases where the config has multiple values
+        # Pass the cwd if provided so that we can handle the case of running webkit-patch outside of the working directory.
+        # FIXME: This should use an Executive.
+        return run_command([cls.executable_name, "config", "--get-all", key], error_handler=Executive.ignore_error, cwd=cwd).rstrip('\n')
+
+    @staticmethod
+    def commit_success_regexp():
+        return "^Committed r(?P<svn_revision>\d+)$"
+
+    def discard_local_commits(self):
+        self._run_git(['reset', '--hard', self.remote_branch_ref()])
+
+    def local_commits(self):
+        return self._run_git(['log', '--pretty=oneline', 'HEAD...' + self.remote_branch_ref()]).splitlines()
+
+    def rebase_in_progress(self):
+        return self._filesystem.exists(self.absolute_path(self._filesystem.join('.git', 'rebase-apply')))
+
+    def working_directory_is_clean(self):
+        return self._run_git(['diff', 'HEAD', '--no-renames', '--name-only']) == ""
+
+    def clean_working_directory(self):
+        # Could run git clean here too, but that wouldn't match working_directory_is_clean
+        self._run_git(['reset', '--hard', 'HEAD'])
+        # Aborting rebase even though this does not match working_directory_is_clean
+        if self.rebase_in_progress():
+            self._run_git(['rebase', '--abort'])
+
+    def status_command(self):
+        # git status returns non-zero when there are changes, so we use 'git diff --name-status HEAD' instead.
+        # No file contents printed, thus utf-8 autodecoding in self.run is fine.
+        return [self.executable_name, "diff", "--name-status", "--no-renames", "HEAD"]
+
+    def _status_regexp(self, expected_types):
+        return '^(?P<status>[%s])\t(?P<filename>.+)$' % expected_types
+
+    def add_list(self, paths, return_exit_code=False):
+        return self._run_git(["add"] + paths, return_exit_code=return_exit_code)
+
+    def delete_list(self, paths):
+        return self._run_git(["rm", "-f"] + paths)
+
+    def exists(self, path):
+        return_code = self._run_git(["show", "HEAD:%s" % path], return_exit_code=True, decode_output=False)
+        return return_code != self.ERROR_FILE_IS_MISSING
+
+    def _branch_from_ref(self, ref):
+        return ref.replace('refs/heads/', '')
+
+    def _current_branch(self):
+        return self._branch_from_ref(self._run_git(['symbolic-ref', '-q', 'HEAD']).strip())
+
+    def _upstream_branch(self):
+        current_branch = self._current_branch()
+        return self._branch_from_ref(self.read_git_config('branch.%s.merge' % current_branch, cwd=self.checkout_root).strip())
+
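+    # Illustrative examples of merge_base() below, assuming the tracking branch
+    # is named 'master':
+    #   merge_base(None)         -> self.remote_merge_base()
+    #   merge_base('abc123')     -> 'abc123^..abc123' (just that commit)
+    #   merge_base('HEAD....')   -> 'HEAD' (diff HEAD against the working copy)
+    #   merge_base('UPSTREAM..') -> 'master..' (tracking branch to working copy)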
+    def merge_base(self, git_commit):
+        if git_commit:
+            # Rewrite UPSTREAM to the upstream branch
+            if 'UPSTREAM' in git_commit:
+                upstream = self._upstream_branch()
+                if not upstream:
+                    raise ScriptError(message='No upstream/tracking branch set.')
+                git_commit = git_commit.replace('UPSTREAM', upstream)
+
+            # Special-case a trailing '....' to include working copy changes, e.g., 'HEAD....' shows only the diffs from HEAD to the working copy.
+            if git_commit.endswith('....'):
+                return git_commit[:-4]
+
+            if '..' not in git_commit:
+                git_commit = git_commit + "^.." + git_commit
+            return git_commit
+
+        return self.remote_merge_base()
+
+    def changed_files(self, git_commit=None):
+        # FIXME: --diff-filter could be used to avoid the "extract_filenames" step.
+        status_command = [self.executable_name, 'diff', '-r', '--name-status', "--no-renames", "--no-ext-diff", "--full-index", self.merge_base(git_commit)]
+        # FIXME: I'm not sure we're returning the same set of files as SVN.changed_files.
+        # Added (A), Copied (C), Deleted (D), Modified (M), Renamed (R)
+        return self.run_status_and_extract_filenames(status_command, self._status_regexp("ADM"))
+
+    def _changes_files_for_commit(self, git_commit):
+        # --pretty="format:" makes git show not print the commit log header,
+        changed_files = self._run_git(["show", "--pretty=format:", "--name-only", git_commit]).splitlines()
+        # instead it just prints a blank line at the top, so we skip the blank line:
+        return changed_files[1:]
+
+    def changed_files_for_revision(self, revision):
+        commit_id = self.git_commit_from_svn_revision(revision)
+        return self._changes_files_for_commit(commit_id)
+
+    def revisions_changing_file(self, path, limit=5):
+        # Raise a ScriptError if the path does not exist, to match the behavior of the SVN implementation.
+        if not self._filesystem.exists(path):
+            raise ScriptError(message="Path %s does not exist." % path)
+
+        # 'git rev-list HEAD --remove-empty --max-count=5 -- path' would be roughly equivalent.
+        commit_ids = self._run_git(["log", "--remove-empty", "--pretty=format:%H", "-%s" % limit, "--", path]).splitlines()
+        return filter(lambda revision: revision, map(self.svn_revision_from_git_commit, commit_ids))
+
+    def conflicted_files(self):
+        # We do not need to pass decode_output for this diff command
+        # as we're passing --name-status which does not output any data.
+        status_command = [self.executable_name, 'diff', '--name-status', '--no-renames', '--diff-filter=U']
+        return self.run_status_and_extract_filenames(status_command, self._status_regexp("U"))
+
+    def added_files(self):
+        return self.run_status_and_extract_filenames(self.status_command(), self._status_regexp("A"))
+
+    def deleted_files(self):
+        return self.run_status_and_extract_filenames(self.status_command(), self._status_regexp("D"))
+
+    @staticmethod
+    def supports_local_commits():
+        return True
+
+    def display_name(self):
+        return "git"
+
+    def svn_revision(self, path):
+        _log.debug('Running git.head_svn_revision... (Temporary logging message)')
+        git_log = self._run_git(['log', '-25', path])
+        match = re.search("^\s*git-svn-id:.*@(?P<svn_revision>\d+)\ ", git_log, re.MULTILINE)
+        if not match:
+            return ""
+        return str(match.group('svn_revision'))
+
+    def prepend_svn_revision(self, diff):
+        revision = self.head_svn_revision()
+        if not revision:
+            return diff
+
+        return "Subversion Revision: " + revision + '\n' + diff
+
+    def create_patch(self, git_commit=None, changed_files=None):
+        """Returns a byte array (str()) representing the patch file.
+        Patch files are effectively binary since they may contain
+        files of multiple different encodings."""
+
+        # Put code changes at the top of the patch and layout tests
+        # at the bottom, this makes for easier reviewing.
+        config_path = self._filesystem.dirname(self._filesystem.path_to_module('webkitpy.common.config'))
+        order_file = self._filesystem.join(config_path, 'orderfile')
+        order = ""
+        if self._filesystem.exists(order_file):
+            order = "-O%s" % order_file
+
+        command = [self.executable_name, 'diff', '--binary', '--no-color', "--no-ext-diff", "--full-index", "--no-renames", order, self.merge_base(git_commit), "--"]
+        if changed_files:
+            command += changed_files
+        return self.prepend_svn_revision(self.run(command, decode_output=False, cwd=self.checkout_root))
+
+    def _run_git_svn_find_rev(self, arg):
+        # git svn find-rev always exits 0, even when the revision or commit is not found.
+        return self._run_git(['svn', 'find-rev', arg]).rstrip()
+
+    def _string_to_int_or_none(self, string):
+        try:
+            return int(string)
+        except ValueError, e:
+            return None
+
+    @memoized
+    def git_commit_from_svn_revision(self, svn_revision):
+        git_commit = self._run_git_svn_find_rev('r%s' % svn_revision)
+        if not git_commit:
+            # FIXME: Alternatively we could offer to update the checkout? Or return None?
+            raise ScriptError(message='Failed to find git commit for revision %s, your checkout likely needs an update.' % svn_revision)
+        return git_commit
+
+    @memoized
+    def svn_revision_from_git_commit(self, git_commit):
+        svn_revision = self._run_git_svn_find_rev(git_commit)
+        return self._string_to_int_or_none(svn_revision)
+
+    def contents_at_revision(self, path, revision):
+        """Returns a byte array (str()) containing the contents
+        of path @ revision in the repository."""
+        return self._run_git(["show", "%s:%s" % (self.git_commit_from_svn_revision(revision), path)], decode_output=False)
+
+    def diff_for_revision(self, revision):
+        git_commit = self.git_commit_from_svn_revision(revision)
+        return self.create_patch(git_commit)
+
+    def diff_for_file(self, path, log=None):
+        return self._run_git(['diff', 'HEAD', '--no-renames', '--', path])
+
+    def show_head(self, path):
+        return self._run_git(['show', 'HEAD:' + self.to_object_name(path)], decode_output=False)
+
+    def committer_email_for_revision(self, revision):
+        git_commit = self.git_commit_from_svn_revision(revision)
+        committer_email = self._run_git(["log", "-1", "--pretty=format:%ce", git_commit])
+        # Git adds an extra @repository_hash to the end of every committer email, remove it:
+        return committer_email.rsplit("@", 1)[0]
+
+    def apply_reverse_diff(self, revision):
+        # Assume the revision is an svn revision.
+        git_commit = self.git_commit_from_svn_revision(revision)
+        # I think this will always fail due to ChangeLogs.
+        self._run_git(['revert', '--no-commit', git_commit], error_handler=Executive.ignore_error)
+
+    def revert_files(self, file_paths):
+        self._run_git(['checkout', 'HEAD'] + file_paths)
+
+    def _assert_can_squash(self, working_directory_is_clean):
+        squash = Git.read_git_config('webkit-patch.commit-should-always-squash', cwd=self.checkout_root)
+        should_squash = squash and squash.lower() == "true"
+
+        if not should_squash:
+            # Only warn if there are actually multiple commits to squash.
+            num_local_commits = len(self.local_commits())
+            if num_local_commits > 1 or (num_local_commits > 0 and not working_directory_is_clean):
+                raise AmbiguousCommitError(num_local_commits, working_directory_is_clean)
+
+    def commit_with_message(self, message, username=None, password=None, git_commit=None, force_squash=False, changed_files=None):
+        # Username is ignored during Git commits.
+        working_directory_is_clean = self.working_directory_is_clean()
+
+        if git_commit:
+            # Special-case HEAD.. to mean working-copy changes only.
+            if git_commit.upper() == 'HEAD..':
+                if working_directory_is_clean:
+                    raise ScriptError(message="The working copy is not modified. --git-commit=HEAD.. only commits working copy changes.")
+                self.commit_locally_with_message(message)
+                return self._commit_on_branch(message, 'HEAD', username=username, password=password)
+
+            # Need working directory changes to be committed so we can checkout the merge branch.
+            if not working_directory_is_clean:
+                # FIXME: webkit-patch land will modify the ChangeLogs to correct the reviewer.
+                # That will modify the working-copy and cause us to hit this error.
+                # The ChangeLog modification could be made to modify the existing local commit.
+                raise ScriptError(message="Working copy is modified. Cannot commit individual git_commits.")
+            return self._commit_on_branch(message, git_commit, username=username, password=password)
+
+        if not force_squash:
+            self._assert_can_squash(working_directory_is_clean)
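+        # 'reset --soft' moves HEAD back to the remote merge-base while keeping the index and
+        # working tree intact, so the commit below squashes all local work into a single commit.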
+        self._run_git(['reset', '--soft', self.remote_merge_base()])
+        self.commit_locally_with_message(message)
+        return self.push_local_commits_to_server(username=username, password=password)
+
+    def _commit_on_branch(self, message, git_commit, username=None, password=None):
+        branch_name = self._current_branch()
+        commit_ids = self.commit_ids_from_commitish_arguments([git_commit])
+
+        # We want to squash all this branch's commits into one commit with the proper description.
+        # We do this by cherry-picking each commit (without committing) onto a temporary merge
+        # branch, making a single commit there, and then dcommitting that.
+        MERGE_BRANCH_NAME = 'webkit-patch-land'
+        self.delete_branch(MERGE_BRANCH_NAME)
+
+        # We might be in a directory that's present in this branch but not in the
+        # trunk.  Move up to the top of the tree so that git commands that expect a
+        # valid CWD won't fail after we check out the merge branch.
+        # FIXME: We should never be using chdir! We can instead pass cwd= to run_command/self.run!
+        self._filesystem.chdir(self.checkout_root)
+
+        # Stuff our change into the merge branch.
+        # We wrap in a try...finally block so if anything goes wrong, we clean up the branches.
+        commit_succeeded = True
+        try:
+            self._run_git(['checkout', '-q', '-b', MERGE_BRANCH_NAME, self.remote_branch_ref()])
+
+            for commit in commit_ids:
+                # We're on a different branch now, so convert "head" to the branch name.
+                commit = re.sub(r'(?i)head', branch_name, commit)
+                # FIXME: Once changed_files and create_patch are modified to separately handle each
+                # commit in a commit range, commit each cherry pick so they'll get dcommitted separately.
+                self._run_git(['cherry-pick', '--no-commit', commit])
+
+            self._run_git(['commit', '-m', message])
+            output = self.push_local_commits_to_server(username=username, password=password)
+        except Exception, e:
+            log("COMMIT FAILED: " + str(e))
+            output = "Commit failed."
+            commit_succeeded = False
+        finally:
+            # And then swap back to the original branch and clean up.
+            self.clean_working_directory()
+            self._run_git(['checkout', '-q', branch_name])
+            self.delete_branch(MERGE_BRANCH_NAME)
+
+        return output
+
+    def svn_commit_log(self, svn_revision):
+        svn_revision = self.strip_r_from_svn_revision(svn_revision)
+        return self._run_git(['svn', 'log', '-r', svn_revision])
+
+    def last_svn_commit_log(self):
+        return self._run_git(['svn', 'log', '--limit=1'])
+
+    def svn_blame(self, path):
+        return self._run_git(['svn', 'blame', path])
+
+    # Git-specific methods:
+    def _branch_ref_exists(self, branch_ref):
+        return self._run_git(['show-ref', '--quiet', '--verify', branch_ref], return_exit_code=True) == 0
+
+    def delete_branch(self, branch_name):
+        if self._branch_ref_exists('refs/heads/' + branch_name):
+            self._run_git(['branch', '-D', branch_name])
+
+    def remote_merge_base(self):
+        return self._run_git(['merge-base', self.remote_branch_ref(), 'HEAD']).strip()
+
+    def remote_branch_ref(self):
+        # Use references so that we can avoid collisions, e.g. we don't want to operate on refs/heads/trunk if it exists.
+        remote_branch_refs = Git.read_git_config('svn-remote.svn.fetch', cwd=self.checkout_root)
+        if not remote_branch_refs:
+            remote_master_ref = 'refs/remotes/origin/master'
+            if not self._branch_ref_exists(remote_master_ref):
+                raise ScriptError(message="Can't find a branch to diff against. svn-remote.svn.fetch is not in the git config and %s does not exist" % remote_master_ref)
+            return remote_master_ref
+
+        # FIXME: What's the right behavior when there are multiple svn-remotes listed?
+        # For now, just use the first one.
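+        # The config value maps an SVN path to a local ref, e.g. (hypothetically)
+        # "trunk:refs/remotes/origin/master"; split(':')[1] extracts the ref half.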
+        first_remote_branch_ref = remote_branch_refs.split('\n')[0]
+        return first_remote_branch_ref.split(':')[1]
+
+    def commit_locally_with_message(self, message):
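+        # '-F -' tells git to read the commit message from stdin, which is supplied via input=message.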
+        self._run_git(['commit', '--all', '-F', '-'], input=message)
+
+    def push_local_commits_to_server(self, username=None, password=None):
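+        # 'git svn dcommit' replays each local commit onto the SVN server; this is how a
+        # git-svn checkout "pushes" to the Subversion repository.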
+        dcommit_command = ['svn', 'dcommit']
+        if (not username or not password) and not self.has_authorization_for_realm(SVN.svn_server_realm):
+            raise AuthenticationError(SVN.svn_server_host, prompt_for_password=True)
+        if username:
+            dcommit_command.extend(["--username", username])
+        output = self._run_git(dcommit_command, error_handler=commit_error_handler, input=password)
+        return output
+
+    # This function supports the following argument formats:
+    # no args : rev-list trunk..HEAD
+    # A..B    : rev-list A..B
+    # A...B   : error!
+    # A B     : [A, B]  (different from git diff, which would use "rev-list A..B")
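+    # For example, ['HEAD~2..HEAD'] yields the ids of the two newest commits, oldest first
+    # (rev-list output is reversed so callers such as _commit_on_branch cherry-pick in original order).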
+    def commit_ids_from_commitish_arguments(self, args):
+        if not len(args):
+            args.append('%s..HEAD' % self.remote_branch_ref())
+
+        commit_ids = []
+        for commitish in args:
+            if '...' in commitish:
+                raise ScriptError(message="'...' is not supported (found in '%s'). Did you mean '..'?" % commitish)
+            elif '..' in commitish:
+                commit_ids += reversed(self._run_git(['rev-list', commitish]).splitlines())
+            else:
+                # Turn single commits or branch or tag names into commit ids.
+                commit_ids += self._run_git(['rev-parse', '--revs-only', commitish]).splitlines()
+        return commit_ids
+
+    def commit_message_for_local_commit(self, commit_id):
+        commit_lines = self._run_git(['cat-file', 'commit', commit_id]).splitlines()
+
+        # Skip the git headers.
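+        # 'git cat-file commit' output is a block of headers (tree, parent, author, committer, ...)
+        # followed by a blank line and then the message body; skip everything through the blank line.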
+        first_line_after_headers = 0
+        for line in commit_lines:
+            first_line_after_headers += 1
+            if line == "":
+                break
+        return CommitMessage(commit_lines[first_line_after_headers:])
+
+    def files_changed_summary_for_commit(self, commit_id):
+        return self._run_git(['diff-tree', '--shortstat', '--no-renames', '--no-commit-id', commit_id])
diff --git a/Tools/Scripts/webkitpy/common/checkout/scm/scm.py b/Tools/Scripts/webkitpy/common/checkout/scm/scm.py
new file mode 100644
index 0000000..815e750
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/checkout/scm/scm.py
@@ -0,0 +1,247 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Python module for interacting with an SCM system (like SVN or Git)
+
+import logging
+import re
+
+from webkitpy.common.system.deprecated_logging import error, log
+from webkitpy.common.system.executive import Executive, ScriptError
+from webkitpy.common.system.filesystem import FileSystem
+
+
+class CheckoutNeedsUpdate(ScriptError):
+    def __init__(self, script_args, exit_code, output, cwd):
+        ScriptError.__init__(self, script_args=script_args, exit_code=exit_code, output=output, cwd=cwd)
+
+
+# FIXME: Should be moved onto SCM
+def commit_error_handler(error):
+    if re.search("resource out of date", error.output):
+        raise CheckoutNeedsUpdate(script_args=error.script_args, exit_code=error.exit_code, output=error.output, cwd=error.cwd)
+    Executive.default_error_handler(error)
+
+
+class AuthenticationError(Exception):
+    def __init__(self, server_host, prompt_for_password=False):
+        self.server_host = server_host
+        self.prompt_for_password = prompt_for_password
+
+
+# SCM methods are expected to return paths relative to self.checkout_root.
+class SCM:
+    def __init__(self, cwd, executive=None, filesystem=None):
+        self.cwd = cwd
+        self._executive = executive or Executive()
+        self._filesystem = filesystem or FileSystem()
+        self.checkout_root = self.find_checkout_root(self.cwd)
+
+    # A wrapper used by subclasses to create processes.
+    def run(self, args, cwd=None, input=None, error_handler=None, return_exit_code=False, return_stderr=True, decode_output=True):
+        # FIXME: We should set cwd appropriately.
+        return self._executive.run_command(args,
+                           cwd=cwd,
+                           input=input,
+                           error_handler=error_handler,
+                           return_exit_code=return_exit_code,
+                           return_stderr=return_stderr,
+                           decode_output=decode_output)
+
+    # SCM always returns repository relative path, but sometimes we need
+    # absolute paths to pass to rm, etc.
+    def absolute_path(self, repository_relative_path):
+        return self._filesystem.join(self.checkout_root, repository_relative_path)
+
+    # FIXME: This belongs in Checkout, not SCM.
+    def scripts_directory(self):
+        return self._filesystem.join(self.checkout_root, "Tools", "Scripts")
+
+    # FIXME: This belongs in Checkout, not SCM.
+    def script_path(self, script_name):
+        return self._filesystem.join(self.scripts_directory(), script_name)
+
+    def ensure_clean_working_directory(self, force_clean):
+        if self.working_directory_is_clean():
+            return
+        if not force_clean:
+            print self.run(self.status_command(), error_handler=Executive.ignore_error, cwd=self.checkout_root)
+            raise ScriptError(message="Working directory has modifications, pass --force-clean or --no-clean to continue.")
+        log("Cleaning working directory")
+        self.clean_working_directory()
+
+    def ensure_no_local_commits(self, force):
+        if not self.supports_local_commits():
+            return
+        commits = self.local_commits()
+        if not len(commits):
+            return
+        if not force:
+            error("Working directory has local commits, pass --force-clean to continue.")
+        self.discard_local_commits()
+
+    def run_status_and_extract_filenames(self, status_command, status_regexp):
+        filenames = []
+        # We run with cwd=self.checkout_root so that returned-paths are root-relative.
+        for line in self.run(status_command, cwd=self.checkout_root).splitlines():
+            match = re.search(status_regexp, line)
+            if not match:
+                continue
+            # status = match.group('status')
+            filename = match.group('filename')
+            filenames.append(filename)
+        return filenames
+
+    def strip_r_from_svn_revision(self, svn_revision):
+        match = re.match(r"^r(?P<svn_revision>\d+)", unicode(svn_revision))
+        if match:
+            return match.group('svn_revision')
+        return svn_revision
+
+    def svn_revision_from_commit_text(self, commit_text):
+        match = re.search(self.commit_success_regexp(), commit_text, re.MULTILINE)
+        return match.group('svn_revision')
+
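+    # The methods below form SCM's abstract interface; concrete subclasses (SVN, Git) override
+    # them, and the _subclass_must_implement() stub makes a missing override fail loudly
+    # with NotImplementedError.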
+    @staticmethod
+    def _subclass_must_implement():
+        raise NotImplementedError("subclasses must implement")
+
+    @classmethod
+    def in_working_directory(cls, path, executive=None):
+        SCM._subclass_must_implement()
+
+    def find_checkout_root(self, path):
+        SCM._subclass_must_implement()
+
+    @staticmethod
+    def commit_success_regexp():
+        SCM._subclass_must_implement()
+
+    def working_directory_is_clean(self):
+        self._subclass_must_implement()
+
+    def clean_working_directory(self):
+        self._subclass_must_implement()
+
+    def status_command(self):
+        self._subclass_must_implement()
+
+    def add(self, path, return_exit_code=False):
+        self.add_list([path], return_exit_code)
+
+    def add_list(self, paths, return_exit_code=False):
+        self._subclass_must_implement()
+
+    def delete(self, path):
+        self.delete_list([path])
+
+    def delete_list(self, paths):
+        self._subclass_must_implement()
+
+    def exists(self, path):
+        self._subclass_must_implement()
+
+    def changed_files(self, git_commit=None):
+        self._subclass_must_implement()
+
+    def changed_files_for_revision(self, revision):
+        self._subclass_must_implement()
+
+    def revisions_changing_file(self, path, limit=5):
+        self._subclass_must_implement()
+
+    def added_files(self):
+        self._subclass_must_implement()
+
+    def conflicted_files(self):
+        self._subclass_must_implement()
+
+    def display_name(self):
+        self._subclass_must_implement()
+
+    def head_svn_revision(self):
+        return self.svn_revision(self.checkout_root)
+
+    def svn_revision(self, path):
+        self._subclass_must_implement()
+
+    def create_patch(self, git_commit=None, changed_files=None):
+        self._subclass_must_implement()
+
+    def committer_email_for_revision(self, revision):
+        self._subclass_must_implement()
+
+    def contents_at_revision(self, path, revision):
+        self._subclass_must_implement()
+
+    def diff_for_revision(self, revision):
+        self._subclass_must_implement()
+
+    def diff_for_file(self, path, log=None):
+        self._subclass_must_implement()
+
+    def show_head(self, path):
+        self._subclass_must_implement()
+
+    def apply_reverse_diff(self, revision):
+        self._subclass_must_implement()
+
+    def revert_files(self, file_paths):
+        self._subclass_must_implement()
+
+    def commit_with_message(self, message, username=None, password=None, git_commit=None, force_squash=False, changed_files=None):
+        self._subclass_must_implement()
+
+    def svn_commit_log(self, svn_revision):
+        self._subclass_must_implement()
+
+    def last_svn_commit_log(self):
+        self._subclass_must_implement()
+
+    def svn_blame(self, path):
+        self._subclass_must_implement()
+
+    # Subclasses must indicate whether they support local commits; the SCM baseclass will
+    # only call the local-commit methods when this is true.
+    @staticmethod
+    def supports_local_commits():
+        SCM._subclass_must_implement()
+
+    def remote_merge_base(self):
+        SCM._subclass_must_implement()
+
+    def commit_locally_with_message(self, message):
+        error("Your source control manager does not support local commits.")
+
+    def discard_local_commits(self):
+        pass
+
+    def local_commits(self):
+        return []
diff --git a/Tools/Scripts/webkitpy/common/checkout/scm/scm_mock.py b/Tools/Scripts/webkitpy/common/checkout/scm/scm_mock.py
new file mode 100644
index 0000000..9dd01e8
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/checkout/scm/scm_mock.py
@@ -0,0 +1,124 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.executive_mock import MockExecutive
+
+
+class MockSCM(object):
+    def __init__(self, filesystem=None, executive=None):
+        self.checkout_root = "/mock-checkout"
+        self.added_paths = set()
+        self._filesystem = filesystem or MockFileSystem()
+        self._executive = executive or MockExecutive()
+
+    def add(self, destination_path, return_exit_code=False):
+        self.add_list([destination_path], return_exit_code)
+
+    def add_list(self, destination_paths, return_exit_code=False):
+        self.added_paths.update(set(destination_paths))
+        if return_exit_code:
+            return 0
+
+    def ensure_clean_working_directory(self, force_clean):
+        pass
+
+    def supports_local_commits(self):
+        return True
+
+    def ensure_no_local_commits(self, force_clean):
+        pass
+
+    def exists(self, path):
+        # TestRealMain.test_real_main (and several other rebaseline tests) are sensitive to this return value.
+        # We should make those tests more robust, but for now we always return True (since no test needs otherwise).
+        return True
+
+    def absolute_path(self, *comps):
+        return self._filesystem.join(self.checkout_root, *comps)
+
+    def changed_files(self, git_commit=None):
+        return ["MockFile1"]
+
+    def changed_files_for_revision(self, revision):
+        return ["MockFile1"]
+
+    def head_svn_revision(self):
+        return '1234'
+
+    def svn_revision(self, path):
+        return '5678'
+
+    def create_patch(self, git_commit, changed_files=None):
+        return "Patch1"
+
+    def commit_ids_from_commitish_arguments(self, args):
+        return ["Commitish1", "Commitish2"]
+
+    def committer_email_for_revision(self, revision):
+        return "mock@webkit.org"
+
+    def commit_locally_with_message(self, message):
+        pass
+
+    def commit_with_message(self, message, username=None, password=None, git_commit=None, force_squash=False, changed_files=None):
+        pass
+
+    def merge_base(self, git_commit):
+        return None
+
+    def commit_message_for_local_commit(self, commit_id):
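+        # FIXME: CommitMessage is not imported in this file, so these branches would raise a
+        # NameError if a test ever exercised them.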
+        if commit_id == "Commitish1":
+            return CommitMessage("CommitMessage1\n" \
+                "https://bugs.example.org/show_bug.cgi?id=50000\n")
+        if commit_id == "Commitish2":
+            return CommitMessage("CommitMessage2\n" \
+                "https://bugs.example.org/show_bug.cgi?id=50001\n")
+        raise Exception("Bogus commit_id in commit_message_for_local_commit.")
+
+    def diff_for_file(self, path, log=None):
+        return path + '-diff'
+
+    def diff_for_revision(self, revision):
+        return "DiffForRevision%s\nhttp://bugs.webkit.org/show_bug.cgi?id=12345" % revision
+
+    def show_head(self, path):
+        return path
+
+    def svn_revision_from_commit_text(self, commit_text):
+        return "49824"
+
+    def delete(self, path):
+        return self.delete_list([path])
+
+    def delete_list(self, paths):
+        if not self._filesystem:
+            return
+        for path in paths:
+            if self._filesystem.exists(path):
+                self._filesystem.remove(path)
diff --git a/Tools/Scripts/webkitpy/common/checkout/scm/scm_unittest.py b/Tools/Scripts/webkitpy/common/checkout/scm/scm_unittest.py
new file mode 100644
index 0000000..802fe2c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/checkout/scm/scm_unittest.py
@@ -0,0 +1,1612 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+# Copyright (C) 2009 Apple Inc. All rights reserved.
+# Copyright (C) 2011 Daniel Bates (dbates@intudata.com). All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import atexit
+import base64
+import codecs
+import getpass
+import os
+import os.path
+import re
+import stat
+import sys
+import subprocess
+import tempfile
+import time
+import unittest
+import urllib
+import shutil
+
+from datetime import date
+from webkitpy.common.checkout.checkout import Checkout
+from webkitpy.common.config.committers import Committer  # FIXME: This should not be needed
+from webkitpy.common.net.bugzilla import Attachment # FIXME: This should not be needed
+from webkitpy.common.system.executive import Executive, ScriptError
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.system.executive_mock import MockExecutive
+
+from .git import Git, AmbiguousCommitError
+from .detection import detect_scm_system
+from .scm import SCM, CheckoutNeedsUpdate, commit_error_handler, AuthenticationError
+from .svn import SVN
+
+# We cache the mock SVN repo so that we don't create it again for each call to an SVNTest or GitTest test_ method.
+# We store it in a global variable so that we can delete this cached repo on exit(3).
+# FIXME: Remove this once we migrate to Python 2.7. Unittest in Python 2.7 supports module-specific setup and teardown functions.
+cached_svn_repo_path = None
+
+
+def remove_dir(path):
+    # Change directory to / to ensure that we aren't in the directory we want to delete.
+    os.chdir('/')
+    shutil.rmtree(path)
+
+
+# FIXME: Remove this once we migrate to Python 2.7. Unittest in Python 2.7 supports module-specific setup and teardown functions.
+@atexit.register
+def delete_cached_mock_repo_at_exit():
+    if cached_svn_repo_path:
+        remove_dir(cached_svn_repo_path)
+
+# Eventually we will want to write tests which work for both SCMs (like update_webkit, changed_files, etc.),
+# perhaps through some SCMTest base-class which both SVNTest and GitTest inherit from.
+
+def run_command(*args, **kwargs):
+    # FIXME: This should not be a global static.
+    # New code should use Executive.run_command directly instead
+    return Executive().run_command(*args, **kwargs)
+
+
+# FIXME: This should be unified into one of the executive.py commands!
+# Callers could use run_and_throw_if_fail(args, cwd=cwd, quiet=True)
+def run_silent(args, cwd=None):
+    # Note: Not thread safe: http://bugs.python.org/issue2320
+    process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
+    process.communicate() # ignore output
+    exit_code = process.wait()
+    if exit_code:
+        raise ScriptError('Failed to run "%s"  exit_code: %d  cwd: %s' % (args, exit_code, cwd))
+
+
+def write_into_file_at_path(file_path, contents, encoding="utf-8"):
+    if encoding:
+        with codecs.open(file_path, "w", encoding) as file:
+            file.write(contents)
+    else:
+        with open(file_path, "w") as file:
+            file.write(contents)
+
+
+def read_from_path(file_path, encoding="utf-8"):
+    with codecs.open(file_path, "r", encoding) as file:
+        return file.read()
+
+
+def _make_diff(command, *args):
+    # We use this wrapper to disable output decoding. diffs should be treated as
+    # binary files since they may include text files of multiple different encodings.
+    # FIXME: This should use an Executive.
+    return run_command([command, "diff"] + list(args), decode_output=False)
+
+
+def _svn_diff(*args):
+    return _make_diff("svn", *args)
+
+
+def _git_diff(*args):
+    return _make_diff("git", *args)
+
+
+# Exists to share svn repository creation code between the git and svn tests
+class SVNTestRepository:
+    @classmethod
+    def _svn_add(cls, path):
+        run_command(["svn", "add", path])
+
+    @classmethod
+    def _svn_commit(cls, message):
+        run_command(["svn", "commit", "--quiet", "--message", message])
+
+    @classmethod
+    def _setup_test_commits(cls, svn_repo_url):
+
+        svn_checkout_path = tempfile.mkdtemp(suffix="svn_test_checkout")
+        run_command(['svn', 'checkout', '--quiet', svn_repo_url, svn_checkout_path])
+
+        # Add some test commits
+        os.chdir(svn_checkout_path)
+
+        write_into_file_at_path("test_file", "test1")
+        cls._svn_add("test_file")
+        cls._svn_commit("initial commit")
+
+        write_into_file_at_path("test_file", "test1test2")
+        # This used to be the last commit, but doing so broke
+        # GitTest.test_apply_git_patch, which uses the inverse diff of the last commit.
+        # svn-apply fails to remove directories in Git, see:
+        # https://bugs.webkit.org/show_bug.cgi?id=34871
+        os.mkdir("test_dir")
+        # Slash should always be the right path separator since we use cygwin on Windows.
+        test_file3_path = "test_dir/test_file3"
+        write_into_file_at_path(test_file3_path, "third file")
+        cls._svn_add("test_dir")
+        cls._svn_commit("second commit")
+
+        write_into_file_at_path("test_file", "test1test2test3\n")
+        write_into_file_at_path("test_file2", "second file")
+        cls._svn_add("test_file2")
+        cls._svn_commit("third commit")
+
+        # This 4th commit is used to make sure that our patch file handling
+        # code correctly treats patches as binary and does not attempt to
+        # decode them assuming they're utf-8.
+        write_into_file_at_path("test_file", u"latin1 test: \u00A0\n", "latin1")
+        write_into_file_at_path("test_file2", u"utf-8 test: \u00A0\n", "utf-8")
+        cls._svn_commit("fourth commit")
+
+        # svn does not seem to update after commit as I would expect.
+        run_command(['svn', 'update'])
+        remove_dir(svn_checkout_path)
+
+    # This is a hot function since it's invoked by unittest before calling each test_ method in SVNTest and
+    # GitTest. We create a mock SVN repo once and then perform an SVN checkout from a filesystem copy of
+    # it since it's expensive to create the mock repo.
+    @classmethod
+    def setup(cls, test_object):
+        global cached_svn_repo_path
+        if not cached_svn_repo_path:
+            cached_svn_repo_path = cls._setup_mock_repo()
+
+        test_object.temp_directory = tempfile.mkdtemp(suffix="svn_test")
+        test_object.svn_repo_path = os.path.join(test_object.temp_directory, "repo")
+        test_object.svn_repo_url = "file://%s" % test_object.svn_repo_path
+        test_object.svn_checkout_path = os.path.join(test_object.temp_directory, "checkout")
+        shutil.copytree(cached_svn_repo_path, test_object.svn_repo_path)
+        run_command(['svn', 'checkout', '--quiet', test_object.svn_repo_url + "/trunk", test_object.svn_checkout_path])
+
+    @classmethod
+    def _setup_mock_repo(cls):
+        # Create a test SVN repository
+        svn_repo_path = tempfile.mkdtemp(suffix="svn_test_repo")
+        svn_repo_url = "file://%s" % svn_repo_path  # Not sure this will work on windows
+        # git svn complains if we don't pass --pre-1.5-compatible, not sure why:
+        # Expected FS format '2'; found format '3' at /usr/local/libexec/git-core//git-svn line 1477
+        run_command(['svnadmin', 'create', '--pre-1.5-compatible', svn_repo_path])
+
+        # Create a test svn checkout
+        svn_checkout_path = tempfile.mkdtemp(suffix="svn_test_checkout")
+        run_command(['svn', 'checkout', '--quiet', svn_repo_url, svn_checkout_path])
+
+        # Create and check out a trunk dir to match the standard svn layout that git-svn expects.
+        os.chdir(svn_checkout_path)
+        os.mkdir('trunk')
+        cls._svn_add('trunk')
+        # We can add tags and branches as well if we ever need to test those.
+        cls._svn_commit('add trunk')
+
+        # Change directory out of the svn checkout so we can delete the checkout directory.
+        remove_dir(svn_checkout_path)
+
+        cls._setup_test_commits(svn_repo_url + "/trunk")
+        return svn_repo_path
+
+    @classmethod
+    def tear_down(cls, test_object):
+        remove_dir(test_object.temp_directory)
+
+        # Now that we've deleted the checkout paths, the cwd may be invalid.
+        # Change back to a valid directory so that later calls to os.getcwd() do not fail.
+        if os.path.isabs(__file__):
+            path = os.path.dirname(__file__)
+        else:
+            path = sys.path[0]
+        os.chdir(detect_scm_system(path).checkout_root)
+
+
+# For testing the SCM baseclass directly.
+class SCMClassTests(unittest.TestCase):
+    def setUp(self):
+        self.dev_null = open(os.devnull, "w") # Used to make our Popen calls quiet.
+
+    def tearDown(self):
+        self.dev_null.close()
+
+    def test_run_command_with_pipe(self):
+        input_process = subprocess.Popen(['echo', 'foo\nbar'], stdout=subprocess.PIPE, stderr=self.dev_null)
+        self.assertEqual(run_command(['grep', 'bar'], input=input_process.stdout), "bar\n")
+
+        # Test the non-pipe case too:
+        self.assertEqual(run_command(['grep', 'bar'], input="foo\nbar"), "bar\n")
+
+        command_returns_non_zero = ['/bin/sh', '--invalid-option']
+        # Test when the input pipe process fails.
+        input_process = subprocess.Popen(command_returns_non_zero, stdout=subprocess.PIPE, stderr=self.dev_null)
+        self.assertTrue(input_process.poll() != 0)
+        self.assertRaises(ScriptError, run_command, ['grep', 'bar'], input=input_process.stdout)
+
+        # Test when the run_command process fails.
+        input_process = subprocess.Popen(['echo', 'foo\nbar'], stdout=subprocess.PIPE, stderr=self.dev_null)
+        self.assertRaises(ScriptError, run_command, command_returns_non_zero, input=input_process.stdout)
+
+    def test_error_handlers(self):
+        git_failure_message="Merge conflict during commit: Your file or directory 'WebCore/ChangeLog' is probably out-of-date: resource out of date; try updating at /usr/local/libexec/git-core//git-svn line 469"
+        svn_failure_message="""svn: Commit failed (details follow):
+svn: File or directory 'ChangeLog' is out of date; try updating
+svn: resource out of date; try updating
+"""
+        command_does_not_exist = ['does_not_exist', 'invalid_option']
+        self.assertRaises(OSError, run_command, command_does_not_exist)
+        self.assertRaises(OSError, run_command, command_does_not_exist, error_handler=Executive.ignore_error)
+
+        command_returns_non_zero = ['/bin/sh', '--invalid-option']
+        self.assertRaises(ScriptError, run_command, command_returns_non_zero)
+        # Check if returns error text:
+        self.assertTrue(run_command(command_returns_non_zero, error_handler=Executive.ignore_error))
+
+        self.assertRaises(CheckoutNeedsUpdate, commit_error_handler, ScriptError(output=git_failure_message))
+        self.assertRaises(CheckoutNeedsUpdate, commit_error_handler, ScriptError(output=svn_failure_message))
+        self.assertRaises(ScriptError, commit_error_handler, ScriptError(output='blah blah blah'))
+
+
+# GitTest and SVNTest inherit from this so any test_ methods here will be run once for this class and then once for each subclass.
+class SCMTest(unittest.TestCase):
+    def _create_patch(self, patch_contents):
+        # FIXME: This code is brittle if the Attachment API changes.
+        attachment = Attachment({"bug_id": 12345}, None)
+        attachment.contents = lambda: patch_contents
+
+        joe_cool = Committer("Joe Cool", "joe@cool.com")
+        attachment.reviewer = lambda: joe_cool
+
+        return attachment
+
+    def _setup_webkittools_scripts_symlink(self, local_scm):
+        webkit_scm = detect_scm_system(os.path.dirname(os.path.abspath(__file__)))
+        webkit_scripts_directory = webkit_scm.scripts_directory()
+        local_scripts_directory = local_scm.scripts_directory()
+        os.mkdir(os.path.dirname(local_scripts_directory))
+        os.symlink(webkit_scripts_directory, local_scripts_directory)
+
+    # Tests which both GitTest and SVNTest should run.
+    # FIXME: There must be a simpler way to add these w/o adding a wrapper method to both subclasses
+
+    def _shared_test_changed_files(self):
+        write_into_file_at_path("test_file", "changed content")
+        self.assertEqual(self.scm.changed_files(), ["test_file"])
+        write_into_file_at_path("test_dir/test_file3", "new stuff")
+        self.assertEqual(self.scm.changed_files(), ["test_dir/test_file3", "test_file"])
+        old_cwd = os.getcwd()
+        os.chdir("test_dir")
+        # Validate that changed_files does not change with our cwd, see bug 37015.
+        self.assertEqual(self.scm.changed_files(), ["test_dir/test_file3", "test_file"])
+        os.chdir(old_cwd)
+
+    def _shared_test_added_files(self):
+        write_into_file_at_path("test_file", "changed content")
+        self.assertEqual(self.scm.added_files(), [])
+
+        write_into_file_at_path("added_file", "new stuff")
+        self.scm.add("added_file")
+
+        write_into_file_at_path("added_file3", "more new stuff")
+        write_into_file_at_path("added_file4", "more new stuff")
+        self.scm.add_list(["added_file3", "added_file4"])
+
+        os.mkdir("added_dir")
+        write_into_file_at_path("added_dir/added_file2", "new stuff")
+        self.scm.add("added_dir")
+
+        # SVN reports directory changes, Git does not.
+        added_files = self.scm.added_files()
+        if "added_dir" in added_files:
+            added_files.remove("added_dir")
+        self.assertEqual(added_files, ["added_dir/added_file2", "added_file", "added_file3", "added_file4"])
+
+        # Test also to make sure clean_working_directory removes added files
+        self.scm.clean_working_directory()
+        self.assertEqual(self.scm.added_files(), [])
+        self.assertFalse(os.path.exists("added_file"))
+        self.assertFalse(os.path.exists("added_file3"))
+        self.assertFalse(os.path.exists("added_file4"))
+        self.assertFalse(os.path.exists("added_dir"))
+
+    def _shared_test_changed_files_for_revision(self):
+        # SVN reports directory changes, Git does not.
+        changed_files = self.scm.changed_files_for_revision(3)
+        if "test_dir" in changed_files:
+            changed_files.remove("test_dir")
+        self.assertEqual(changed_files, ["test_dir/test_file3", "test_file"])
+        self.assertEqual(sorted(self.scm.changed_files_for_revision(4)), sorted(["test_file", "test_file2"]))  # Git and SVN return different orders.
+        self.assertEqual(self.scm.changed_files_for_revision(2), ["test_file"])
+
+    def _shared_test_contents_at_revision(self):
+        self.assertEqual(self.scm.contents_at_revision("test_file", 3), "test1test2")
+        self.assertEqual(self.scm.contents_at_revision("test_file", 4), "test1test2test3\n")
+
+        # Verify that contents_at_revision returns a byte array, aka str():
+        self.assertEqual(self.scm.contents_at_revision("test_file", 5), u"latin1 test: \u00A0\n".encode("latin1"))
+        self.assertEqual(self.scm.contents_at_revision("test_file2", 5), u"utf-8 test: \u00A0\n".encode("utf-8"))
+
+        self.assertEqual(self.scm.contents_at_revision("test_file2", 4), "second file")
+        # Files which don't exist:
+        # Currently we raise instead of returning None because detecting the difference between
+        # "file not found" and any other error seems impossible with svn (git seems to expose such through the return code).
+        self.assertRaises(ScriptError, self.scm.contents_at_revision, "test_file2", 2)
+        self.assertRaises(ScriptError, self.scm.contents_at_revision, "does_not_exist", 2)
+
+    def _shared_test_revisions_changing_file(self):
+        self.assertEqual(self.scm.revisions_changing_file("test_file"), [5, 4, 3, 2])
+        self.assertRaises(ScriptError, self.scm.revisions_changing_file, "non_existent_file")
+
+    def _shared_test_committer_email_for_revision(self):
+        self.assertEqual(self.scm.committer_email_for_revision(3), getpass.getuser())  # Committer "email" will be the current user
+
+    def _shared_test_reverse_diff(self):
+        self._setup_webkittools_scripts_symlink(self.scm) # Git's apply_reverse_diff uses resolve-ChangeLogs
+        # Only test the simple case, as any other will end up with conflict markers.
+        self.scm.apply_reverse_diff('5')
+        self.assertEqual(read_from_path('test_file'), "test1test2test3\n")
+
+    def _shared_test_diff_for_revision(self):
+        # Patch formats are slightly different between svn and git, so just regexp for things we know should be there.
+        r3_patch = self.scm.diff_for_revision(4)
+        self.assertTrue(re.search('test3', r3_patch))
+        self.assertFalse(re.search('test4', r3_patch))
+        self.assertTrue(re.search('test2', r3_patch))
+        self.assertTrue(re.search('test2', self.scm.diff_for_revision(3)))
+
+    def _shared_test_svn_apply_git_patch(self):
+        self._setup_webkittools_scripts_symlink(self.scm)
+        git_binary_addition = """diff --git a/fizzbuzz7.gif b/fizzbuzz7.gif
+new file mode 100644
+index 0000000000000000000000000000000000000000..64a9532e7794fcd791f6f12157406d90
+60151690
+GIT binary patch
+literal 512
+zcmZ?wbhEHbRAx|MU|?iW{Kxc~?KofD;ckY;H+&5HnHl!!GQMD7h+sU{_)e9f^V3c?
+zhJP##HdZC#4K}7F68@!1jfWQg2daCm-gs#3|JREDT>c+pG4L<_2;w##WMO#ysPPap
+zLqpAf1OE938xAsSp4!5f-o><?VKe(#0jEcwfHGF4%M1^kRs14oVBp2ZEL{E1N<-zJ
+zsfLmOtKta;2_;2c#^S1-8cf<nb!QnGl>c!Xe6RXvrEtAWBvSDTgTO1j3vA31Puw!A
+zs(87q)j_mVDTqBo-P+03-P5mHCEnJ+x}YdCuS7#bCCyePUe(ynK+|4b-3qK)T?Z&)
+zYG+`tl4h?GZv_$t82}X4*DTE|$;{DEiPyF@)U-1+FaX++T9H{&%cag`W1|zVP@`%b
+zqiSkp6{BTpWTkCr!=<C6Q=?#~R8^JfrliAF6Q^gV9Iup8RqCXqqhqC`qsyhk<-nlB
+z00f{QZvfK&|Nm#oZ0TQl`Yr$BIa6A@16O26ud7H<QM=xl`toLKnz-3h@9c9q&wm|X
+z{89I|WPyD!*M?gv?q`;L=2YFeXrJQNti4?}s!zFo=5CzeBxC69xA<zrjP<wUcCRh4
+ptUl-ZG<%a~#LwkIWv&q!KSCH7tQ8cJDiw+|GV?MN)RjY50RTb-xvT&H
+
+literal 0
+HcmV?d00001
+
+"""
+        self.checkout.apply_patch(self._create_patch(git_binary_addition))
+        added = read_from_path('fizzbuzz7.gif', encoding=None)
+        self.assertEqual(512, len(added))
+        self.assertTrue(added.startswith('GIF89a'))
+        self.assertTrue('fizzbuzz7.gif' in self.scm.changed_files())
+
+        # The file already exists.
+        self.assertRaises(ScriptError, self.checkout.apply_patch, self._create_patch(git_binary_addition))
+
+        git_binary_modification = """diff --git a/fizzbuzz7.gif b/fizzbuzz7.gif
+index 64a9532e7794fcd791f6f12157406d9060151690..323fae03f4606ea9991df8befbb2fca7
+GIT binary patch
+literal 7
+OcmYex&reD$;sO8*F9L)B
+
+literal 512
+zcmZ?wbhEHbRAx|MU|?iW{Kxc~?KofD;ckY;H+&5HnHl!!GQMD7h+sU{_)e9f^V3c?
+zhJP##HdZC#4K}7F68@!1jfWQg2daCm-gs#3|JREDT>c+pG4L<_2;w##WMO#ysPPap
+zLqpAf1OE938xAsSp4!5f-o><?VKe(#0jEcwfHGF4%M1^kRs14oVBp2ZEL{E1N<-zJ
+zsfLmOtKta;2_;2c#^S1-8cf<nb!QnGl>c!Xe6RXvrEtAWBvSDTgTO1j3vA31Puw!A
+zs(87q)j_mVDTqBo-P+03-P5mHCEnJ+x}YdCuS7#bCCyePUe(ynK+|4b-3qK)T?Z&)
+zYG+`tl4h?GZv_$t82}X4*DTE|$;{DEiPyF@)U-1+FaX++T9H{&%cag`W1|zVP@`%b
+zqiSkp6{BTpWTkCr!=<C6Q=?#~R8^JfrliAF6Q^gV9Iup8RqCXqqhqC`qsyhk<-nlB
+z00f{QZvfK&|Nm#oZ0TQl`Yr$BIa6A@16O26ud7H<QM=xl`toLKnz-3h@9c9q&wm|X
+z{89I|WPyD!*M?gv?q`;L=2YFeXrJQNti4?}s!zFo=5CzeBxC69xA<zrjP<wUcCRh4
+ptUl-ZG<%a~#LwkIWv&q!KSCH7tQ8cJDiw+|GV?MN)RjY50RTb-xvT&H
+
+"""
+        self.checkout.apply_patch(self._create_patch(git_binary_modification))
+        modified = read_from_path('fizzbuzz7.gif', encoding=None)
+        self.assertEqual('foobar\n', modified)
+        self.assertTrue('fizzbuzz7.gif' in self.scm.changed_files())
+
+        # Applying the same modification should fail.
+        self.assertRaises(ScriptError, self.checkout.apply_patch, self._create_patch(git_binary_modification))
+
+        git_binary_deletion = """diff --git a/fizzbuzz7.gif b/fizzbuzz7.gif
+deleted file mode 100644
+index 323fae0..0000000
+GIT binary patch
+literal 0
+HcmV?d00001
+
+literal 7
+OcmYex&reD$;sO8*F9L)B
+
+"""
+        self.checkout.apply_patch(self._create_patch(git_binary_deletion))
+        self.assertFalse(os.path.exists('fizzbuzz7.gif'))
+        self.assertFalse('fizzbuzz7.gif' in self.scm.changed_files())
+
+        # Cannot delete again.
+        self.assertRaises(ScriptError, self.checkout.apply_patch, self._create_patch(git_binary_deletion))
+
+    def _shared_test_add_recursively(self):
+        os.mkdir("added_dir")
+        write_into_file_at_path("added_dir/added_file", "new stuff")
+        self.scm.add("added_dir/added_file")
+        self.assertTrue("added_dir/added_file" in self.scm.added_files())
+
+    def _shared_test_delete_recursively(self):
+        os.mkdir("added_dir")
+        write_into_file_at_path("added_dir/added_file", "new stuff")
+        self.scm.add("added_dir/added_file")
+        self.assertTrue("added_dir/added_file" in self.scm.added_files())
+        self.scm.delete("added_dir/added_file")
+        self.assertFalse("added_dir" in self.scm.added_files())
+
+    def _shared_test_delete_recursively_or_not(self):
+        os.mkdir("added_dir")
+        write_into_file_at_path("added_dir/added_file", "new stuff")
+        write_into_file_at_path("added_dir/another_added_file", "more new stuff")
+        self.scm.add("added_dir/added_file")
+        self.scm.add("added_dir/another_added_file")
+        self.assertTrue("added_dir/added_file" in self.scm.added_files())
+        self.assertTrue("added_dir/another_added_file" in self.scm.added_files())
+        self.scm.delete("added_dir/added_file")
+        self.assertTrue("added_dir/another_added_file" in self.scm.added_files())
+
+    def _shared_test_exists(self, scm, commit_function):
+        os.chdir(scm.checkout_root)
+        self.assertFalse(scm.exists('foo.txt'))
+        write_into_file_at_path('foo.txt', 'some stuff')
+        self.assertFalse(scm.exists('foo.txt'))
+        scm.add('foo.txt')
+        commit_function('adding foo')
+        self.assertTrue(scm.exists('foo.txt'))
+        scm.delete('foo.txt')
+        commit_function('deleting foo')
+        self.assertFalse(scm.exists('foo.txt'))
+
+    def _shared_test_head_svn_revision(self):
+        self.assertEqual(self.scm.head_svn_revision(), '5')
+
+
+# Context manager that overrides the current timezone.
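+# On platforms without time.tzset() (e.g. Windows) the override is a no-op.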
+class TimezoneOverride(object):
+    def __init__(self, timezone_string):
+        self._timezone_string = timezone_string
+
+    def __enter__(self):
+        if hasattr(time, 'tzset'):
+            self._saved_timezone = os.environ.get('TZ', None)
+            os.environ['TZ'] = self._timezone_string
+            time.tzset()
+
+    def __exit__(self, type, value, traceback):
+        if hasattr(time, 'tzset'):
+            if self._saved_timezone:
+                os.environ['TZ'] = self._saved_timezone
+            else:
+                del os.environ['TZ']
+            time.tzset()
+
+
+class SVNTest(SCMTest):
+
+    @staticmethod
+    def _set_date_and_reviewer(changelog_entry):
+        # Joe Cool matches the reviewer set in SCMTest._create_patch
+        changelog_entry = changelog_entry.replace('REVIEWER_HERE', 'Joe Cool')
+        # svn-apply will update ChangeLog entries with today's date (as in Cupertino, CA, US)
+        with TimezoneOverride('PST8PDT'):
+            return changelog_entry.replace('DATE_HERE', date.today().isoformat())
+
+    def test_svn_apply(self):
+        first_entry = """2009-10-26  Eric Seidel  <eric@webkit.org>
+
+        Reviewed by Foo Bar.
+
+        Most awesome change ever.
+
+        * scm_unittest.py:
+"""
+        intermediate_entry = """2009-10-27  Eric Seidel  <eric@webkit.org>
+
+        Reviewed by Baz Bar.
+
+        A more awesomer change yet!
+
+        * scm_unittest.py:
+"""
+        one_line_overlap_patch = """Index: ChangeLog
+===================================================================
+--- ChangeLog	(revision 5)
++++ ChangeLog	(working copy)
+@@ -1,5 +1,13 @@
+ 2009-10-26  Eric Seidel  <eric@webkit.org>
+%(whitespace)s
++        Reviewed by NOBODY (OOPS!).
++
++        Second most awesome change ever.
++
++        * scm_unittest.py:
++
++2009-10-26  Eric Seidel  <eric@webkit.org>
++
+         Reviewed by Foo Bar.
+%(whitespace)s
+         Most awesome change ever.
+""" % {'whitespace': ' '}
+        one_line_overlap_entry = """DATE_HERE  Eric Seidel  <eric@webkit.org>
+
+        Reviewed by REVIEWER_HERE.
+
+        Second most awesome change ever.
+
+        * scm_unittest.py:
+"""
+        two_line_overlap_patch = """Index: ChangeLog
+===================================================================
+--- ChangeLog	(revision 5)
++++ ChangeLog	(working copy)
+@@ -2,6 +2,14 @@
+%(whitespace)s
+         Reviewed by Foo Bar.
+%(whitespace)s
++        Second most awesome change ever.
++
++        * scm_unittest.py:
++
++2009-10-26  Eric Seidel  <eric@webkit.org>
++
++        Reviewed by Foo Bar.
++
+         Most awesome change ever.
+%(whitespace)s
+         * scm_unittest.py:
+""" % {'whitespace': ' '}
+        two_line_overlap_entry = """DATE_HERE  Eric Seidel  <eric@webkit.org>
+
+        Reviewed by Foo Bar.
+
+        Second most awesome change ever.
+
+        * scm_unittest.py:
+"""
+        write_into_file_at_path('ChangeLog', first_entry)
+        run_command(['svn', 'add', 'ChangeLog'])
+        run_command(['svn', 'commit', '--quiet', '--message', 'ChangeLog commit'])
+
+        # Patch files were created against just 'first_entry'.
+        # Add a second commit to make svn-apply have to apply the patches with fuzz.
+        changelog_contents = "%s\n%s" % (intermediate_entry, first_entry)
+        write_into_file_at_path('ChangeLog', changelog_contents)
+        run_command(['svn', 'commit', '--quiet', '--message', 'Intermediate commit'])
+
+        self._setup_webkittools_scripts_symlink(self.scm)
+        self.checkout.apply_patch(self._create_patch(one_line_overlap_patch))
+        expected_changelog_contents = "%s\n%s" % (self._set_date_and_reviewer(one_line_overlap_entry), changelog_contents)
+        self.assertEquals(read_from_path('ChangeLog'), expected_changelog_contents)
+
+        self.scm.revert_files(['ChangeLog'])
+        self.checkout.apply_patch(self._create_patch(two_line_overlap_patch))
+        expected_changelog_contents = "%s\n%s" % (self._set_date_and_reviewer(two_line_overlap_entry), changelog_contents)
+        self.assertEquals(read_from_path('ChangeLog'), expected_changelog_contents)
+
+    def setUp(self):
+        SVNTestRepository.setup(self)
+        os.chdir(self.svn_checkout_path)
+        self.scm = detect_scm_system(self.svn_checkout_path)
+        # For historical reasons, we test some checkout code here too.
+        self.checkout = Checkout(self.scm)
+
+    def tearDown(self):
+        SVNTestRepository.tear_down(self)
+
+    def test_detect_scm_system_relative_url(self):
+        scm = detect_scm_system(".")
+        # I wanted to assert that we got the right path, but there was some
+        # crazy magic with temp folder names that I couldn't figure out.
+        self.assertTrue(scm.checkout_root)
+
+    def test_create_patch_is_full_patch(self):
+        test_dir_path = os.path.join(self.svn_checkout_path, "test_dir2")
+        os.mkdir(test_dir_path)
+        test_file_path = os.path.join(test_dir_path, 'test_file2')
+        write_into_file_at_path(test_file_path, 'test content')
+        run_command(['svn', 'add', 'test_dir2'])
+
+        # create_patch depends on 'svn-create-patch', so make a dummy version.
+        scripts_path = os.path.join(self.svn_checkout_path, 'Tools', 'Scripts')
+        os.makedirs(scripts_path)
+        create_patch_path = os.path.join(scripts_path, 'svn-create-patch')
+        write_into_file_at_path(create_patch_path, '#!/bin/sh\necho $PWD') # We could pass -n to prevent the \n, but not all echo accept -n.
+        os.chmod(create_patch_path, stat.S_IXUSR | stat.S_IRUSR)
+
+        # Change into our test directory and run the create_patch command.
+        os.chdir(test_dir_path)
+        scm = detect_scm_system(test_dir_path)
+        self.assertEqual(scm.checkout_root, self.svn_checkout_path) # Sanity check that detection worked right.
+        patch_contents = scm.create_patch()
+        # Our fake 'svn-create-patch' returns $PWD instead of a patch; check that it was executed from the root of the repo.
+        self.assertEqual("%s\n" % os.path.realpath(scm.checkout_root), patch_contents) # Add a \n because echo adds a \n.
+
+    def test_detection(self):
+        scm = detect_scm_system(self.svn_checkout_path)
+        self.assertEqual(scm.display_name(), "svn")
+        self.assertEqual(scm.supports_local_commits(), False)
+
+    def test_apply_small_binary_patch(self):
+        patch_contents = """Index: test_file.swf
+===================================================================
+Cannot display: file marked as a binary type.
+svn:mime-type = application/octet-stream
+
+Property changes on: test_file.swf
+___________________________________________________________________
+Name: svn:mime-type
+   + application/octet-stream
+
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+"""
+        expected_contents = base64.b64decode("Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==")
+        self._setup_webkittools_scripts_symlink(self.scm)
+        patch_file = self._create_patch(patch_contents)
+        self.checkout.apply_patch(patch_file)
+        actual_contents = read_from_path("test_file.swf", encoding=None)
+        self.assertEqual(actual_contents, expected_contents)
+
+    def test_apply_svn_patch(self):
+        scm = detect_scm_system(self.svn_checkout_path)
+        patch = self._create_patch(_svn_diff("-r5:4"))
+        self._setup_webkittools_scripts_symlink(scm)
+        Checkout(scm).apply_patch(patch)
+
+    def test_commit_logs(self):
+        # Commits have dates and usernames in them, so we can't just direct compare.
+        self.assertTrue(re.search('fourth commit', self.scm.last_svn_commit_log()))
+        self.assertTrue(re.search('second commit', self.scm.svn_commit_log(3)))
+
+    def _shared_test_commit_with_message(self, username=None):
+        write_into_file_at_path('test_file', 'more test content')
+        commit_text = self.scm.commit_with_message("another test commit", username)
+        self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
+
+    def test_commit_in_subdir(self, username=None):
+        write_into_file_at_path('test_dir/test_file3', 'more test content')
+        os.chdir("test_dir")
+        commit_text = self.scm.commit_with_message("another test commit", username)
+        os.chdir("..")
+        self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
+
+    def test_commit_text_parsing(self):
+        self._shared_test_commit_with_message()
+
+    def test_commit_with_username(self):
+        self._shared_test_commit_with_message("dbates@webkit.org")
+
+    def test_commit_without_authorization(self):
+        self.scm.has_authorization_for_realm = lambda realm: False
+        self.assertRaises(AuthenticationError, self._shared_test_commit_with_message)
+
+    def test_has_authorization_for_realm_using_credentials_with_passtype(self):
+        credentials = """
+K 8
+passtype
+V 8
+keychain
+K 15
+svn:realmstring
+V 39
+<http://svn.webkit.org:80> Mac OS Forge
+K 8
+username
+V 17
+dbates@webkit.org
+END
+"""
+        self.assertTrue(self._test_has_authorization_for_realm_using_credentials(SVN.svn_server_realm, credentials))
+
+    def test_has_authorization_for_realm_using_credentials_with_password(self):
+        credentials = """
+K 15
+svn:realmstring
+V 39
+<http://svn.webkit.org:80> Mac OS Forge
+K 8
+username
+V 17
+dbates@webkit.org
+K 8
+password
+V 4
+blah
+END
+"""
+        self.assertTrue(self._test_has_authorization_for_realm_using_credentials(SVN.svn_server_realm, credentials))
+
+    def _test_has_authorization_for_realm_using_credentials(self, realm, credentials):
+        scm = detect_scm_system(self.svn_checkout_path)
+        fake_home_dir = tempfile.mkdtemp(suffix="fake_home_dir")
+        svn_config_dir_path = os.path.join(fake_home_dir, ".subversion")
+        os.mkdir(svn_config_dir_path)
+        fake_webkit_auth_file = os.path.join(svn_config_dir_path, "fake_webkit_auth_file")
+        write_into_file_at_path(fake_webkit_auth_file, credentials)
+        result = scm.has_authorization_for_realm(realm, home_directory=fake_home_dir)
+        os.remove(fake_webkit_auth_file)
+        os.rmdir(svn_config_dir_path)
+        os.rmdir(fake_home_dir)
+        return result
+
+    def test_not_have_authorization_for_realm_with_credentials_missing_password_and_passtype(self):
+        credentials = """
+K 15
+svn:realmstring
+V 39
+<http://svn.webkit.org:80> Mac OS Forge
+K 8
+username
+V 17
+dbates@webkit.org
+END
+"""
+        self.assertFalse(self._test_has_authorization_for_realm_using_credentials(SVN.svn_server_realm, credentials))
+
+    def test_not_have_authorization_for_realm_when_missing_credentials_file(self):
+        scm = detect_scm_system(self.svn_checkout_path)
+        fake_home_dir = tempfile.mkdtemp(suffix="fake_home_dir")
+        svn_config_dir_path = os.path.join(fake_home_dir, ".subversion")
+        os.mkdir(svn_config_dir_path)
+        self.assertFalse(scm.has_authorization_for_realm(SVN.svn_server_realm, home_directory=fake_home_dir))
+        os.rmdir(svn_config_dir_path)
+        os.rmdir(fake_home_dir)
+
+    def test_reverse_diff(self):
+        self._shared_test_reverse_diff()
+
+    def test_diff_for_revision(self):
+        self._shared_test_diff_for_revision()
+
+    def test_svn_apply_git_patch(self):
+        self._shared_test_svn_apply_git_patch()
+
+    def test_changed_files(self):
+        self._shared_test_changed_files()
+
+    def test_changed_files_for_revision(self):
+        self._shared_test_changed_files_for_revision()
+
+    def test_added_files(self):
+        self._shared_test_added_files()
+
+    def test_contents_at_revision(self):
+        self._shared_test_contents_at_revision()
+
+    def test_revisions_changing_file(self):
+        self._shared_test_revisions_changing_file()
+
+    def test_committer_email_for_revision(self):
+        self._shared_test_committer_email_for_revision()
+
+    def test_add_recursively(self):
+        self._shared_test_add_recursively()
+
+    def test_delete(self):
+        os.chdir(self.svn_checkout_path)
+        self.scm.delete("test_file")
+        self.assertTrue("test_file" in self.scm.deleted_files())
+
+    def test_delete_list(self):
+        os.chdir(self.svn_checkout_path)
+        self.scm.delete_list(["test_file", "test_file2"])
+        self.assertTrue("test_file" in self.scm.deleted_files())
+        self.assertTrue("test_file2" in self.scm.deleted_files())
+
+    def test_delete_recursively(self):
+        self._shared_test_delete_recursively()
+
+    def test_delete_recursively_or_not(self):
+        self._shared_test_delete_recursively_or_not()
+
+    def test_head_svn_revision(self):
+        self._shared_test_head_svn_revision()
+
+    def test_propset_propget(self):
+        filepath = os.path.join(self.svn_checkout_path, "test_file")
+        expected_mime_type = "x-application/foo-bar"
+        self.scm.propset("svn:mime-type", expected_mime_type, filepath)
+        self.assertEqual(expected_mime_type, self.scm.propget("svn:mime-type", filepath))
+
+    def test_show_head(self):
+        write_into_file_at_path("test_file", u"Hello!", "utf-8")
+        SVNTestRepository._svn_commit("fourth commit")
+        self.assertEqual("Hello!", self.scm.show_head('test_file'))
+
+    def test_show_head_binary(self):
+        data = "\244"
+        write_into_file_at_path("binary_file", data, encoding=None)
+        self.scm.add("binary_file")
+        self.scm.commit_with_message("a test commit")
+        self.assertEqual(data, self.scm.show_head('binary_file'))
+
+    def do_test_diff_for_file(self):
+        write_into_file_at_path('test_file', 'some content')
+        self.scm.commit_with_message("a test commit")
+        diff = self.scm.diff_for_file('test_file')
+        self.assertEqual(diff, "")
+
+        write_into_file_at_path("test_file", "changed content")
+        diff = self.scm.diff_for_file('test_file')
+        self.assertTrue("-some content" in diff)
+        self.assertTrue("+changed content" in diff)
+
+    def clean_bogus_dir(self):
+        self.bogus_dir = self.scm._bogus_dir_name()
+        if os.path.exists(self.bogus_dir):
+            shutil.rmtree(self.bogus_dir)
+
+    def test_diff_for_file_with_existing_bogus_dir(self):
+        self.clean_bogus_dir()
+        os.mkdir(self.bogus_dir)
+        self.do_test_diff_for_file()
+        self.assertTrue(os.path.exists(self.bogus_dir))
+        shutil.rmtree(self.bogus_dir)
+
+    def test_diff_for_file_with_missing_bogus_dir(self):
+        self.clean_bogus_dir()
+        self.do_test_diff_for_file()
+        self.assertFalse(os.path.exists(self.bogus_dir))
+
+    def test_svn_lock(self):
+        svn_root_lock_path = ".svn/lock"
+        write_into_file_at_path(svn_root_lock_path, "", "utf-8")
+        # webkit-patch uses a Checkout object and runs update-webkit, just use svn update here.
+        self.assertRaises(ScriptError, run_command, ['svn', 'update'])
+        self.scm.clean_working_directory()
+        self.assertFalse(os.path.exists(svn_root_lock_path))
+        run_command(['svn', 'update'])  # Should succeed and not raise.
+
+    def test_exists(self):
+        self._shared_test_exists(self.scm, self.scm.commit_with_message)
+
+class GitTest(SCMTest):
+
+    def setUp(self):
+        """Sets up a fresh git repository with one commit, then sets up a second
+        git repo that tracks the first one."""
+        # FIXME: We should instead clone a git repo that is tracking an SVN repo.
+        # That better matches what we do with WebKit.
+        self.original_dir = os.getcwd()
+
+        self.untracking_checkout_path = tempfile.mkdtemp(suffix="git_test_checkout2")
+        run_command(['git', 'init', self.untracking_checkout_path])
+
+        os.chdir(self.untracking_checkout_path)
+        write_into_file_at_path('foo_file', 'foo')
+        run_command(['git', 'add', 'foo_file'])
+        run_command(['git', 'commit', '-am', 'dummy commit'])
+        self.untracking_scm = detect_scm_system(self.untracking_checkout_path)
+
+        self.tracking_git_checkout_path = tempfile.mkdtemp(suffix="git_test_checkout")
+        run_command(['git', 'clone', '--quiet', self.untracking_checkout_path, self.tracking_git_checkout_path])
+        os.chdir(self.tracking_git_checkout_path)
+        self.tracking_scm = detect_scm_system(self.tracking_git_checkout_path)
+
+    def tearDown(self):
+        # Change back to a valid directory so that later calls to os.getcwd() do not fail.
+        os.chdir(self.original_dir)
+        run_command(['rm', '-rf', self.tracking_git_checkout_path])
+        run_command(['rm', '-rf', self.untracking_checkout_path])
+
+    def test_remote_branch_ref(self):
+        self.assertEqual(self.tracking_scm.remote_branch_ref(), 'refs/remotes/origin/master')
+
+        os.chdir(self.untracking_checkout_path)
+        self.assertRaises(ScriptError, self.untracking_scm.remote_branch_ref)
+
+    def test_multiple_remotes(self):
+        run_command(['git', 'config', '--add', 'svn-remote.svn.fetch', 'trunk:remote1'])
+        run_command(['git', 'config', '--add', 'svn-remote.svn.fetch', 'trunk:remote2'])
+        self.assertEqual(self.tracking_scm.remote_branch_ref(), 'remote1')
+
+    def test_create_patch(self):
+        write_into_file_at_path('test_file_commit1', 'contents')
+        run_command(['git', 'add', 'test_file_commit1'])
+        scm = self.tracking_scm
+        scm.commit_locally_with_message('message')
+
+        patch = scm.create_patch()
+        self.assertFalse(re.search(r'Subversion Revision:', patch))
+
+    def test_orderfile(self):
+        os.mkdir("Tools")
+        os.mkdir("Source")
+        os.mkdir("LayoutTests")
+        os.mkdir("Websites")
+
+        # Slash should always be the right path separator since we use cygwin on Windows.
+        Tools_ChangeLog = "Tools/ChangeLog"
+        write_into_file_at_path(Tools_ChangeLog, "contents")
+        Source_ChangeLog = "Source/ChangeLog"
+        write_into_file_at_path(Source_ChangeLog, "contents")
+        LayoutTests_ChangeLog = "LayoutTests/ChangeLog"
+        write_into_file_at_path(LayoutTests_ChangeLog, "contents")
+        Websites_ChangeLog = "Websites/ChangeLog"
+        write_into_file_at_path(Websites_ChangeLog, "contents")
+
+        Tools_ChangeFile = "Tools/ChangeFile"
+        write_into_file_at_path(Tools_ChangeFile, "contents")
+        Source_ChangeFile = "Source/ChangeFile"
+        write_into_file_at_path(Source_ChangeFile, "contents")
+        LayoutTests_ChangeFile = "LayoutTests/ChangeFile"
+        write_into_file_at_path(LayoutTests_ChangeFile, "contents")
+        Websites_ChangeFile = "Websites/ChangeFile"
+        write_into_file_at_path(Websites_ChangeFile, "contents")
+
+        run_command(['git', 'add', 'Tools/ChangeLog'])
+        run_command(['git', 'add', 'LayoutTests/ChangeLog'])
+        run_command(['git', 'add', 'Source/ChangeLog'])
+        run_command(['git', 'add', 'Websites/ChangeLog'])
+        run_command(['git', 'add', 'Tools/ChangeFile'])
+        run_command(['git', 'add', 'LayoutTests/ChangeFile'])
+        run_command(['git', 'add', 'Source/ChangeFile'])
+        run_command(['git', 'add', 'Websites/ChangeFile'])
+        scm = self.tracking_scm
+        scm.commit_locally_with_message('message')
+
+        patch = scm.create_patch()
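+        # The orderfile is expected to sort every ChangeLog ahead of its sibling
+        # files and everything under LayoutTests after the other top-level directories.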
+        self.assertTrue(re.search(r'Tools/ChangeLog', patch).start() < re.search(r'Tools/ChangeFile', patch).start())
+        self.assertTrue(re.search(r'Websites/ChangeLog', patch).start() < re.search(r'Websites/ChangeFile', patch).start())
+        self.assertTrue(re.search(r'Source/ChangeLog', patch).start() < re.search(r'Source/ChangeFile', patch).start())
+        self.assertTrue(re.search(r'LayoutTests/ChangeLog', patch).start() < re.search(r'LayoutTests/ChangeFile', patch).start())
+
+        self.assertTrue(re.search(r'Source/ChangeLog', patch).start() < re.search(r'LayoutTests/ChangeLog', patch).start())
+        self.assertTrue(re.search(r'Tools/ChangeLog', patch).start() < re.search(r'LayoutTests/ChangeLog', patch).start())
+        self.assertTrue(re.search(r'Websites/ChangeLog', patch).start() < re.search(r'LayoutTests/ChangeLog', patch).start())
+
+        self.assertTrue(re.search(r'Source/ChangeFile', patch).start() < re.search(r'LayoutTests/ChangeLog', patch).start())
+        self.assertTrue(re.search(r'Tools/ChangeFile', patch).start() < re.search(r'LayoutTests/ChangeLog', patch).start())
+        self.assertTrue(re.search(r'Websites/ChangeFile', patch).start() < re.search(r'LayoutTests/ChangeLog', patch).start())
+
+        self.assertTrue(re.search(r'Source/ChangeFile', patch).start() < re.search(r'LayoutTests/ChangeFile', patch).start())
+        self.assertTrue(re.search(r'Tools/ChangeFile', patch).start() < re.search(r'LayoutTests/ChangeFile', patch).start())
+        self.assertTrue(re.search(r'Websites/ChangeFile', patch).start() < re.search(r'LayoutTests/ChangeFile', patch).start())
+
+    def test_exists(self):
+        scm = self.untracking_scm
+        self._shared_test_exists(scm, scm.commit_locally_with_message)
+
+    def test_head_svn_revision(self):
+        scm = detect_scm_system(self.untracking_checkout_path)
+        # If we cloned a git repo tracking an SVN repo, this would give the same result as
+        # self._shared_test_head_svn_revision().
+        self.assertEqual(scm.head_svn_revision(), '')
+
+    def test_rename_files(self):
+        scm = self.tracking_scm
+
+        run_command(['git', 'mv', 'foo_file', 'bar_file'])
+        scm.commit_locally_with_message('message')
+
+        patch = scm.create_patch()
+        self.assertFalse(re.search(r'rename from ', patch))
+        self.assertFalse(re.search(r'rename to ', patch))
+
+
+class GitSVNTest(SCMTest):
+
+    def _setup_git_checkout(self):
+        self.git_checkout_path = tempfile.mkdtemp(suffix="git_test_checkout")
+        # --quiet doesn't make git svn silent, so we use run_silent to redirect output
+        run_silent(['git', 'svn', 'clone', '-T', 'trunk', self.svn_repo_url, self.git_checkout_path])
+        os.chdir(self.git_checkout_path)
+
+    def _tear_down_git_checkout(self):
+        # Change back to a valid directory so that later calls to os.getcwd() do not fail.
+        os.chdir(self.original_dir)
+        run_command(['rm', '-rf', self.git_checkout_path])
+
+    def setUp(self):
+        self.original_dir = os.getcwd()
+
+        SVNTestRepository.setup(self)
+        self._setup_git_checkout()
+        self.scm = detect_scm_system(self.git_checkout_path)
+        # For historical reasons, we test some checkout code here too.
+        self.checkout = Checkout(self.scm)
+
+    def tearDown(self):
+        SVNTestRepository.tear_down(self)
+        self._tear_down_git_checkout()
+
+    def test_detection(self):
+        scm = detect_scm_system(self.git_checkout_path)
+        self.assertEqual(scm.display_name(), "git")
+        self.assertEqual(scm.supports_local_commits(), True)
+
+    def test_read_git_config(self):
+        key = 'test.git-config'
+        value = 'git-config value'
+        run_command(['git', 'config', key, value])
+        self.assertEqual(self.scm.read_git_config(key), value)
+
+    def test_local_commits(self):
+        test_file = os.path.join(self.git_checkout_path, 'test_file')
+        write_into_file_at_path(test_file, 'foo')
+        run_command(['git', 'commit', '-a', '-m', 'local commit'])
+
+        self.assertEqual(len(self.scm.local_commits()), 1)
+
+    def test_discard_local_commits(self):
+        test_file = os.path.join(self.git_checkout_path, 'test_file')
+        write_into_file_at_path(test_file, 'foo')
+        run_command(['git', 'commit', '-a', '-m', 'local commit'])
+
+        self.assertEqual(len(self.scm.local_commits()), 1)
+        self.scm.discard_local_commits()
+        self.assertEqual(len(self.scm.local_commits()), 0)
+
+    def test_delete_branch(self):
+        new_branch = 'foo'
+
+        run_command(['git', 'checkout', '-b', new_branch])
+        self.assertEqual(run_command(['git', 'symbolic-ref', 'HEAD']).strip(), 'refs/heads/' + new_branch)
+
+        run_command(['git', 'checkout', '-b', 'bar'])
+        self.scm.delete_branch(new_branch)
+
+        self.assertFalse(re.search(r'foo', run_command(['git', 'branch'])))
+
+    def test_remote_merge_base(self):
+        # Diff to merge-base should include working-copy changes,
+        # which the diff to svn_branch.. doesn't.
+        test_file = os.path.join(self.git_checkout_path, 'test_file')
+        write_into_file_at_path(test_file, 'foo')
+
+        diff_to_common_base = _git_diff(self.scm.remote_branch_ref() + '..')
+        diff_to_merge_base = _git_diff(self.scm.remote_merge_base())
+
+        self.assertFalse(re.search(r'foo', diff_to_common_base))
+        self.assertTrue(re.search(r'foo', diff_to_merge_base))
+
+    def test_rebase_in_progress(self):
+        svn_test_file = os.path.join(self.svn_checkout_path, 'test_file')
+        write_into_file_at_path(svn_test_file, "svn_checkout")
+        run_command(['svn', 'commit', '--message', 'commit to conflict with git commit'], cwd=self.svn_checkout_path)
+
+        git_test_file = os.path.join(self.git_checkout_path, 'test_file')
+        write_into_file_at_path(git_test_file, "git_checkout")
+        run_command(['git', 'commit', '-a', '-m', 'commit to be thrown away by rebase abort'])
+
+        # --quiet doesn't make git svn silent, so use run_silent to redirect output
+        self.assertRaises(ScriptError, run_silent, ['git', 'svn', '--quiet', 'rebase']) # Will fail due to a conflict leaving us mid-rebase.
+
+        scm = detect_scm_system(self.git_checkout_path)
+        self.assertTrue(scm.rebase_in_progress())
+
+        # Make sure our cleanup works.
+        scm.clean_working_directory()
+        self.assertFalse(scm.rebase_in_progress())
+
+        # Make sure cleanup doesn't throw when no rebase is in progress.
+        scm.clean_working_directory()
+
+    def test_commitish_parsing(self):
+        scm = detect_scm_system(self.git_checkout_path)
+
+        # Multiple revisions are cherry-picked.
+        self.assertEqual(len(scm.commit_ids_from_commitish_arguments(['HEAD~2'])), 1)
+        self.assertEqual(len(scm.commit_ids_from_commitish_arguments(['HEAD', 'HEAD~2'])), 2)
+
+        # ... is an invalid range specifier
+        self.assertRaises(ScriptError, scm.commit_ids_from_commitish_arguments, ['trunk...HEAD'])
+
+    def test_commitish_order(self):
+        scm = detect_scm_system(self.git_checkout_path)
+
+        commit_range = 'HEAD~3..HEAD'
+
+        actual_commits = scm.commit_ids_from_commitish_arguments([commit_range])
+        expected_commits = []
+        expected_commits += reversed(run_command(['git', 'rev-list', commit_range]).splitlines())
+
+        self.assertEqual(actual_commits, expected_commits)
+
+    def test_apply_git_patch(self):
+        scm = detect_scm_system(self.git_checkout_path)
+        # We carefully pick a diff which does not have a directory addition
+        # as currently svn-apply will error out when trying to remove directories
+        # in Git: https://bugs.webkit.org/show_bug.cgi?id=34871
+        patch = self._create_patch(_git_diff('HEAD..HEAD^'))
+        self._setup_webkittools_scripts_symlink(scm)
+        Checkout(scm).apply_patch(patch)
+
+    def test_commit_text_parsing(self):
+        write_into_file_at_path('test_file', 'more test content')
+        commit_text = self.scm.commit_with_message("another test commit")
+        self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
+
+    def test_commit_with_message_working_copy_only(self):
+        write_into_file_at_path('test_file_commit1', 'more test content')
+        run_command(['git', 'add', 'test_file_commit1'])
+        scm = detect_scm_system(self.git_checkout_path)
+        commit_text = scm.commit_with_message("yet another test commit")
+
+        self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
+        svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
+        self.assertTrue(re.search(r'test_file_commit1', svn_log))
+
+    def _local_commit(self, filename, contents, message):
+        write_into_file_at_path(filename, contents)
+        run_command(['git', 'add', filename])
+        self.scm.commit_locally_with_message(message)
+
+    def _one_local_commit(self):
+        self._local_commit('test_file_commit1', 'more test content', 'another test commit')
+
+    def _one_local_commit_plus_working_copy_changes(self):
+        self._one_local_commit()
+        write_into_file_at_path('test_file_commit2', 'still more test content')
+        run_command(['git', 'add', 'test_file_commit2'])
+
+    def _second_local_commit(self):
+        self._local_commit('test_file_commit2', 'still more test content', 'yet another test commit')
+
+    def _two_local_commits(self):
+        self._one_local_commit()
+        self._second_local_commit()
+
+    def _three_local_commits(self):
+        self._local_commit('test_file_commit0', 'more test content', 'another test commit')
+        self._two_local_commits()
+
+    def test_revisions_changing_files_with_local_commit(self):
+        self._one_local_commit()
+        self.assertEquals(self.scm.revisions_changing_file('test_file_commit1'), [])
+
+    def test_commit_with_message(self):
+        self._one_local_commit_plus_working_copy_changes()
+        scm = detect_scm_system(self.git_checkout_path)
+        self.assertRaises(AmbiguousCommitError, scm.commit_with_message, "yet another test commit")
+        commit_text = scm.commit_with_message("yet another test commit", force_squash=True)
+
+        self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
+        svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
+        self.assertTrue(re.search(r'test_file_commit2', svn_log))
+        self.assertTrue(re.search(r'test_file_commit1', svn_log))
+
+    def test_commit_with_message_git_commit(self):
+        self._two_local_commits()
+
+        scm = detect_scm_system(self.git_checkout_path)
+        commit_text = scm.commit_with_message("another test commit", git_commit="HEAD^")
+        self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
+
+        svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
+        self.assertTrue(re.search(r'test_file_commit1', svn_log))
+        self.assertFalse(re.search(r'test_file_commit2', svn_log))
+
+    def test_commit_with_message_git_commit_range(self):
+        self._three_local_commits()
+
+        scm = detect_scm_system(self.git_checkout_path)
+        commit_text = scm.commit_with_message("another test commit", git_commit="HEAD~2..HEAD")
+        self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
+
+        svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
+        self.assertFalse(re.search(r'test_file_commit0', svn_log))
+        self.assertTrue(re.search(r'test_file_commit1', svn_log))
+        self.assertTrue(re.search(r'test_file_commit2', svn_log))
+
+    def test_commit_with_message_only_local_commit(self):
+        self._one_local_commit()
+        scm = detect_scm_system(self.git_checkout_path)
+        commit_text = scm.commit_with_message("another test commit")
+        svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
+        self.assertTrue(re.search(r'test_file_commit1', svn_log))
+
+    def test_commit_with_message_multiple_local_commits_and_working_copy(self):
+        self._two_local_commits()
+        write_into_file_at_path('test_file_commit1', 'working copy change')
+        scm = detect_scm_system(self.git_checkout_path)
+
+        self.assertRaises(AmbiguousCommitError, scm.commit_with_message, "another test commit")
+        commit_text = scm.commit_with_message("another test commit", force_squash=True)
+
+        self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
+        svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
+        self.assertTrue(re.search(r'test_file_commit2', svn_log))
+        self.assertTrue(re.search(r'test_file_commit1', svn_log))
+
+    def test_commit_with_message_git_commit_and_working_copy(self):
+        self._two_local_commits()
+        write_into_file_at_path('test_file_commit1', 'working copy change')
+        scm = detect_scm_system(self.git_checkout_path)
+        self.assertRaises(ScriptError, scm.commit_with_message, "another test commit", git_commit="HEAD^")
+
+    def test_commit_with_message_multiple_local_commits_always_squash(self):
+        self._two_local_commits()
+        scm = detect_scm_system(self.git_checkout_path)
+        scm._assert_can_squash = lambda working_directory_is_clean: True
+        commit_text = scm.commit_with_message("yet another test commit")
+        self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
+
+        svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
+        self.assertTrue(re.search(r'test_file_commit2', svn_log))
+        self.assertTrue(re.search(r'test_file_commit1', svn_log))
+
+    def test_commit_with_message_multiple_local_commits(self):
+        self._two_local_commits()
+        scm = detect_scm_system(self.git_checkout_path)
+        self.assertRaises(AmbiguousCommitError, scm.commit_with_message, "yet another test commit")
+        commit_text = scm.commit_with_message("yet another test commit", force_squash=True)
+
+        self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
+
+        svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
+        self.assertTrue(re.search(r'test_file_commit2', svn_log))
+        self.assertTrue(re.search(r'test_file_commit1', svn_log))
+
+    def test_commit_with_message_not_synced(self):
+        run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
+        self._two_local_commits()
+        scm = detect_scm_system(self.git_checkout_path)
+        self.assertRaises(AmbiguousCommitError, scm.commit_with_message, "another test commit")
+        commit_text = scm.commit_with_message("another test commit", force_squash=True)
+
+        self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
+
+        svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
+        self.assertFalse(re.search(r'test_file2', svn_log))
+        self.assertTrue(re.search(r'test_file_commit2', svn_log))
+        self.assertTrue(re.search(r'test_file_commit1', svn_log))
+
+    def test_commit_with_message_not_synced_with_conflict(self):
+        run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
+        self._local_commit('test_file2', 'asdf', 'asdf commit')
+
+        scm = detect_scm_system(self.git_checkout_path)
+        # There's a conflict between trunk and the test_file2 modification.
+        self.assertRaises(ScriptError, scm.commit_with_message, "another test commit", force_squash=True)
+
+    def test_upstream_branch(self):
+        run_command(['git', 'checkout', '-t', '-b', 'my-branch'])
+        run_command(['git', 'checkout', '-t', '-b', 'my-second-branch'])
+        self.assertEquals(self.scm._upstream_branch(), 'my-branch')
+
+    def test_remote_branch_ref(self):
+        self.assertEqual(self.scm.remote_branch_ref(), 'refs/remotes/trunk')
+
+    def test_reverse_diff(self):
+        self._shared_test_reverse_diff()
+
+    def test_diff_for_revision(self):
+        self._shared_test_diff_for_revision()
+
+    def test_svn_apply_git_patch(self):
+        self._shared_test_svn_apply_git_patch()
+
+    def test_create_patch_local_plus_working_copy(self):
+        self._one_local_commit_plus_working_copy_changes()
+        scm = detect_scm_system(self.git_checkout_path)
+        patch = scm.create_patch()
+        self.assertTrue(re.search(r'test_file_commit1', patch))
+        self.assertTrue(re.search(r'test_file_commit2', patch))
+
+    def test_create_patch(self):
+        self._one_local_commit_plus_working_copy_changes()
+        scm = detect_scm_system(self.git_checkout_path)
+        patch = scm.create_patch()
+        self.assertTrue(re.search(r'test_file_commit2', patch))
+        self.assertTrue(re.search(r'test_file_commit1', patch))
+        self.assertTrue(re.search(r'Subversion Revision: 5', patch))
+
+    def test_create_patch_after_merge(self):
+        run_command(['git', 'checkout', '-b', 'dummy-branch', 'trunk~3'])
+        self._one_local_commit()
+        run_command(['git', 'merge', 'trunk'])
+
+        scm = detect_scm_system(self.git_checkout_path)
+        patch = scm.create_patch()
+        self.assertTrue(re.search(r'test_file_commit1', patch))
+        self.assertTrue(re.search(r'Subversion Revision: 5', patch))
+
+    def test_create_patch_with_changed_files(self):
+        self._one_local_commit_plus_working_copy_changes()
+        scm = detect_scm_system(self.git_checkout_path)
+        patch = scm.create_patch(changed_files=['test_file_commit2'])
+        self.assertTrue(re.search(r'test_file_commit2', patch))
+
+    def test_create_patch_with_rm_and_changed_files(self):
+        self._one_local_commit_plus_working_copy_changes()
+        scm = detect_scm_system(self.git_checkout_path)
+        os.remove('test_file_commit1')
+        patch = scm.create_patch()
+        patch_with_changed_files = scm.create_patch(changed_files=['test_file_commit1', 'test_file_commit2'])
+        self.assertEquals(patch, patch_with_changed_files)
+
+    def test_create_patch_git_commit(self):
+        self._two_local_commits()
+        scm = detect_scm_system(self.git_checkout_path)
+        patch = scm.create_patch(git_commit="HEAD^")
+        self.assertTrue(re.search(r'test_file_commit1', patch))
+        self.assertFalse(re.search(r'test_file_commit2', patch))
+
+    def test_create_patch_git_commit_range(self):
+        self._three_local_commits()
+        scm = detect_scm_system(self.git_checkout_path)
+        patch = scm.create_patch(git_commit="HEAD~2..HEAD")
+        self.assertFalse(re.search(r'test_file_commit0', patch))
+        self.assertTrue(re.search(r'test_file_commit2', patch))
+        self.assertTrue(re.search(r'test_file_commit1', patch))
+
+    def test_create_patch_working_copy_only(self):
+        self._one_local_commit_plus_working_copy_changes()
+        scm = detect_scm_system(self.git_checkout_path)
+        patch = scm.create_patch(git_commit="HEAD....")
+        self.assertFalse(re.search(r'test_file_commit1', patch))
+        self.assertTrue(re.search(r'test_file_commit2', patch))
+
+    def test_create_patch_multiple_local_commits(self):
+        self._two_local_commits()
+        scm = detect_scm_system(self.git_checkout_path)
+        patch = scm.create_patch()
+        self.assertTrue(re.search(r'test_file_commit2', patch))
+        self.assertTrue(re.search(r'test_file_commit1', patch))
+
+    def test_create_patch_not_synced(self):
+        run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
+        self._two_local_commits()
+        scm = detect_scm_system(self.git_checkout_path)
+        patch = scm.create_patch()
+        self.assertFalse(re.search(r'test_file2', patch))
+        self.assertTrue(re.search(r'test_file_commit2', patch))
+        self.assertTrue(re.search(r'test_file_commit1', patch))
+
+    def test_create_binary_patch(self):
+        # Create a git binary patch and check the contents.
+        scm = detect_scm_system(self.git_checkout_path)
+        test_file_name = 'binary_file'
+        test_file_path = os.path.join(self.git_checkout_path, test_file_name)
+        file_contents = ''.join(map(chr, range(256)))
+        write_into_file_at_path(test_file_path, file_contents, encoding=None)
+        run_command(['git', 'add', test_file_name])
+        patch = scm.create_patch()
+        self.assertTrue(re.search(r'\nliteral 0\n', patch))
+        self.assertTrue(re.search(r'\nliteral 256\n', patch))
+
+        # Check if we can apply the created patch.
+        run_command(['git', 'rm', '-f', test_file_name])
+        self._setup_webkittools_scripts_symlink(scm)
+        self.checkout.apply_patch(self._create_patch(patch))
+        self.assertEqual(file_contents, read_from_path(test_file_path, encoding=None))
+
+        # Check if we can create a patch from a local commit.
+        write_into_file_at_path(test_file_path, file_contents, encoding=None)
+        run_command(['git', 'add', test_file_name])
+        run_command(['git', 'commit', '-m', 'binary diff'])
+        patch_from_local_commit = scm.create_patch('HEAD')
+        self.assertTrue(re.search(r'\nliteral 0\n', patch_from_local_commit))
+        self.assertTrue(re.search(r'\nliteral 256\n', patch_from_local_commit))
+
+    def test_changed_files_local_plus_working_copy(self):
+        self._one_local_commit_plus_working_copy_changes()
+        scm = detect_scm_system(self.git_checkout_path)
+        files = scm.changed_files()
+        self.assertTrue('test_file_commit1' in files)
+        self.assertTrue('test_file_commit2' in files)
+
+        # working copy should *not* be in the list.
+        files = scm.changed_files('trunk..')
+        self.assertTrue('test_file_commit1' in files)
+        self.assertFalse('test_file_commit2' in files)
+
+        # working copy *should* be in the list.
+        files = scm.changed_files('trunk....')
+        self.assertTrue('test_file_commit1' in files)
+        self.assertTrue('test_file_commit2' in files)
+
+    def test_changed_files_git_commit(self):
+        self._two_local_commits()
+        scm = detect_scm_system(self.git_checkout_path)
+        files = scm.changed_files(git_commit="HEAD^")
+        self.assertTrue('test_file_commit1' in files)
+        self.assertFalse('test_file_commit2' in files)
+
+    def test_changed_files_git_commit_range(self):
+        self._three_local_commits()
+        scm = detect_scm_system(self.git_checkout_path)
+        files = scm.changed_files(git_commit="HEAD~2..HEAD")
+        self.assertTrue('test_file_commit0' not in files)
+        self.assertTrue('test_file_commit1' in files)
+        self.assertTrue('test_file_commit2' in files)
+
+    def test_changed_files_working_copy_only(self):
+        self._one_local_commit_plus_working_copy_changes()
+        scm = detect_scm_system(self.git_checkout_path)
+        files = scm.changed_files(git_commit="HEAD....")
+        self.assertFalse('test_file_commit1' in files)
+        self.assertTrue('test_file_commit2' in files)
+
+    def test_changed_files_multiple_local_commits(self):
+        self._two_local_commits()
+        scm = detect_scm_system(self.git_checkout_path)
+        files = scm.changed_files()
+        self.assertTrue('test_file_commit2' in files)
+        self.assertTrue('test_file_commit1' in files)
+
+    def test_changed_files_not_synced(self):
+        run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
+        self._two_local_commits()
+        scm = detect_scm_system(self.git_checkout_path)
+        files = scm.changed_files()
+        self.assertFalse('test_file2' in files)
+        self.assertTrue('test_file_commit2' in files)
+        self.assertTrue('test_file_commit1' in files)
+
+    def test_changed_files(self):
+        self._shared_test_changed_files()
+
+    def test_changed_files_for_revision(self):
+        self._shared_test_changed_files_for_revision()
+
+    def test_changed_files_upstream(self):
+        run_command(['git', 'checkout', '-t', '-b', 'my-branch'])
+        self._one_local_commit()
+        run_command(['git', 'checkout', '-t', '-b', 'my-second-branch'])
+        self._second_local_commit()
+        write_into_file_at_path('test_file_commit0', 'more test content')
+        run_command(['git', 'add', 'test_file_commit0'])
+
+        # equivalent to 'git diff my-branch..HEAD', should not include working changes
+        files = self.scm.changed_files(git_commit='UPSTREAM..')
+        self.assertFalse('test_file_commit1' in files)
+        self.assertTrue('test_file_commit2' in files)
+        self.assertFalse('test_file_commit0' in files)
+
+        # equivalent to 'git diff my-branch', *should* include working changes
+        files = self.scm.changed_files(git_commit='UPSTREAM....')
+        self.assertFalse('test_file_commit1' in files)
+        self.assertTrue('test_file_commit2' in files)
+        self.assertTrue('test_file_commit0' in files)
+
+    def test_contents_at_revision(self):
+        self._shared_test_contents_at_revision()
+
+    def test_revisions_changing_file(self):
+        self._shared_test_revisions_changing_file()
+
+    def test_added_files(self):
+        self._shared_test_added_files()
+
+    def test_committer_email_for_revision(self):
+        self._shared_test_committer_email_for_revision()
+
+    def test_add_recursively(self):
+        self._shared_test_add_recursively()
+
+    def test_delete(self):
+        self._two_local_commits()
+        self.scm.delete('test_file_commit1')
+        self.assertTrue("test_file_commit1" in self.scm.deleted_files())
+
+    def test_delete_list(self):
+        self._two_local_commits()
+        self.scm.delete_list(["test_file_commit1", "test_file_commit2"])
+        self.assertTrue("test_file_commit1" in self.scm.deleted_files())
+        self.assertTrue("test_file_commit2" in self.scm.deleted_files())
+
+    def test_delete_recursively(self):
+        self._shared_test_delete_recursively()
+
+    def test_delete_recursively_or_not(self):
+        self._shared_test_delete_recursively_or_not()
+
+    def test_head_svn_revision(self):
+        self._shared_test_head_svn_revision()
+
+    def test_to_object_name(self):
+        relpath = 'test_file_commit1'
+        fullpath = os.path.join(self.git_checkout_path, relpath)
+        self._two_local_commits()
+        self.assertEqual(relpath, self.scm.to_object_name(fullpath))
+
+    def test_show_head(self):
+        self._two_local_commits()
+        self.assertEqual("more test content", self.scm.show_head('test_file_commit1'))
+
+    def test_show_head_binary(self):
+        self._two_local_commits()
+        data = "\244"
+        write_into_file_at_path("binary_file", data, encoding=None)
+        self.scm.add("binary_file")
+        self.scm.commit_locally_with_message("a test commit")
+        self.assertEqual(data, self.scm.show_head('binary_file'))
+
+    def test_diff_for_file(self):
+        self._two_local_commits()
+        write_into_file_at_path('test_file_commit1', "Updated", encoding=None)
+
+        diff = self.scm.diff_for_file('test_file_commit1')
+        cached_diff = self.scm.diff_for_file('test_file_commit1')
+        self.assertTrue("+Updated" in diff)
+        self.assertTrue("-more test content" in diff)
+
+        self.scm.add('test_file_commit1')
+
+        cached_diff = self.scm.diff_for_file('test_file_commit1')
+        self.assertTrue("+Updated" in cached_diff)
+        self.assertTrue("-more test content" in cached_diff)
+
+    def test_exists(self):
+        scm = detect_scm_system(self.git_checkout_path)
+        self._shared_test_exists(scm, scm.commit_locally_with_message)
+
+
+# We need to split off more of these SCM tests to use mocks instead of the filesystem.
+# This class is the first part of that.
+class GitTestWithMock(unittest.TestCase):
+    def make_scm(self, logging_executive=False):
+        # We do this should_log dance to avoid logging when Git.__init__ runs sysctl on mac to check for 64-bit support.
+        scm = Git(cwd=None, executive=MockExecutive())
+        scm._executive._should_log = logging_executive
+        return scm
+
+    def test_create_patch(self):
+        scm = self.make_scm(logging_executive=True)
+        expected_stderr = "MOCK run_command: ['git', 'merge-base', u'refs/remotes/origin/master', 'HEAD'], cwd=%(checkout)s\nMOCK run_command: ['git', 'diff', '--binary', '--no-ext-diff', '--full-index', '-M', 'MOCK output of child process', '--'], cwd=%(checkout)s\nMOCK run_command: ['git', 'log', '-25'], cwd=None\n" % {'checkout': scm.checkout_root}
+        OutputCapture().assert_outputs(self, scm.create_patch, expected_stderr=expected_stderr)
+
+    def test_push_local_commits_to_server_with_username_and_password(self):
+        self.assertEquals(self.make_scm().push_local_commits_to_server(username='dbates@webkit.org', password='blah'), "MOCK output of child process")
+
+    def test_push_local_commits_to_server_without_username_and_password(self):
+        self.assertRaises(AuthenticationError, self.make_scm().push_local_commits_to_server)
+
+    def test_push_local_commits_to_server_with_username_and_without_password(self):
+        self.assertRaises(AuthenticationError, self.make_scm().push_local_commits_to_server, {'username': 'dbates@webkit.org'})
+
+    def test_push_local_commits_to_server_without_username_and_with_password(self):
+        self.assertRaises(AuthenticationError, self.make_scm().push_local_commits_to_server, {'password': 'blah'})
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/checkout/scm/svn.py b/Tools/Scripts/webkitpy/common/checkout/scm/svn.py
new file mode 100644
index 0000000..25b7e3b
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/checkout/scm/svn.py
@@ -0,0 +1,356 @@
+# Copyright (c) 2009, 2010, 2011 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import os
+import re
+import shutil
+import sys
+import tempfile
+
+from webkitpy.common.memoized import memoized
+from webkitpy.common.system.deprecated_logging import log
+from webkitpy.common.system.executive import Executive, ScriptError
+
+from .scm import AuthenticationError, SCM, commit_error_handler
+
+
+_log = logging.getLogger(__name__)
+
+
+# A mixin class that represents common functionality for SVN and Git-SVN.
+class SVNRepository:
+    def has_authorization_for_realm(self, realm, home_directory=os.getenv("HOME")):
+        # Assumes find and grep are installed.
+        if not os.path.isdir(os.path.join(home_directory, ".subversion")):
+            return False
+        find_args = ["find", ".subversion", "-type", "f", "-exec", "grep", "-q", realm, "{}", ";", "-print"]
+        find_output = self.run(find_args, cwd=home_directory, error_handler=Executive.ignore_error).rstrip()
+        if not find_output or not os.path.isfile(os.path.join(home_directory, find_output)):
+            return False
+        # Subversion either stores the password in the credential file, indicated by the presence of the key "password",
+        # or uses the system password store (e.g. Keychain on Mac OS X) as indicated by the presence of the key "passtype".
+        # We assume that these keys will not coincide with the actual credential data (e.g. that a person's username
+        # isn't "password") so that we can use grep.
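+        # The cached credential files are plain-text "K <length>" / key /
+        # "V <length>" / value records, so grepping for the bare key names works.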
+        if self.run(["grep", "password", find_output], cwd=home_directory, return_exit_code=True) == 0:
+            return True
+        return self.run(["grep", "passtype", find_output], cwd=home_directory, return_exit_code=True) == 0
+
+
+class SVN(SCM, SVNRepository):
+    # FIXME: These belong in common.config.urls
+    svn_server_host = "svn.webkit.org"
+    svn_server_realm = "<http://svn.webkit.org:80> Mac OS Forge"
+
+    executable_name = "svn"
+
+    _svn_metadata_files = frozenset(['.svn', '_svn'])
+
+    def __init__(self, cwd, patch_directories, **kwargs):
+        SCM.__init__(self, cwd, **kwargs)
+        self._bogus_dir = None
+        if patch_directories == []:
+            # FIXME: ScriptError is for Executive, this should probably be a normal Exception.
+            raise ScriptError(message='Empty list of patch directories passed to SCM.__init__')
+        elif patch_directories is None:
+            self._patch_directories = [self._filesystem.relpath(cwd, self.checkout_root)]
+        else:
+            self._patch_directories = patch_directories
+
+    @classmethod
+    def in_working_directory(cls, path, executive=None):
+        if os.path.isdir(os.path.join(path, '.svn')):
+            # This is a fast shortcut for svn info that is usually correct for SVN < 1.7,
+            # but doesn't work for SVN >= 1.7.
+            return True
+
+        executive = executive or Executive()
+        svn_info_args = [cls.executable_name, 'info']
+        exit_code = executive.run_command(svn_info_args, cwd=path, return_exit_code=True)
+        return (exit_code == 0)
+
+    def find_uuid(self, path):
+        if not self.in_working_directory(path):
+            return None
+        return self.value_from_svn_info(path, 'Repository UUID')
+
+    @classmethod
+    def value_from_svn_info(cls, path, field_name):
+        svn_info_args = [cls.executable_name, 'info']
+        # FIXME: This method should use a passed in executive or be made an instance method and use self._executive.
+        info_output = Executive().run_command(svn_info_args, cwd=path).rstrip()
+        match = re.search("^%s: (?P<value>.+)$" % field_name, info_output, re.MULTILINE)
+        if not match:
+            raise ScriptError(script_args=svn_info_args, message='svn info did not contain a %s.' % field_name)
+        return match.group('value')
+
+    def find_checkout_root(self, path):
+        uuid = self.find_uuid(path)
+        # If |path| is not in a working directory, we're supposed to return |path|.
+        if not uuid:
+            return path
+        # Search up the directory hierarchy until we find a different UUID.
+        last_path = None
+        while True:
+            if uuid != self.find_uuid(path):
+                return last_path
+            last_path = path
+            (path, last_component) = self._filesystem.split(path)
+            if last_path == path:
+                return None
+
+    @staticmethod
+    def commit_success_regexp():
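+        # Matches the final line of 'svn commit' output, e.g. "Committed revision 12345."
+        # (revision number illustrative).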
+        return "^Committed revision (?P<svn_revision>\d+)\.$"
+
+    def _run_svn(self, args, **kwargs):
+        return self.run([self.executable_name] + args, **kwargs)
+
+    @memoized
+    def svn_version(self):
+        return self._run_svn(['--version', '--quiet'])
+
+    def working_directory_is_clean(self):
+        return self._run_svn(["diff"], cwd=self.checkout_root, decode_output=False) == ""
+
+    def clean_working_directory(self):
+        # Make sure there are no locks lying around from a previously aborted svn invocation.
+        # This is slightly dangerous, as it's possible the user is running another svn process
+        # on this checkout at the same time.  However, it's much more likely that we're running
+        # under windows and svn just sucks (or the user interrupted svn and it failed to clean up).
+        self._run_svn(["cleanup"], cwd=self.checkout_root)
+
+        # svn revert -R is not as awesome as git reset --hard.
+        # It will leave added files around, causing later svn update
+        # calls to fail on the bots.  We make this mirror git reset --hard
+        # by deleting any added files as well.
+        added_files = reversed(sorted(self.added_files()))
+        # added_files() returns directories for SVN; we walk the paths in reverse
+        # sorted order so that we remove files before we try to remove the
+        # directories that contain them.
+        self._run_svn(["revert", "-R", "."], cwd=self.checkout_root)
+        for path in added_files:
+            # This is robust against cwd != self.checkout_root
+            absolute_path = self.absolute_path(path)
+            # Completely lame that there is no easy way to remove both types with one call.
+            if os.path.isdir(path):
+                os.rmdir(absolute_path)
+            else:
+                os.remove(absolute_path)
+
+    def status_command(self):
+        return [self.executable_name, 'status']
+
+    def _status_regexp(self, expected_types):
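+        # 'svn status' prints a status letter, a run of fixed-width flag columns, a
+        # space, and then the path (e.g. "A       some_new_file"); newer svn releases
+        # emit an extra flag column, hence the version-dependent width below.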
+        field_count = 6 if self.svn_version() > "1.6" else 5
+        return "^(?P<status>[%s]).{%s} (?P<filename>.+)$" % (expected_types, field_count)
+
+    def _add_parent_directories(self, path):
+        """Does 'svn add' to the path and its parents."""
+        if self.in_working_directory(path):
+            return
+        self.add(path)
+
+    def add_list(self, paths, return_exit_code=False):
+        for path in paths:
+            self._add_parent_directories(os.path.dirname(os.path.abspath(path)))
+        return self._run_svn(["add"] + paths, return_exit_code=return_exit_code)
+
+    def _delete_parent_directories(self, path):
+        if not self.in_working_directory(path):
+            return
+        if set(os.listdir(path)) - self._svn_metadata_files:
+            return  # Directory has non-trivial files in it.
+        self.delete(path)
+
+    def delete_list(self, paths):
+        for path in paths:
+            abs_path = os.path.abspath(path)
+            parent, base = os.path.split(abs_path)
+            result = self._run_svn(["delete", "--force", base], cwd=parent)
+            self._delete_parent_directories(os.path.dirname(abs_path))
+        return result
+
+    def exists(self, path):
+        return not self._run_svn(["info", path], return_exit_code=True, decode_output=False)
+
+    def changed_files(self, git_commit=None):
+        status_command = [self.executable_name, "status"]
+        status_command.extend(self._patch_directories)
+        # ACDMR: Added, Conflicted, Deleted, Modified or Replaced
+        return self.run_status_and_extract_filenames(status_command, self._status_regexp("ACDMR"))
+
+    def changed_files_for_revision(self, revision):
+        # As far as I can tell svn diff --summarize output looks just like svn status output.
+        # No file contents printed, thus utf-8 auto-decoding in self.run is fine.
+        status_command = [self.executable_name, "diff", "--summarize", "-c", revision]
+        return self.run_status_and_extract_filenames(status_command, self._status_regexp("ACDMR"))
+
+    def revisions_changing_file(self, path, limit=5):
+        revisions = []
+        # svn log will exit(1) (and thus self.run will raise) if the path does not exist.
+        log_command = ['log', '--quiet', '--limit=%s' % limit, path]
+        for line in self._run_svn(log_command, cwd=self.checkout_root).splitlines():
+            match = re.search('^r(?P<revision>\d+) ', line)
+            if not match:
+                continue
+            revisions.append(int(match.group('revision')))
+        return revisions
+
+    def conflicted_files(self):
+        return self.run_status_and_extract_filenames(self.status_command(), self._status_regexp("C"))
+
+    def added_files(self):
+        return self.run_status_and_extract_filenames(self.status_command(), self._status_regexp("A"))
+
+    def deleted_files(self):
+        return self.run_status_and_extract_filenames(self.status_command(), self._status_regexp("D"))
+
+    @staticmethod
+    def supports_local_commits():
+        return False
+
+    def display_name(self):
+        return "svn"
+
+    def svn_revision(self, path):
+        return self.value_from_svn_info(path, 'Revision')
+
+    # FIXME: This method should be on Checkout.
+    def create_patch(self, git_commit=None, changed_files=None):
+        """Returns a byte array (str()) representing the patch file.
+        Patch files are effectively binary since they may contain
+        files of multiple different encodings."""
+        if changed_files == []:
+            return ""
+        elif changed_files is None:
+            changed_files = []
+        return self.run([self.script_path("svn-create-patch")] + changed_files,
+            cwd=self.checkout_root, return_stderr=False,
+            decode_output=False)
+
+    def committer_email_for_revision(self, revision):
+        return self._run_svn(["propget", "svn:author", "--revprop", "-r", revision]).rstrip()
+
+    def contents_at_revision(self, path, revision):
+        """Returns a byte array (str()) containing the contents
+        of path @ revision in the repository."""
+        remote_path = "%s/%s" % (self._repository_url(), path)
+        return self._run_svn(["cat", "-r", revision, remote_path], decode_output=False)
+
+    def diff_for_revision(self, revision):
+        # FIXME: This should probably use cwd=self.checkout_root
+        return self._run_svn(['diff', '-c', revision])
+
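+    # diff_for_file() below passes svn a scratch --config-dir, presumably so that
+    # settings in the user's real ~/.subversion config (such as a custom diff-cmd)
+    # cannot alter the generated diff.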
+    def _bogus_dir_name(self):
+        if sys.platform.startswith("win"):
+            parent_dir = tempfile.gettempdir()
+        else:
+            parent_dir = sys.path[0]  # The shared temp dir is not secure; use the script's directory instead.
+        return os.path.join(parent_dir, "temp_svn_config")
+
+    def _setup_bogus_dir(self, log):
+        self._bogus_dir = self._bogus_dir_name()
+        if not os.path.exists(self._bogus_dir):
+            os.mkdir(self._bogus_dir)
+            self._delete_bogus_dir = True
+        else:
+            self._delete_bogus_dir = False
+        if log:
+            log.debug('  Html: temp config dir: "%s".', self._bogus_dir)
+
+    def _teardown_bogus_dir(self, log):
+        if self._delete_bogus_dir:
+            shutil.rmtree(self._bogus_dir, True)
+            if log:
+                log.debug('  Html: removed temp config dir: "%s".', self._bogus_dir)
+        self._bogus_dir = None
+
+    def diff_for_file(self, path, log=None):
+        self._setup_bogus_dir(log)
+        try:
+            args = ['diff']
+            if self._bogus_dir:
+                args += ['--config-dir', self._bogus_dir]
+            args.append(path)
+            return self._run_svn(args, cwd=self.checkout_root)
+        finally:
+            self._teardown_bogus_dir(log)
+
+    def show_head(self, path):
+        return self._run_svn(['cat', '-r', 'BASE', path], decode_output=False)
+
+    def _repository_url(self):
+        return self.value_from_svn_info(self.checkout_root, 'URL')
+
+    def apply_reverse_diff(self, revision):
+        # '-c -revision' applies the inverse diff of 'revision'
+        svn_merge_args = ['merge', '--non-interactive', '-c', '-%s' % revision, self._repository_url()]
+        log("WARNING: svn merge has been known to take more than 10 minutes to complete.  It is recommended you use git for rollouts.")
+        log("Running 'svn %s'" % " ".join(svn_merge_args))
+        # FIXME: Should this use cwd=self.checkout_root?
+        self._run_svn(svn_merge_args)
+
+    def revert_files(self, file_paths):
+        # FIXME: This should probably use cwd=self.checkout_root.
+        self._run_svn(['revert'] + file_paths)
+
+    def commit_with_message(self, message, username=None, password=None, git_commit=None, force_squash=False, changed_files=None):
+        # git-commit and force are not used by SVN.
+        svn_commit_args = ["commit"]
+
+        if not username and not self.has_authorization_for_realm(self.svn_server_realm):
+            raise AuthenticationError(self.svn_server_host)
+        if username:
+            svn_commit_args.extend(["--username", username])
+
+        svn_commit_args.extend(["-m", message])
+
+        if changed_files:
+            svn_commit_args.extend(changed_files)
+
+        return self._run_svn(svn_commit_args, cwd=self.checkout_root, error_handler=commit_error_handler)
+
+    def svn_commit_log(self, svn_revision):
+        svn_revision = self.strip_r_from_svn_revision(svn_revision)
+        return self._run_svn(['log', '--non-interactive', '--revision', svn_revision])
+
+    def last_svn_commit_log(self):
+        # BASE is the checkout revision, HEAD is the remote repository revision
+        # http://svnbook.red-bean.com/en/1.0/ch03s03.html
+        return self.svn_commit_log('BASE')
+
+    def svn_blame(self, path):
+        return self._run_svn(['blame', path])
+
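+    # 'pset' and 'pget' are svn's built-in abbreviations for 'propset' and 'propget'.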
+    def propset(self, pname, pvalue, path):
+        dir, base = os.path.split(path)
+        return self._run_svn(['pset', pname, pvalue, base], cwd=dir)
+
+    def propget(self, pname, path):
+        dir, base = os.path.split(path)
+        return self._run_svn(['pget', pname, base], cwd=dir).encode('utf-8').rstrip("\n")
diff --git a/Tools/Scripts/webkitpy/common/checksvnconfigfile.py b/Tools/Scripts/webkitpy/common/checksvnconfigfile.py
new file mode 100644
index 0000000..e6165f6
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/checksvnconfigfile.py
@@ -0,0 +1,65 @@
+# Copyright (C) 2012 Balazs Ankes (bank@inf.u-szeged.hu) University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This file is used by:
+# webkitpy/tool/steps/addsvnmimetypeforpng.py
+# webkitpy/style/checkers/png.py
+
+import os
+import re
+
+
+def check(host, fs):
+    """
+    check the svn config file
+    return with three logical value:
+    is svn config file missing, is auto-props missing, is the svn:mime-type for png missing
+    """
+
+    cfg_file_path = config_file_path(host, fs)
+
+    try:
+        config_file = fs.read_text_file(cfg_file_path)
+    except IOError:
+        return (True, True, True)
+
+    errorcode_autoprop = not re.search("^\s*enable-auto-props\s*=\s*yes", config_file, re.MULTILINE)
+    errorcode_png = not re.search("^\s*\*\.png\s*=\s*svn:mime-type=image/png", config_file, re.MULTILINE)
+
+    return (False, errorcode_autoprop, errorcode_png)
+
+
+def config_file_path(host, fs):
+    if host.platform.is_win():
+        config_file_path = fs.join(os.environ['APPDATA'], "Subversion", "config")
+    else:
+        config_file_path = fs.join(fs.expanduser("~"), ".subversion", "config")
+    return config_file_path
+
+
+def errorstr_autoprop(config_file_path):
+    return 'You have to enable auto-props in the Subversion config file (%s: "enable-auto-props = yes"). ' % config_file_path
+
+
+def errorstr_png(config_file_path):
+    return 'You have to set the svn:mime-type for png files in the Subversion config file (%s: "*.png = svn:mime-type=image/png").' % config_file_path
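(Annotation, not part of the patch: a minimal usage sketch of this helper. It assumes a webkitpy host object that exposes a FileSystem as host.filesystem, as the callers listed above already have in hand; the wrapper function name is hypothetical.)

    from webkitpy.common import checksvnconfigfile

    def warn_about_svn_config(host):
        fs = host.filesystem  # assumed attribute on the caller's host object
        config_missing, autoprop_missing, png_missing = checksvnconfigfile.check(host, fs)
        cfg_path = checksvnconfigfile.config_file_path(host, fs)
        if config_missing:
            print "There is no SVN config file (%s)." % cfg_path
        elif autoprop_missing:
            print checksvnconfigfile.errorstr_autoprop(cfg_path)
        elif png_missing:
            print checksvnconfigfile.errorstr_png(cfg_path)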
diff --git a/Tools/Scripts/webkitpy/common/config/__init__.py b/Tools/Scripts/webkitpy/common/config/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/config/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/Tools/Scripts/webkitpy/common/config/build.py b/Tools/Scripts/webkitpy/common/config/build.py
new file mode 100644
index 0000000..2ecacc7
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/config/build.py
@@ -0,0 +1,135 @@
+# Copyright (C) 2010 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Functions relating to building WebKit"""
+
+import re
+
+
+def _should_file_trigger_build(target_platform, file):
+    # The directories and patterns lists below map directory names or
+    # regexp patterns to the bot platforms for which they should trigger a
+    # build. Mapping to the empty list means that no builds should be
+    # triggered on any platforms. Earlier directories/patterns take
+    # precedence over later ones.
+
+    # FIXME: The patterns below have only been verified to be correct on
+    # the platforms listed below. We should implement this for other platforms
+    # and start using it for their bots. Someone familiar with each platform
+    # will have to figure out what the right set of directories/patterns is for
+    # that platform.
+    assert(target_platform in ("mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard", "win"))
+
+    directories = [
+        # Directories that shouldn't trigger builds on any bots.
+        ("Examples", []),
+        ("PerformanceTests", []),
+        ("ManualTests", []),
+        ("Tools/BuildSlaveSupport/build.webkit.org-config/public_html", []),
+        ("Websites", []),
+        ("efl", []),
+        ("iphone", []),
+        ("opengl", []),
+        ("opentype", []),
+        ("openvg", []),
+        ("wince", []),
+        ("wx", []),
+
+        # Directories that should trigger builds on only some bots.
+        ("Source/WebCore/image-decoders", ["chromium"]),
+        ("LayoutTests/platform/mac", ["mac", "win"]),
+        ("cairo", ["gtk", "wincairo"]),
+        ("cf", ["chromium-mac", "mac", "qt", "win"]),
+        ("chromium", ["chromium"]),
+        ("cocoa", ["chromium-mac", "mac"]),
+        ("curl", ["gtk", "wincairo"]),
+        ("gobject", ["gtk"]),
+        ("gpu", ["chromium", "mac"]),
+        ("gstreamer", ["gtk"]),
+        ("gtk", ["gtk"]),
+        ("mac", ["chromium-mac", "mac"]),
+        ("mac-leopard", ["mac-leopard"]),
+        ("mac-lion", ["mac-leopard", "mac-lion", "mac-snowleopard", "win"]),
+        ("mac-snowleopard", ["mac-leopard", "mac-snowleopard"]),
+        ("mac-wk2", ["mac-lion", "mac-snowleopard", "mac-mountainlion", "win"]),
+        ("objc", ["mac"]),
+        ("qt", ["qt"]),
+        ("skia", ["chromium"]),
+        ("soup", ["gtk"]),
+        ("v8", ["chromium"]),
+        ("win", ["chromium-win", "win"]),
+    ]
+    patterns = [
+        # Patterns that shouldn't trigger builds on any bots.
+        (r"(?:^|/)ChangeLog.*$", []),
+        (r"(?:^|/)Makefile$", []),
+        (r"/ARM", []),
+        (r"/CMake.*", []),
+        (r"/LICENSE[^/]+$", []),
+        (r"ARM(?:v7)?\.(?:cpp|h)$", []),
+        (r"MIPS\.(?:cpp|h)$", []),
+        (r"WinCE\.(?:cpp|h|mm)$", []),
+        (r"\.(?:bkl|mk)$", []),
+
+        # Patterns that should trigger builds on only some bots.
+        (r"(?:^|/)GNUmakefile\.am$", ["gtk"]),
+        (r"/\w+Chromium\w*\.(?:cpp|h|mm)$", ["chromium"]),
+        (r"Mac\.(?:cpp|h|mm)$", ["mac"]),
+        (r"\.(?:vcproj|vsprops|sln)$", ["win"]),
+        (r"\.exp(?:\.in)?$", ["mac"]),
+        (r"\.gypi?", ["chromium"]),
+        (r"\.order$", ["mac"]),
+        (r"\.pr[io]$", ["qt"]),
+        (r"\.vcproj/", ["win"]),
+        (r"\.xcconfig$", ["mac"]),
+        (r"\.xcodeproj/", ["mac"]),
+    ]
+
+    base_platform = target_platform.split("-")[0]
+
+    # See if the file is in one of the known directories.
+    for directory, platforms in directories:
+        if re.search(r"(?:^|/)%s/" % directory, file):
+            return target_platform in platforms or base_platform in platforms
+
+    # See if the file matches a known pattern.
+    for pattern, platforms in patterns:
+        if re.search(pattern, file):
+            return target_platform in platforms or base_platform in platforms
+
+    # See if the file is a platform-specific test result.
+    match = re.match("LayoutTests/platform/(?P<platform>[^/]+)/", file)
+    if match:
+        # See if the file is a test result for this platform, our base
+        # platform, or one of our sub-platforms.
+        return match.group("platform") in (target_platform, base_platform) or match.group("platform").startswith("%s-" % target_platform)
+
+    # The file isn't one we know about specifically, so we should assume we
+    # have to build.
+    return True
+
+
+def should_build(target_platform, changed_files):
+    """Returns true if the changed files affect the given platform, and
+    thus a build should be performed. target_platform should be one of the
+    platforms used in the build.webkit.org master's config.json file."""
+    return any(_should_file_trigger_build(target_platform, file) for file in changed_files)
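(Annotation, not part of the patch: a sketch of how a buildbot master might consult should_build for one builder. RenderThemeMac.mm matches the Mac\.(cpp|h|mm) pattern above, which maps to mac bots only, while ChangeLog matches a pattern mapped to no bots.)

    from webkitpy.common.config import build

    changed_files = ["Source/WebCore/rendering/RenderThemeMac.mm", "ChangeLog"]
    print build.should_build("mac-lion", changed_files)  # True: the Mac-only source file affects mac bots
    print build.should_build("win", changed_files)       # False: neither file affects Windows bots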
diff --git a/Tools/Scripts/webkitpy/common/config/build_unittest.py b/Tools/Scripts/webkitpy/common/config/build_unittest.py
new file mode 100644
index 0000000..c496179
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/config/build_unittest.py
@@ -0,0 +1,70 @@
+# Copyright (C) 2010 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.config import build
+
+
+class ShouldBuildTest(unittest.TestCase):
+    _should_build_tests = [
+        (["ChangeLog", "Source/WebCore/ChangeLog", "Source/WebKit2/ChangeLog-2011-02-11"], []),
+        (["GNUmakefile.am", "Source/WebCore/GNUmakefile.am"], ["gtk"]),
+        (["Websites/bugs.webkit.org/foo", "Source/WebCore/bar"], ["*"]),
+        (["Websites/bugs.webkit.org/foo"], []),
+        (["Source/JavaScriptCore/JavaScriptCore.xcodeproj/foo"], ["mac-leopard", "mac-lion",  "mac-mountainlion", "mac-snowleopard"]),
+        (["Source/JavaScriptCore/JavaScriptCore.vcproj/foo", "Source/WebKit2/win/WebKit2.vcproj", "Source/WebKit/win/WebKit.sln", "Tools/WebKitTestRunner/Configurations/WebKitTestRunnerCommon.vsprops"], ["win"]),
+        (["LayoutTests/platform/mac/foo", "Source/WebCore/bar"], ["*"]),
+        (["LayoutTests/foo"], ["*"]),
+        (["LayoutTests/canvas/philip/tests/size.attributes.parse.exp-expected.txt", "LayoutTests/canvas/philip/tests/size.attributes.parse.exp.html"], ["*"]),
+        (["LayoutTests/platform/chromium-linux/foo"], ["chromium-linux"]),
+        (["LayoutTests/platform/chromium-win/fast/compact/001-expected.txt"], ["chromium-win"]),
+        (["LayoutTests/platform/mac-leopard/foo"], ["mac-leopard"]),
+        (["LayoutTests/platform/mac-lion/foo"], ["mac-leopard", "mac-lion", "mac-snowleopard", "win"]),
+        (["LayoutTests/platform/mac-snowleopard/foo"], ["mac-leopard", "mac-snowleopard"]),
+        (["LayoutTests/platform/mac-wk2/Skipped"], ["mac-lion",  "mac-mountainlion", "mac-snowleopard", "win"]),
+        (["LayoutTests/platform/mac/foo"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard", "win"]),
+        (["LayoutTests/platform/win-xp/foo"], ["win"]),
+        (["LayoutTests/platform/win-wk2/foo"], ["win"]),
+        (["LayoutTests/platform/win/foo"], ["win"]),
+        (["Source/WebCore.exp.in", "Source/WebKit/mac/WebKit.exp"], ["mac-leopard", "mac-lion",  "mac-mountainlion", "mac-snowleopard"]),
+        (["Source/WebCore/mac/foo"], ["chromium-mac", "mac-leopard", "mac-lion",  "mac-mountainlion", "mac-snowleopard"]),
+        (["Source/WebCore/win/foo"], ["chromium-win", "win"]),
+        (["Source/WebCore/platform/graphics/gpu/foo"], ["mac-leopard", "mac-lion",  "mac-mountainlion", "mac-snowleopard"]),
+        (["Source/WebCore/platform/wx/wxcode/win/foo"], []),
+        (["Source/WebCore/rendering/RenderThemeMac.mm", "Source/WebCore/rendering/RenderThemeMac.h"], ["mac-leopard", "mac-lion",  "mac-mountainlion", "mac-snowleopard"]),
+        (["Source/WebCore/rendering/RenderThemeChromiumLinux.h"], ["chromium-linux"]),
+        (["Source/WebCore/rendering/RenderThemeWinCE.h"], []),
+        (["Tools/BuildSlaveSupport/build.webkit.org-config/public_html/LeaksViewer/LeaksViewer.js"], []),
+    ]
+
+    def test_should_build(self):
+        for files, platforms in self._should_build_tests:
+            # FIXME: We should test more platforms here once
+            # build._should_file_trigger_build is implemented for them.
+            for platform in ["mac-leopard", "mac-lion",  "mac-mountainlion", "mac-snowleopard", "win"]:
+                should_build = platform in platforms or "*" in platforms
+                self.assertEqual(build.should_build(platform, files), should_build, "%s should%s have built but did%s (files: %s)" % (platform, "" if should_build else "n't", "n't" if should_build else "", str(files)))
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/config/committers.py b/Tools/Scripts/webkitpy/common/config/committers.py
new file mode 100644
index 0000000..36df3db
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/config/committers.py
@@ -0,0 +1,717 @@
+# Copyright (c) 2011, Apple Inc. All rights reserved.
+# Copyright (c) 2009, 2011, 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# WebKit's Python module for committer and reviewer validation.
+
+from webkitpy.common.editdistance import edit_distance
+
+class Account(object):
+    def __init__(self, name, email_or_emails, irc_nickname_or_nicknames=None):
+        assert(name)
+        assert(email_or_emails)
+        self.full_name = name
+        if isinstance(email_or_emails, str):
+            self.emails = [email_or_emails]
+        else:
+            self.emails = email_or_emails
+        self.emails = map(lambda email: email.lower(), self.emails)  # Emails are case-insensitive.
+        if isinstance(irc_nickname_or_nicknames, str):
+            self.irc_nicknames = [irc_nickname_or_nicknames]
+        else:
+            self.irc_nicknames = irc_nickname_or_nicknames
+        self.can_commit = False
+        self.can_review = False
+
+    def bugzilla_email(self):
+        # FIXME: We're assuming the first email is a valid bugzilla email,
+        # which might not be right.
+        return self.emails[0]
+
+    def __str__(self):
+        return '"%s" <%s>' % (self.full_name, self.emails[0])
+
+    def contains_string(self, search_string):
+        string = search_string.lower()
+        if string in self.full_name.lower():
+            return True
+        if self.irc_nicknames:
+            for nickname in self.irc_nicknames:
+                if string in nickname.lower():
+                    return True
+        for email in self.emails:
+            if string in email:
+                return True
+        return False
+
+
+class Contributor(Account):
+    def __init__(self, name, email_or_emails, irc_nickname=None):
+        Account.__init__(self, name, email_or_emails, irc_nickname)
+        self.is_contributor = True
+
+
+class Committer(Contributor):
+    def __init__(self, name, email_or_emails, irc_nickname=None):
+        Contributor.__init__(self, name, email_or_emails, irc_nickname)
+        self.can_commit = True
+
+
+class Reviewer(Committer):
+    def __init__(self, name, email_or_emails, irc_nickname=None):
+        Committer.__init__(self, name, email_or_emails, irc_nickname)
+        self.can_review = True
+
+
+# This is a list of email addresses that have bugzilla accounts but are not
+# used for contributing (such as mailing lists).
+
+
+watchers_who_are_not_contributors = [
+    Account("Chromium Compositor Bugs", ["cc-bugs@chromium.org"], ""),
+    Account("Chromium Media Reviews", ["feature-media-reviews@chromium.org"], ""),
+    Account("David Levin", ["levin+threading@chromium.org"], ""),
+    Account("David Levin", ["levin+watchlist@chromium.org"], ""),
+    Account("Kent Tamura", ["tkent+wkapi@chromium.org"], ""),
+]
+
+
+# This is a list of people (or bots) who are neither committers nor reviewers, but get
+# frequently CC'ed by others on Bugzilla bugs, so their names should be
+# supported by autocomplete. No review needed to add to the list.
+
+
+contributors_who_are_not_committers = [
+    Contributor("Adobe Bug Tracker", "WebkitBugTracker@adobe.com"),
+    Contributor("Aharon Lanin", "aharon@google.com"),
+    Contributor("Alan Stearns", "stearns@adobe.com", "astearns"),
+    Contributor("Alejandro Pineiro", "apinheiro@igalia.com"),
+    Contributor("Alexey Marinichev", ["amarinichev@chromium.org", "amarinichev@google.com"], "amarinichev"),
+    Contributor("Andras Piroska", "pandras@inf.u-szeged.hu", "andris88"),
+    Contributor("Andrei Bucur", "abucur@adobe.com", "abucur"),
+    Contributor("Anne van Kesteren", "annevankesteren+webkit@gmail.com", "annevk"),
+    Contributor("Annie Sullivan", "sullivan@chromium.org", "annie"),
+    Contributor("Aryeh Gregor", "ayg@aryeh.name", "AryehGregor"),
+    Contributor("Balazs Ankes", "bank@inf.u-szeged.hu", "abalazs"),
+    Contributor("Brian Salomon", "bsalomon@google.com"),
+    Contributor("Commit Queue", "commit-queue@webkit.org"),
+    Contributor("Daniel Sievers", "sievers@chromium.org"),
+    Contributor("David Dorwin", "ddorwin@chromium.org", "ddorwin"),
+    Contributor("David Reveman", "reveman@chromium.org", "reveman"),
+    Contributor("Dongsung Huang", "luxtella@company100.net", "Huang"),
+    Contributor("Douglas Davidson", "ddavidso@apple.com"),
+    Contributor("Edward O'Connor", "eoconnor@apple.com", "hober"),
+    Contributor("Elliott Sprehn", "esprehn@chromium.org", "esprehn"),
+    Contributor("Eric Penner", "epenner@chromium.org", "epenner"),
+    Contributor("Felician Marton", ["felician@inf.u-szeged.hu", "marton.felician.zoltan@stud.u-szeged.hu"], "Felician"),
+    Contributor("Finnur Thorarinsson", ["finnur@chromium.org", "finnur.webkit@gmail.com"], "finnur"),
+    Contributor("Forms Bugs", "forms-bugs@chromium.org"),
+    Contributor("Glenn Adams", "glenn@skynav.com", "gasubic"),
+    Contributor("Gabor Ballabas", "gaborb@inf.u-szeged.hu", "bgabor"),
+    Contributor("Grace Kloba", "klobag@chromium.org", "klobag"),
+    Contributor("Greg Simon", "gregsimon@chromium.org", "gregsimon"),
+    Contributor("Gregg Tavares", ["gman@google.com", "gman@chromium.org"], "gman"),
+    Contributor("Hao Zheng", "zhenghao@chromium.org"),
+    Contributor("Harald Alvestrand", "hta@google.com", "hta"),
+    Contributor("Ian Hickson", "ian@hixie.ch", "hixie"),
+    Contributor("Janos Badics", "jbadics@inf.u-szeged.hu", "dicska"),
+    Contributor("Jonathan Backer", "backer@chromium.org", "backer"),
+    Contributor("Jeff Timanus", ["twiz@chromium.org", "twiz@google.com"], "twiz"),
+    Contributor("Jing Zhao", "jingzhao@chromium.org"),
+    Contributor("Joanmarie Diggs", "jdiggs@igalia.com"),
+    Contributor("John Bates", ["jbates@google.com", "jbates@chromium.org"], "jbates"),
+    Contributor("John Bauman", ["jbauman@chromium.org", "jbauman@google.com"], "jbauman"),
+    Contributor("John Mellor", "johnme@chromium.org", "johnme"),
+    Contributor("Kulanthaivel Palanichamy", "kulanthaivel@codeaurora.org", "kvel"),
+    Contributor("Kiran Muppala", "cmuppala@apple.com", "kiranm"),
+    Contributor("Mihai Balan", "mibalan@adobe.com", "miChou"),
+    Contributor("Min Qin", "qinmin@chromium.org"),
+    Contributor("Nandor Huszka", "hnandor@inf.u-szeged.hu", "hnandor"),
+    Contributor("Oliver Varga", ["voliver@inf.u-szeged.hu", "Varga.Oliver@stud.u-szeged.hu"], "TwistO"),
+    Contributor("Peter Gal", "galpeter@inf.u-szeged.hu", "elecro"),
+    Contributor("Peter Linss", "peter.linss@hp.com", "plinss"),
+    Contributor("Pravin D", "pravind.2k4@gmail.com", "pravind"),
+    Contributor("Radar WebKit Bug Importer", "webkit-bug-importer@group.apple.com"),
+    Contributor("Raul Hudea", "rhudea@adobe.com", "rhudea"),
+    Contributor("Roland Takacs", "rtakacs@inf.u-szeged.hu", "rtakacs"),
+    Contributor(u"Sami Ky\u00f6stil\u00e4", "skyostil@chromium.org", "skyostil"),
+    Contributor("Szilard Ledan-Muntean", "szledan@inf.u-szeged.hu", "szledan"),
+    Contributor("Tab Atkins", ["tabatkins@google.com", "jackalmage@gmail.com"], "tabatkins"),
+    Contributor("Tamas Czene", ["tczene@inf.u-szeged.hu", "Czene.Tamas@stud.u-szeged.hu"], "tczene"),
+    Contributor("Tien-Ren Chen", "trchen@chromium.org", "trchen"),
+    Contributor("WebKit Review Bot", "webkit.review.bot@gmail.com", "sheriff-bot"),
+    Contributor("Web Components Team", "webcomponents-bugzilla@chromium.org"),
+    Contributor("Wyatt Carss", ["wcarss@chromium.org", "wcarss@google.com"], "wcarss"),
+    Contributor("Zeev Lieber", "zlieber@chromium.org"),
+    Contributor("Zoltan Arvai", "zarvai@inf.u-szeged.hu", "azbest_hu"),
+    Contributor("Zsolt Feher", "feherzs@inf.u-szeged.hu", "Smith"),
+]
+
+
+# This is intended as a canonical, machine-readable list of all non-reviewer
+# committers for WebKit.  If your name is missing here and you are a committer,
+# please add it.  No review needed.  All reviewers are committers, so this list
+# is only of committers who are not reviewers.
+
+
+committers_unable_to_review = [
+    Committer("Aaron Boodman", "aa@chromium.org", "aboodman"),
+    Committer("Adam Bergkvist", "adam.bergkvist@ericsson.com", "adambe"),
+    Committer("Adam Kallai", "kadam@inf.u-szeged.hu", "kadam"),
+    Committer("Adam Klein", "adamk@chromium.org", "aklein"),
+    Committer("Adam Langley", "agl@chromium.org", "agl"),
+    Committer("Ademar de Souza Reis Jr", ["ademar.reis@gmail.com", "ademar@webkit.org"], "ademar"),
+    Committer("Albert J. Wong", "ajwong@chromium.org"),
+    Committer("Alec Flett", ["alecflett@chromium.org", "alecflett@google.com"], "alecf"),
+    Committer(u"Alexander F\u00e6r\u00f8y", ["ahf@0x90.dk", "alexander.faeroy@nokia.com"], "ahf"),
+    Committer("Alexander Kellett", ["lypanov@mac.com", "a-lists001@lypanov.net", "lypanov@kde.org"], "lypanov"),
+    Committer("Alexandre Elias", ["aelias@chromium.org", "aelias@google.com"], "aelias"),
+    Committer("Alexandru Chiculita", "achicu@adobe.com", "achicu"),
+    Committer("Alice Boxhall", "aboxhall@chromium.org", "aboxhall"),
+    Committer("Allan Sandfeld Jensen", ["allan.jensen@digia.com", "kde@carewolf.com", "sandfeld@kde.org", "allan.jensen@nokia.com"], "carewolf"),
+    Committer("Alok Priyadarshi", "alokp@chromium.org", "alokp"),
+    Committer("Ami Fischman", ["fischman@chromium.org", "fischman@google.com"], "fischman"),
+    Committer("Amruth Raj", "amruthraj@motorola.com", "amruthraj"),
+    Committer("Andre Boule", "aboule@apple.com"),
+    Committer("Andrei Popescu", "andreip@google.com", "andreip"),
+    Committer("Andrew Wellington", ["andrew@webkit.org", "proton@wiretapped.net"], "proton"),
+    Committer("Andrew Scherkus", "scherkus@chromium.org", "scherkus"),
+    Committer("Andrey Kosyakov", "caseq@chromium.org", "caseq"),
+    Committer("Andras Becsi", ["abecsi@webkit.org", "andras.becsi@digia.com"], "bbandix"),
+    Committer("Andy Wingo", "wingo@igalia.com", "wingo"),
+    Committer("Anna Cavender", "annacc@chromium.org", "annacc"),
+    Committer("Anthony Ricaud", "rik@webkit.org", "rik"),
+    Committer("Antoine Labour", "piman@chromium.org", "piman"),
+    Committer("Anton D'Auria", "adauria@apple.com", "antonlefou"),
+    Committer("Anton Muhin", "antonm@chromium.org", "antonm"),
+    Committer("Arko Saha", "arko@motorola.com", "arkos"),
+    Committer("Arvid Nilsson", "anilsson@rim.com", "anilsson"),
+    Committer("Balazs Kelemen", "kbalazs@webkit.org", "kbalazs"),
+    Committer("Ben Murdoch", "benm@google.com", "benm"),
+    Committer("Ben Wells", "benwells@chromium.org", "benwells"),
+    Committer("Benjamin C Meyer", ["ben@meyerhome.net", "ben@webkit.org", "bmeyer@rim.com"], "icefox"),
+    Committer("Benjamin Kalman", ["kalman@chromium.org", "kalman@google.com"], "kalman"),
+    Committer("Benjamin Otte", ["otte@gnome.org", "otte@webkit.org"], "otte"),
+    Committer("Bill Budge", ["bbudge@chromium.org", "bbudge@gmail.com"], "bbudge"),
+    Committer("Brett Wilson", "brettw@chromium.org", "brettx"),
+    Committer("Bruno de Oliveira Abinader", ["bruno.abinader@basyskom.com", "brunoabinader@gmail.com"], "abinader"),
+    Committer("Cameron McCormack", ["cam@mcc.id.au", "cam@webkit.org"], "heycam"),
+    Committer("Carol Szabo", ["carol@webkit.org", "carol.szabo@nokia.com"], "cszabo1"),
+    Committer("Cary Clark", ["caryclark@google.com", "caryclark@chromium.org"], "caryclark"),
+    Committer("Charles Reis", "creis@chromium.org", "creis"),
+    Committer("Charles Wei", ["charles.wei@torchmobile.com.cn"], "cswei"),
+    Committer("Chris Evans", ["cevans@google.com", "cevans@chromium.org"]),
+    Committer("Chris Guillory", ["ctguil@chromium.org", "chris.guillory@google.com"], "ctguil"),
+    Committer("Chris Petersen", "cpetersen@apple.com", "cpetersen"),
+    Committer("Christian Dywan", ["christian@twotoasts.de", "christian@webkit.org", "christian@lanedo.com"]),
+    Committer("Collin Jackson", "collinj@webkit.org", "collinjackson"),
+    Committer("Cris Neckar", "cdn@chromium.org", "cneckar"),
+    Committer("Dan Winship", "danw@gnome.org", "danw"),
+    Committer("Dana Jansens", "danakj@chromium.org", "danakj"),
+    Committer("Daniel Cheng", "dcheng@chromium.org", "dcheng"),
+    Committer("Dave Barton", "dbarton@mathscribe.com", "davebarton"),
+    Committer("Dave Tharp", "dtharp@codeaurora.org", "dtharp"),
+    Committer("David Michael Barr", ["davidbarr@chromium.org", "davidbarr@google.com", "b@rr-dav.id.au"], "barrbrain"),
+    Committer("David Grogan", ["dgrogan@chromium.org", "dgrogan@google.com"], "dgrogan"),
+    Committer("David Smith", ["catfish.man@gmail.com", "dsmith@webkit.org"], "catfishman"),
+    Committer("Diego Gonzalez", ["diegohcg@webkit.org", "diego.gonzalez@openbossa.org"], "diegohcg"),
+    Committer("Dinu Jacob", "dinu.s.jacob@intel.com", "dsjacob"),
+    Committer("Dmitry Lomov", ["dslomov@google.com", "dslomov@chromium.org"], "dslomov"),
+    Committer("Dominic Cooney", ["dominicc@chromium.org", "dominicc@google.com"], "dominicc"),
+    Committer("Dominic Mazzoni", ["dmazzoni@google.com", "dmazzoni@chromium.org"], "dmazzoni"),
+    Committer(u"Dominik R\u00f6ttsches", ["dominik.rottsches@intel.com", "d-r@roettsches.de"], "drott"),
+    Committer("Drew Wilson", "atwilson@chromium.org", "atwilson"),
+    Committer("Eli Fidler", ["eli@staikos.net", "efidler@rim.com"], "efidler"),
+    Committer("Elliot Poger", "epoger@chromium.org", "epoger"),
+    Committer("Erik Arvidsson", "arv@chromium.org", "arv"),
+    Committer("Eric Roman", "eroman@chromium.org", "eroman"),
+    Committer("Eric Uhrhane", "ericu@chromium.org", "ericu"),
+    Committer("Evan Martin", "evan@chromium.org", "evmar"),
+    Committer("Evan Stade", "estade@chromium.org", "estade"),
+    Committer("Fady Samuel", "fsamuel@chromium.org", "fsamuel"),
+    Committer("Feng Qian", "feng@chromium.org"),
+    Committer("Florin Malita", ["fmalita@chromium.org", "fmalita@google.com"], "fmalita"),
+    Committer("Fumitoshi Ukai", "ukai@chromium.org", "ukai"),
+    Committer("Gabor Loki", "loki@webkit.org", "loki04"),
+    Committer("Gabor Rapcsanyi", ["rgabor@webkit.org", "rgabor@inf.u-szeged.hu"], "rgabor"),
+    Committer("Gavin Peters", ["gavinp@chromium.org", "gavinp@webkit.org", "gavinp@google.com"], "gavinp"),
+    Committer("Girish Ramakrishnan", ["girish@forwardbias.in", "ramakrishnan.girish@gmail.com"], "girishr"),
+    Committer("Graham Dennis", ["Graham.Dennis@gmail.com", "gdennis@webkit.org"]),
+    Committer("Greg Bolsinga", "bolsinga@apple.com"),
+    Committer("Grzegorz Czajkowski", "g.czajkowski@samsung.com", "grzegorz"),
+    Committer("Hans Wennborg", "hans@chromium.org", "hwennborg"),
+    Committer("Hayato Ito", "hayato@chromium.org", "hayato"),
+    Committer("Hironori Bono", "hbono@chromium.org", "hbono"),
+    Committer("Helder Correia", "helder.correia@nokia.com", "helder"),
+    Committer("Hin-Chung Lam", ["hclam@google.com", "hclam@chromium.org"]),
+    Committer("Hugo Parente Lima", "hugo.lima@openbossa.org", "hugopl"),
+    Committer("Ian Vollick", "vollick@chromium.org", "vollick"),
+    Committer("Igor Trindade Oliveira", ["igor.oliveira@webkit.org", "igor.o@sisa.samsung.com"], "igoroliveira"),
+    Committer("Ilya Sherman", "isherman@chromium.org", "isherman"),
+    Committer("Ilya Tikhonovsky", "loislo@chromium.org", "loislo"),
+    Committer("Ivan Krsti\u0107", "ike@apple.com"),
+    Committer("Jacky Jiang", ["jkjiang@webkit.org", "zkjiang008@gmail.com", "zhajiang@rim.com"], "jkjiang"),
+    Committer("Jakob Petsovits", ["jpetsovits@rim.com", "jpetso@gmx.at"], "jpetso"),
+    Committer("Jakub Wieczorek", "jwieczorek@webkit.org", "fawek"),
+    Committer("James Hawkins", ["jhawkins@chromium.org", "jhawkins@google.com"], "jhawkins"),
+    Committer("James Kozianski", ["koz@chromium.org", "koz@google.com"], "koz"),
+    Committer("James Simonsen", "simonjam@chromium.org", "simonjam"),
+    Committer("Jarred Nicholls", ["jarred@webkit.org", "jarred@sencha.com"], "jarrednicholls"),
+    Committer("Jason Liu", ["jason.liu@torchmobile.com.cn", "jasonliuwebkit@gmail.com"], "jasonliu"),
+    Committer("Jay Civelli", "jcivelli@chromium.org", "jcivelli"),
+    Committer("Jeff Miller", "jeffm@apple.com", "jeffm7"),
+    Committer("Jeffrey Pfau", ["jeffrey@endrift.com", "jpfau@apple.com"], "jpfau"),
+    Committer("Jenn Braithwaite", "jennb@chromium.org", "jennb"),
+    Committer("Jens Alfke", ["snej@chromium.org", "jens@apple.com"]),
+    Committer("Jer Noble", "jer.noble@apple.com", "jernoble"),
+    Committer("Jeremy Moskovich", ["playmobil@google.com", "jeremy@chromium.org"], "jeremymos"),
+    Committer("Jesus Sanchez-Palencia", ["jesus@webkit.org", "jesus.palencia@openbossa.org"], "jeez_"),
+    Committer("Jia Pu", "jpu@apple.com"),
+    Committer("Joe Thomas", "joethomas@motorola.com", "joethomas"),
+    Committer("John Abd-El-Malek", "jam@chromium.org", "jam"),
+    Committer("John Gregg", ["johnnyg@google.com", "johnnyg@chromium.org"], "johnnyg"),
+    Committer("John Knottenbelt", "jknotten@chromium.org", "jknotten"),
+    Committer("Johnny Ding", ["jnd@chromium.org", "johnnyding.webkit@gmail.com"], "johnnyding"),
+    Committer("Jon Lee", "jonlee@apple.com", "jonlee"),
+    Committer("Jonathan Dong", ["jonathan.dong@torchmobile.com.cn"], "jondong"),
+    Committer("Joone Hur", ["joone@webkit.org", "joone.hur@intel.com"], "joone"),
+    Committer("Joost de Valk", ["joost@webkit.org", "webkit-dev@joostdevalk.nl"], "Altha"),
+    Committer("Joshua Bell", ["jsbell@chromium.org", "jsbell@google.com"], "jsbell"),
+    Committer("Julie Parent", ["jparent@google.com", "jparent@chromium.org"], "jparent"),
+    Committer("Jungshik Shin", "jshin@chromium.org"),
+    Committer("Justin Novosad", ["junov@google.com", "junov@chromium.org"], "junov"),
+    Committer("Justin Schuh", "jschuh@chromium.org", "jschuh"),
+    Committer("Kaustubh Atrawalkar", ["kaustubh@motorola.com"], "silverroots"),
+    Committer("Keishi Hattori", "keishi@webkit.org", "keishi"),
+    Committer("Kelly Norton", "knorton@alum.mit.edu"),
+    Committer("Ken Buchanan", "kenrb@chromium.org", "kenrb"),
+    Committer("Kenichi Ishibashi", "bashi@chromium.org", "bashi"),
+    Committer("Kenji Imasaki", "imasaki@chromium.org", "imasaki"),
+    Committer("Kent Hansen", "kent.hansen@nokia.com", "khansen"),
+    Committer("Kihong Kwon", "kihong.kwon@samsung.com", "kihong"),
+    Committer(u"Kim Gr\u00f6nholm", "kim.1.gronholm@nokia.com"),
+    Committer("Kimmo Kinnunen", ["kimmo.t.kinnunen@nokia.com", "kimmok@iki.fi", "ktkinnun@webkit.org"], "kimmok"),
+    Committer("Kinuko Yasuda", "kinuko@chromium.org", "kinuko"),
+    Committer("Konrad Piascik", "kpiascik@rim.com", "kpiascik"),
+    Committer("Kristof Kosztyo", "kkristof@inf.u-szeged.hu", "kkristof"),
+    Committer("Krzysztof Kowalczyk", "kkowalczyk@gmail.com"),
+    Committer("Kwang Yul Seo", ["skyul@company100.net", "kseo@webkit.org"], "kseo"),
+    Committer("Lauro Neto", "lauro.neto@openbossa.org", "lmoura"),
+    Committer("Leandro Gracia Gil", "leandrogracia@chromium.org", "leandrogracia"),
+    Committer("Leandro Pereira", ["leandro@profusion.mobi", "leandro@webkit.org"], "acidx"),
+    Committer("Leo Yang", ["leoyang@rim.com", "leoyang@webkit.org", "leoyang.webkit@gmail.com"], "leoyang"),
+    Committer("Lucas De Marchi", ["demarchi@webkit.org", "lucas.demarchi@profusion.mobi"], "demarchi"),
+    Committer("Lucas Forschler", ["lforschler@apple.com"], "lforschler"),
+    Committer("Luciano Wolf", "luciano.wolf@openbossa.org", "luck"),
+    Committer("Luke Macpherson", ["macpherson@chromium.org", "macpherson@google.com"], "macpherson"),
+    Committer("Mads Ager", "ager@chromium.org"),
+    Committer("Mahesh Kulkarni", ["mahesh.kulkarni@nokia.com", "maheshk@webkit.org"], "maheshk"),
+    Committer("Marcus Voltis Bulach", "bulach@chromium.org"),
+    Committer("Mario Sanchez Prada", ["msanchez@igalia.com", "mario@webkit.org"], "msanchez"),
+    Committer("Mark Lam", "mark.lam@apple.com", "mlam"),
+    Committer("Mary Wu", ["mary.wu@torchmobile.com.cn", "wwendy2007@gmail.com"], "marywu"),
+    Committer("Matt Delaney", "mdelaney@apple.com"),
+    Committer("Matt Lilek", ["mlilek@apple.com", "webkit@mattlilek.com", "pewtermoose@webkit.org"], "pewtermoose"),
+    Committer("Matt Perry", "mpcomplete@chromium.org"),
+    Committer("Maxime Britto", ["maxime.britto@gmail.com", "britto@apple.com"]),
+    Committer("Maxime Simon", ["simon.maxime@gmail.com", "maxime.simon@webkit.org"], "maxime.simon"),
+    Committer(u"Michael Br\u00fcning", ["michaelbruening@gmail.com", "michael.bruning@digia.com", "michael.bruning@nokia.com"], "mibrunin"),
+    Committer("Michael Nordman", "michaeln@google.com", "michaeln"),
+    Committer("Michelangelo De Simone", "michelangelo@webkit.org", "michelangelo"),
+    Committer("Mihnea Ovidenie", "mihnea@adobe.com", "mihnea"),
+    Committer("Mike Belshe", ["mbelshe@chromium.org", "mike@belshe.com"]),
+    Committer("Mike Fenton", ["mifenton@rim.com", "mike.fenton@torchmobile.com"], "mfenton"),
+    Committer("Mike Lawther", "mikelawther@chromium.org", "mikelawther"),
+    Committer("Mike Reed", "reed@google.com", "reed"),
+    Committer("Mike Thole", ["mthole@mikethole.com", "mthole@apple.com"]),
+    Committer("Mike West", ["mkwst@chromium.org", "mike@mikewest.org"], "mkwst"),
+    Committer("Mikhail Naganov", "mnaganov@chromium.org"),
+    Committer("Naoki Takano", ["honten@chromium.org", "takano.naoki@gmail.com"], "honten"),
+    Committer("Nat Duca", ["nduca@chromium.org", "nduca@google.com"], "nduca"),
+    Committer("Nayan Kumar K", ["nayankk@motorola.com", "nayankk@gmail.com"], "xc0ffee"),
+    Committer("Nico Weber", ["thakis@chromium.org", "thakis@google.com"], "thakis"),
+    Committer("Noel Gordon", ["noel.gordon@gmail.com", "noel@chromium.org", "noel@google.com"], "noel"),
+    Committer("Pam Greene", "pam@chromium.org", "pamg"),
+    Committer("Patrick Gansterer", ["paroga@paroga.com", "paroga@webkit.org"], "paroga"),
+    Committer("Pavel Podivilov", "podivilov@chromium.org", "podivilov"),
+    Committer("Peter Beverloo", ["peter@chromium.org", "peter@webkit.org", "beverloo@google.com"], "beverloo"),
+    Committer("Peter Kasting", ["pkasting@google.com", "pkasting@chromium.org"], "pkasting"),
+    Committer("Peter Varga", ["pvarga@webkit.org", "pvarga@inf.u-szeged.hu"], "stampho"),
+    Committer("Philip Rogers", ["pdr@google.com", "pdr@chromium.org"], "pdr"),
+    Committer("Pierre d'Herbemont", ["pdherbemont@free.fr", "pdherbemont@apple.com"], "pdherbemont"),
+    Committer("Pierre-Olivier Latour", "pol@apple.com", "pol"),
+    Committer("Pierre Rossi", "pierre.rossi@gmail.com", "elproxy"),
+    Committer("Pratik Solanki", "psolanki@apple.com", "psolanki"),
+    Committer("Qi Zhang", "qi.zhang02180@gmail.com", "qi"),
+    Committer("Rafael Antognolli", "antognolli@profusion.mobi", "antognolli"),
+    Committer("Rafael Brandao", "rafael.lobo@openbossa.org", "rafaelbrandao"),
+    Committer("Rafael Weinstein", "rafaelw@chromium.org", "rafaelw"),
+    Committer("Raphael Kubo da Costa", ["rakuco@webkit.org", "rakuco@FreeBSD.org", "raphael.kubo.da.costa@intel.com"], "rakuco"),
+    Committer("Ravi Kasibhatla", "ravi.kasibhatla@motorola.com", "kphanee"),
+    Committer("Renata Hodovan", "reni@webkit.org", "reni"),
+    Committer("Robert Hogan", ["robert@webkit.org", "robert@roberthogan.net", "lists@roberthogan.net"], "mwenge"),
+    Committer("Robert Kroeger", "rjkroege@chromium.org", "rjkroege"),
+    Committer("Roger Fong", "roger_fong@apple.com", "rfong"),
+    Committer("Roland Steiner", "rolandsteiner@chromium.org"),
+    Committer("Ryuan Choi", "ryuan.choi@samsung.com", "ryuan"),
+    Committer("Satish Sampath", "satish@chromium.org"),
+    Committer("Scott Violet", "sky@chromium.org", "sky"),
+    Committer("Sergio Villar Senin", ["svillar@igalia.com", "sergio@webkit.org"], "svillar"),
+    Committer("Shawn Singh", "shawnsingh@chromium.org", "shawnsingh"),
+    Committer("Shinya Kawanaka", "shinyak@chromium.org", "shinyak"),
+    Committer("Siddharth Mathur", "siddharth.mathur@nokia.com", "simathur"),
+    Committer("Simon Pena", "spena@igalia.com", "spenap"),
+    Committer("Stephen Chenney", "schenney@chromium.org", "schenney"),
+    Committer("Steve Lacey", "sjl@chromium.org", "stevela"),
+    Committer("Taiju Tsuiki", "tzik@chromium.org", "tzik"),
+    Committer("Takashi Sakamoto", "tasak@google.com", "tasak"),
+    Committer("Takashi Toyoshima", "toyoshim@chromium.org", "toyoshim"),
+    Committer("Terry Anderson", "tdanderson@chromium.org", "tdanderson"),
+    Committer("Thomas Sepez", "tsepez@chromium.org", "tsepez"),
+    Committer("Tom Hudson", ["tomhudson@google.com", "tomhudson@chromium.org"], "tomhudson"),
+    Committer("Tom Zakrajsek", "tomz@codeaurora.org", "tomz"),
+    Committer("Tommy Widenflycht", "tommyw@google.com", "tommyw"),
+    Committer("Trey Matteson", "trey@usa.net", "trey"),
+    Committer("Tristan O'Tierney", ["tristan@otierney.net", "tristan@apple.com"]),
+    Committer("Vangelis Kokkevis", "vangelis@chromium.org", "vangelis"),
+    Committer("Viatcheslav Ostapenko", ["ostap73@gmail.com", "v.ostapenko@samsung.com", "v.ostapenko@sisa.samsung.com"], "ostap"),
+    Committer("Victor Carbune", "victor@rosedu.org", "vcarbune"),
+    Committer("Victor Wang", "victorw@chromium.org", "victorw"),
+    Committer("Victoria Kirst", ["vrk@chromium.org", "vrk@google.com"], "vrk"),
+    Committer("Vincent Scheib", "scheib@chromium.org", "scheib"),
+    Committer("Vitaly Repeshko", "vitalyr@chromium.org"),
+    Committer("William Siegrist", "wsiegrist@apple.com", "wms"),
+    Committer("W. James MacLean", "wjmaclean@chromium.org", "seumas"),
+    Committer("Xianzhu Wang", ["wangxianzhu@chromium.org", "phnixwxz@gmail.com", "wangxianzhu@google.com"], "wangxianzhu"),
+    Committer("Xiaomei Ji", "xji@chromium.org", "xji"),
+    Committer("Yael Aharon", ["yael.aharon.m@gmail.com", "yael@webkit.org"], "yael"),
+    Committer("Yaar Schnitman", ["yaar@chromium.org", "yaar@google.com"]),
+    Committer("Yi Shen", ["yi.4.shen@nokia.com", "shenyi2006@gmail.com"]),
+    Committer("Yongjun Zhang", ["yongjun.zhang@nokia.com", "yongjun_zhang@apple.com"]),
+    Committer("Yoshifumi Inoue", "yosin@chromium.org", "yosin"),
+    Committer("Yuqiang Xian", "yuqiang.xian@intel.com"),
+    Committer("Yuzo Fujishima", "yuzo@google.com", "yuzo"),
+    Committer("Zalan Bujtas", ["zbujtas@gmail.com", "zalan.bujtas@nokia.com"], "zalan"),
+    Committer("Zeno Albisser", ["zeno@webkit.org", "zeno.albisser@nokia.com"], "zalbisser"),
+    Committer("Zhenyao Mo", "zmo@google.com", "zhenyao"),
+    Committer("Zoltan Horvath", ["zoltan@webkit.org", "hzoltan@inf.u-szeged.hu", "horvath.zoltan.6@stud.u-szeged.hu"], "zoltan"),
+    Committer(u"\u017dan Dober\u0161ek", "zandobersek@gmail.com", "zdobersek"),
+]
+
+
+# This is intended as a canonical, machine-readable list of all reviewers for
+# WebKit.  If your name is missing here and you are a reviewer, please add it.
+# No review needed.
+
+
+reviewers_list = [
+    Reviewer("Abhishek Arya", "inferno@chromium.org", "inferno-sec"),
+    Reviewer("Ada Chan", "adachan@apple.com", "chanada"),
+    Reviewer("Adam Barth", "abarth@webkit.org", "abarth"),
+    Reviewer("Adam Roben", ["aroben@webkit.org", "aroben@apple.com"], "aroben"),
+    Reviewer("Adam Treat", ["treat@kde.org", "treat@webkit.org", "atreat@rim.com"], "manyoso"),
+    Reviewer("Adele Peterson", "adele@apple.com", "adele"),
+    Reviewer("Adrienne Walker", ["enne@google.com", "enne@chromium.org"], "enne"),
+    Reviewer("Alejandro G. Castro", ["alex@igalia.com", "alex@webkit.org"], "alexg__"),
+    Reviewer("Alexander Pavlov", ["apavlov@chromium.org", "pavlov81@gmail.com"], "apavlov"),
+    Reviewer("Alexey Proskuryakov", ["ap@webkit.org", "ap@apple.com"], "ap"),
+    Reviewer("Alexis Menard", ["alexis@webkit.org", "menard@kde.org"], "darktears"),
+    Reviewer("Alice Liu", "alice.liu@apple.com", "aliu"),
+    Reviewer("Alp Toker", ["alp@nuanti.com", "alp@atoker.com", "alp@webkit.org"], "alp"),
+    Reviewer("Anders Carlsson", ["andersca@apple.com", "acarlsson@apple.com"], "andersca"),
+    Reviewer("Andreas Kling", ["kling@webkit.org", "awesomekling@apple.com", "andreas.kling@nokia.com"], "kling"),
+    Reviewer("Andy Estes", "aestes@apple.com", "estes"),
+    Reviewer("Antonio Gomes", ["tonikitoo@webkit.org", "agomes@rim.com"], "tonikitoo"),
+    Reviewer("Antti Koivisto", ["koivisto@iki.fi", "antti@apple.com", "antti.j.koivisto@nokia.com"], "anttik"),
+    Reviewer("Ariya Hidayat", ["ariya.hidayat@gmail.com", "ariya@sencha.com", "ariya@webkit.org"], "ariya"),
+    Reviewer("Benjamin Poulain", ["benjamin@webkit.org", "benjamin.poulain@nokia.com", "ikipou@gmail.com"], "benjaminp"),
+    Reviewer("Beth Dakin", "bdakin@apple.com", "dethbakin"),
+    Reviewer("Brady Eidson", "beidson@apple.com", "bradee-oh"),
+    Reviewer("Brent Fulgham", "bfulgham@webkit.org", "bfulgham"),
+    Reviewer("Brian Weinstein", "bweinstein@apple.com", "bweinstein"),
+    Reviewer("Caio Marcelo de Oliveira Filho", ["cmarcelo@webkit.org", "caio.oliveira@openbossa.org"], "cmarcelo"),
+    Reviewer("Cameron Zwarich", ["zwarich@apple.com", "cwzwarich@apple.com", "cwzwarich@webkit.org"]),
+    Reviewer("Carlos Garcia Campos", ["cgarcia@igalia.com", "carlosgc@gnome.org", "carlosgc@webkit.org"], "KaL"),
+    Reviewer("Chang Shu", ["cshu@webkit.org", "c.shu@sisa.samsung.com"], "cshu"),
+    Reviewer("Chris Blumenberg", "cblu@apple.com", "cblu"),
+    Reviewer("Chris Marrin", "cmarrin@apple.com", "cmarrin"),
+    Reviewer("Chris Fleizach", "cfleizach@apple.com", "cfleizach"),
+    Reviewer("Chris Jerdonek", "cjerdonek@webkit.org", "cjerdonek"),
+    Reviewer("Chris Rogers", "crogers@google.com", "crogers"),
+    Reviewer(u"Csaba Osztrogon\u00e1c", "ossy@webkit.org", "ossy"),
+    Reviewer("Dan Bernstein", ["mitz@webkit.org", "mitz@apple.com"], "mitzpettel"),
+    Reviewer("Daniel Bates", ["dbates@webkit.org", "dbates@rim.com"], "dydz"),
+    Reviewer("Darin Adler", "darin@apple.com", "darin"),
+    Reviewer("Darin Fisher", ["fishd@chromium.org", "darin@chromium.org"], "fishd"),
+    Reviewer("David Harrison", "harrison@apple.com", "harrison"),
+    Reviewer("David Hyatt", "hyatt@apple.com", ["dhyatt", "hyatt"]),
+    Reviewer("David Kilzer", ["ddkilzer@webkit.org", "ddkilzer@apple.com"], "ddkilzer"),
+    Reviewer("David Levin", "levin@chromium.org", "dave_levin"),
+    Reviewer("Dean Jackson", "dino@apple.com", "dino"),
+    Reviewer("Dimitri Glazkov", "dglazkov@chromium.org", "dglazkov"),
+    Reviewer("Dirk Pranke", "dpranke@chromium.org", "dpranke"),
+    Reviewer("Dirk Schulze", "krit@webkit.org", "krit"),
+    Reviewer("Dmitry Titov", "dimich@chromium.org", "dimich"),
+    Reviewer("Don Melton", "gramps@apple.com", "gramps"),
+    Reviewer("Dumitru Daniliuc", "dumi@chromium.org", "dumi"),
+    Reviewer("Emil A Eklund", "eae@chromium.org", "eae"),
+    Reviewer("Enrica Casucci", "enrica@apple.com", "enrica"),
+    Reviewer("Eric Carlson", "eric.carlson@apple.com", "eric_carlson"),
+    Reviewer("Eric Seidel", "eric@webkit.org", "eseidel"),
+    Reviewer("Filip Pizlo", "fpizlo@apple.com", "pizlo"),
+    Reviewer("Gavin Barraclough", "barraclough@apple.com", "gbarra"),
+    Reviewer("Geoffrey Garen", "ggaren@apple.com", "ggaren"),
+    Reviewer("George Staikos", ["staikos@kde.org", "staikos@webkit.org"]),
+    Reviewer("Gustavo Noronha Silva", ["gns@gnome.org", "kov@webkit.org", "gustavo.noronha@collabora.co.uk", "gustavo.noronha@collabora.com"], "kov"),
+    Reviewer("Gyuyoung Kim", ["gyuyoung.kim@samsung.com", "gyuyoung.kim@webkit.org"], "gyuyoung"),
+    Reviewer("Hajime Morita", ["morrita@google.com", "morrita@chromium.org"], "morrita"),
+    Reviewer("Holger Freyther", ["zecke@selfish.org", "zecke@webkit.org"], "zecke"),
+    Reviewer("James Robinson", ["jamesr@chromium.org", "jamesr@google.com"], "jamesr"),
+    Reviewer("Jan Alonzo", ["jmalonzo@gmail.com", "jmalonzo@webkit.org"], "janm"),
+    Reviewer("Jeremy Orlow", ["jorlow@webkit.org", "jorlow@chromium.org"], "jorlow"),
+    Reviewer("Jessie Berlin", ["jberlin@webkit.org", "jberlin@apple.com"], "jessieberlin"),
+    Reviewer("Jian Li", "jianli@chromium.org", "jianli"),
+    Reviewer("Jocelyn Turcotte", ["jocelyn.turcotte@digia.com", "jocelyn.turcotte@nokia.com"], "jturcotte"),
+    Reviewer("Jochen Eisinger", "jochen@chromium.org", "jochen__"),
+    Reviewer("John Sullivan", "sullivan@apple.com", "sullivan"),
+    Reviewer("Jon Honeycutt", "jhoneycutt@apple.com", "jhoneycutt"),
+    Reviewer("Joseph Pecoraro", ["joepeck@webkit.org", "pecoraro@apple.com"], "JoePeck"),
+    Reviewer("Julien Chaffraix", ["jchaffraix@webkit.org", "julien.chaffraix@gmail.com", "jchaffraix@google.com", "jchaffraix@codeaurora.org"], "jchaffraix"),
+    Reviewer("Justin Garcia", "justin.garcia@apple.com", "justing"),
+    Reviewer("Ken Kocienda", "kocienda@apple.com"),
+    Reviewer("Kenneth Rohde Christiansen", ["kenneth@webkit.org", "kenneth.r.christiansen@intel.com", "kenneth.christiansen@gmail.com"], ["kenneth_", "kenneth", "kenne"]),
+    Reviewer("Kenneth Russell", ["kbr@google.com", "kbr@chromium.org"], ["kbr_google", "kbrgg"]),
+    Reviewer("Kent Tamura", ["tkent@chromium.org", "tkent@google.com"], "tkent"),
+    Reviewer("Kentaro Hara", ["haraken@chromium.org"], "haraken"),
+    Reviewer("Kevin Decker", "kdecker@apple.com", "superkevin"),
+    Reviewer("Kevin McCullough", "kmccullough@apple.com", "maculloch"),
+    Reviewer("Kevin Ollivier", ["kevino@theolliviers.com", "kevino@webkit.org"], "kollivier"),
+    Reviewer("Lars Knoll", ["lars@trolltech.com", "lars@kde.org", "lars.knoll@nokia.com"], "lars"),
+    Reviewer("Laszlo Gombos", ["laszlo.gombos@webkit.org", "l.gombos@samsung.com", "laszlo.1.gombos@nokia.com"], "lgombos"),
+    Reviewer("Levi Weintraub", ["leviw@chromium.org", "leviw@google.com", "lweintraub@apple.com"], "leviw"),
+    Reviewer("Luiz Agostini", ["luiz@webkit.org", "luiz.agostini@openbossa.org"], "lca"),
+    Reviewer("Maciej Stachowiak", "mjs@apple.com", "othermaciej"),
+    Reviewer("Mark Hahnenberg", "mhahnenberg@apple.com", "mhahnenberg"),
+    Reviewer("Mark Rowe", "mrowe@apple.com", "bdash"),
+    Reviewer("Martin Robinson", ["mrobinson@webkit.org", "mrobinson@igalia.com", "martin.james.robinson@gmail.com"], "mrobinson"),
+    Reviewer("Michael Saboff", "msaboff@apple.com", "msaboff"),
+    Reviewer("Mihai Parparita", "mihaip@chromium.org", "mihaip"),
+    Reviewer("Nate Chapin", "japhet@chromium.org", ["japhet", "natechapin"]),
+    Reviewer("Nikolas Zimmermann", ["zimmermann@kde.org", "zimmermann@physik.rwth-aachen.de", "zimmermann@webkit.org", "nzimmermann@rim.com"], "wildfox"),
+    Reviewer("Noam Rosenthal", "noam.rosenthal@nokia.com", "noamr"),
+    Reviewer("Ojan Vafai", "ojan@chromium.org", "ojan"),
+    Reviewer("Oliver Hunt", "oliver@apple.com", "olliej"),
+    Reviewer("Pavel Feldman", ["pfeldman@chromium.org", "pfeldman@google.com"], "pfeldman"),
+    Reviewer("Philippe Normand", ["pnormand@igalia.com", "philn@webkit.org", "philn@igalia.com"], ["philn-tp", "pnormand"]),
+    Reviewer("Richard Williamson", "rjw@apple.com", "rjw"),
+    Reviewer("Rob Buis", ["rwlbuis@gmail.com", "rwlbuis@webkit.org", "rbuis@rim.com"], "rwlbuis"),
+    Reviewer("Ryosuke Niwa", "rniwa@webkit.org", "rniwa"),
+    Reviewer("Sam Weinig", ["sam@webkit.org", "weinig@apple.com"], "weinig"),
+    Reviewer("Shinichiro Hamaji", "hamaji@chromium.org", "hamaji"),
+    Reviewer("Simon Fraser", "simon.fraser@apple.com", "smfr"),
+    Reviewer("Simon Hausmann", ["hausmann@webkit.org", "hausmann@kde.org", "simon.hausmann@digia.com"], "tronical"),
+    Reviewer("Stephanie Lewis", "slewis@apple.com", "sundiamonde"),
+    Reviewer("Stephen White", "senorblanco@chromium.org", "senorblanco"),
+    Reviewer("Steve Block", "steveblock@google.com", "steveblock"),
+    Reviewer("Steve Falkenburg", "sfalken@apple.com", "sfalken"),
+    Reviewer("Tim Omernick", "timo@apple.com"),
+    Reviewer("Timothy Hatcher", ["timothy@apple.com", "timothy@hatcher.name"], "xenon"),
+    Reviewer("Tim Horton", "timothy_horton@apple.com", "thorton"),
+    Reviewer("Tony Chang", "tony@chromium.org", "tony^work"),
+    Reviewer("Tony Gentilcore", "tonyg@chromium.org", "tonyg-cr"),
+    Reviewer(u"Tor Arne Vestb\u00f8", ["vestbo@webkit.org", "tor.arne.vestbo@nokia.com"], "torarne"),
+    Reviewer("Vicki Murley", "vicki@apple.com"),
+    Reviewer("Vsevolod Vlasov", "vsevik@chromium.org", "vsevik"),
+    Reviewer("Xan Lopez", ["xan.lopez@gmail.com", "xan@gnome.org", "xan@webkit.org", "xlopez@igalia.com"], "xan"),
+    Reviewer("Yong Li", ["yoli@rim.com", "yong.li.webkit@gmail.com"], "yoli"),
+    Reviewer("Yury Semikhatsky", "yurys@chromium.org", "yurys"),
+    Reviewer("Yuta Kitamura", "yutak@chromium.org", "yutak"),
+    Reviewer("Zack Rusin", "zack@kde.org", "zackr"),
+    Reviewer("Zoltan Herczeg", ["zherczeg@webkit.org", "zherczeg@inf.u-szeged.hu"], "zherczeg"),
+]
+
+class CommitterList(object):
+
+    # Committers and reviewers are passed in to allow easy testing
+    def __init__(self,
+                 committers=committers_unable_to_review,
+                 reviewers=reviewers_list,
+                 contributors=contributors_who_are_not_committers,
+                 watchers=watchers_who_are_not_contributors):
+        self._accounts = watchers + contributors + committers + reviewers
+        self._contributors = contributors + committers + reviewers
+        self._committers = committers + reviewers
+        self._reviewers = reviewers
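+        # The dicts below are lookup caches, populated lazily by _name_to_contributor_map(), _email_to_account_map() and _login_to_account_map().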
+        self._contributors_by_name = {}
+        self._accounts_by_email = {}
+        self._accounts_by_login = {}
+
+    def accounts(self):
+        return self._accounts
+
+    def contributors(self):
+        return self._contributors
+
+    def committers(self):
+        return self._committers
+
+    def reviewers(self):
+        return self._reviewers
+
+    def _name_to_contributor_map(self):
+        if not len(self._contributors_by_name):
+            for contributor in self._contributors:
+                assert(contributor.full_name)
+                assert(contributor.full_name.lower() not in self._contributors_by_name)  # We should never have duplicate names.
+                self._contributors_by_name[contributor.full_name.lower()] = contributor
+        return self._contributors_by_name
+
+    def _email_to_account_map(self):
+        if not len(self._accounts_by_email):
+            for account in self._accounts:
+                for email in account.emails:
+                    assert(email not in self._accounts_by_email)  # We should never have duplicate emails.
+                    self._accounts_by_email[email] = account
+        return self._accounts_by_email
+
+    def _login_to_account_map(self):
+        if not len(self._accounts_by_login):
+            for account in self._accounts:
+                if account.emails:
+                    login = account.bugzilla_email()
+                    assert(login not in self._accounts_by_login)  # We should never have duplicate emails.
+                    self._accounts_by_login[login] = account
+        return self._accounts_by_login
+
+    def _contributor_only(self, record):
+        if record and not record.is_contributor:
+            return None
+        return record
+
+    def _committer_only(self, record):
+        if record and not record.can_commit:
+            return None
+        return record
+
+    def _reviewer_only(self, record):
+        if record and not record.can_review:
+            return None
+        return record
+
+    def committer_by_name(self, name):
+        return self._committer_only(self.contributor_by_name(name))
+
+    def contributor_by_irc_nickname(self, irc_nickname):
+        for contributor in self.contributors():
+            # FIXME: This should do case-insensitive comparison or assert that all IRC nicknames are in lowercase
+            if contributor.irc_nicknames and irc_nickname in contributor.irc_nicknames:
+                return contributor
+        return None
+
+    def contributors_by_search_string(self, string):
+        return filter(lambda contributor: contributor.contains_string(string), self.contributors())
+
+    def contributors_by_email_username(self, string):
+        string = string + '@'
+        result = []
+        for contributor in self.contributors():
+            for email in contributor.emails:
+                if email.startswith(string):
+                    result.append(contributor)
+                    break
+        return result
+
+    def _contributor_name_shorthands(self, contributor):
+        if ' ' not in contributor.full_name:
+            return []
+        split_fullname = contributor.full_name.split()
+        first_name = split_fullname[0]
+        last_name = split_fullname[-1]
+        return first_name, last_name, first_name + last_name[0], first_name + ' ' + last_name[0]
+
+    def _tokenize_contributor_name(self, contributor):
+        full_name_in_lowercase = contributor.full_name.lower()
+        tokens = [full_name_in_lowercase] + full_name_in_lowercase.split()
+        if contributor.irc_nicknames:
+            return tokens + [nickname.lower() for nickname in contributor.irc_nicknames if len(nickname) > 5]
+        return tokens
+
+    def contributors_by_fuzzy_match(self, string):
+        string_in_lowercase = string.lower()
+
+        # 1. Exact match for fullname, email and irc_nicknames
+        account = self.contributor_by_name(string_in_lowercase) or self.account_by_email(string_in_lowercase) or self.contributor_by_irc_nickname(string_in_lowercase)
+        if account:
+            return [account], 0
+
+        # 2. Exact match for email username (before @)
+        accounts = self.contributors_by_email_username(string_in_lowercase)
+        if accounts and len(accounts) == 1:
+            return accounts, 0
+
+        # 3. Exact match for first name, last name, and first name + initial combinations such as "Dan B" and "Tim H"
+        accounts = [contributor for contributor in self.contributors() if string in self._contributor_name_shorthands(contributor)]
+        if accounts and len(accounts) == 1:
+            return accounts, 0
+
+        # 4. Finally, fuzzy-match using edit-distance
+        string = string_in_lowercase
+        contributorWithMinDistance = []
+        minDistance = len(string) / 2 - 1
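+        # Only accept matches whose edit distance is less than roughly half
+        # the length of the query string.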
+        for contributor in self.contributors():
+            tokens = self._tokenize_contributor_name(contributor)
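+            # The edit distance between two strings is at least the difference
+            # of their lengths, so tokens that differ too much in length can
+            # be skipped without computing it.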
+            editdistances = [edit_distance(token, string) for token in tokens if abs(len(token) - len(string)) <= minDistance]
+            if not editdistances:
+                continue
+            distance = min(editdistances)
+            if distance == minDistance:
+                contributorWithMinDistance.append(contributor)
+            elif distance < minDistance:
+                contributorWithMinDistance = [contributor]
+                minDistance = distance
+        if not len(contributorWithMinDistance):
+            return [], len(string)
+        return contributorWithMinDistance, minDistance
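+
+    # Illustrative usage of contributors_by_fuzzy_match() above (not part of
+    # the original change), mirroring the unit tests:
+    #
+    #   matches, distance = CommitterList().contributors_by_fuzzy_match('Geoff Garen')
+    #   # matches[0].full_name == 'Geoffrey Garen', distance == 3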
+
+    def account_by_login(self, login):
+        return self._login_to_account_map().get(login.lower()) if login else None
+
+    def account_by_email(self, email):
+        return self._email_to_account_map().get(email.lower()) if email else None
+
+    def contributor_by_name(self, name):
+        return self._name_to_contributor_map().get(name.lower()) if name else None
+
+    def contributor_by_email(self, email):
+        return self._contributor_only(self.account_by_email(email))
+
+    def committer_by_email(self, email):
+        return self._committer_only(self.account_by_email(email))
+
+    def reviewer_by_email(self, email):
+        return self._reviewer_only(self.account_by_email(email))
diff --git a/Tools/Scripts/webkitpy/common/config/committers_unittest.py b/Tools/Scripts/webkitpy/common/config/committers_unittest.py
new file mode 100644
index 0000000..1c8c86a
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/config/committers_unittest.py
@@ -0,0 +1,368 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+from webkitpy.common.config.committers import Account, CommitterList, Contributor, Committer, Reviewer
+
+class CommittersTest(unittest.TestCase):
+    def test_committer_lookup(self):
+        account = Account('Test Zero', ['zero@test.com', 'zero@gmail.com'], 'zero')
+        committer = Committer('Test One', 'one@test.com', 'one')
+        reviewer = Reviewer('Test Two', ['two@test.com', 'Two@rad.com', 'so_two@gmail.com'])
+        contributor = Contributor('Test Three', ['Three@test.com'], 'three')
+        contributor_with_two_nicknames = Contributor('Other Four', ['otherfour@webkit.org', 'otherfour@webkit2.org'], ['four', 'otherfour'])
+        contributor_with_same_email_username = Contributor('Yet Another Four', ['otherfour@webkit.com'], ['yetanotherfour'])
+        committer_list = CommitterList(watchers=[account], committers=[committer], reviewers=[reviewer],
+            contributors=[contributor, contributor_with_two_nicknames, contributor_with_same_email_username])
+
+        # Test valid committer, reviewer and contributor lookup
+        self.assertEqual(committer_list.account_by_email('zero@test.com'), account)
+        self.assertEqual(committer_list.committer_by_email('one@test.com'), committer)
+        self.assertEqual(committer_list.reviewer_by_email('two@test.com'), reviewer)
+        self.assertEqual(committer_list.committer_by_email('two@test.com'), reviewer)
+        self.assertEqual(committer_list.committer_by_email('two@rad.com'), reviewer)
+        self.assertEqual(committer_list.reviewer_by_email('so_two@gmail.com'), reviewer)
+        self.assertEqual(committer_list.contributor_by_email('three@test.com'), contributor)
+
+        # Test committer and contributor lookup by full name
+        self.assertEqual(committer_list.committer_by_name("Test One"), committer)
+        self.assertEqual(committer_list.committer_by_name("Test Two"), reviewer)
+        self.assertEqual(committer_list.committer_by_name("Test Three"), None)
+        self.assertEqual(committer_list.contributor_by_name("Test Three"), contributor)
+        self.assertEqual(committer_list.contributor_by_name("test one"), committer)
+        self.assertEqual(committer_list.contributor_by_name("test two"), reviewer)
+        self.assertEqual(committer_list.contributor_by_name("test three"), contributor)
+
+        # Test that the first email is assumed to be the Bugzilla email address (for now)
+        self.assertEqual(committer_list.committer_by_email('two@rad.com').bugzilla_email(), 'two@test.com')
+
+        # Test lookup by login email address
+        self.assertEqual(committer_list.account_by_login('zero@test.com'), account)
+        self.assertEqual(committer_list.account_by_login('zero@gmail.com'), None)
+        self.assertEqual(committer_list.account_by_login('one@test.com'), committer)
+        self.assertEqual(committer_list.account_by_login('two@test.com'), reviewer)
+        self.assertEqual(committer_list.account_by_login('Two@rad.com'), None)
+        self.assertEqual(committer_list.account_by_login('so_two@gmail.com'), None)
+
+        # Test that a known committer is not returned during reviewer lookup
+        self.assertEqual(committer_list.reviewer_by_email('one@test.com'), None)
+        self.assertEqual(committer_list.reviewer_by_email('three@test.com'), None)
+        # and likewise that a known contributor is not returned for committer lookup.
+        self.assertEqual(committer_list.committer_by_email('three@test.com'), None)
+
+        # Test that an unknown email address fails both committer and reviewer lookup
+        self.assertEqual(committer_list.committer_by_email('bar@bar.com'), None)
+        self.assertEqual(committer_list.reviewer_by_email('bar@bar.com'), None)
+
+        # Test that emails returns a list.
+        self.assertEqual(committer.emails, ['one@test.com'])
+
+        self.assertEqual(committer.irc_nicknames, ['one'])
+        self.assertEqual(committer_list.contributor_by_irc_nickname('one'), committer)
+        self.assertEqual(committer_list.contributor_by_irc_nickname('three'), contributor)
+        self.assertEqual(committer_list.contributor_by_irc_nickname('four'), contributor_with_two_nicknames)
+        self.assertEqual(committer_list.contributor_by_irc_nickname('otherfour'), contributor_with_two_nicknames)
+
+        # Test that the lists returned are as we expect them.
+        self.assertEqual(committer_list.contributors(), [contributor, contributor_with_two_nicknames, contributor_with_same_email_username, committer, reviewer])
+        self.assertEqual(committer_list.committers(), [committer, reviewer])
+        self.assertEqual(committer_list.reviewers(), [reviewer])
+
+        self.assertEqual(committer_list.contributors_by_search_string('test'), [contributor, committer, reviewer])
+        self.assertEqual(committer_list.contributors_by_search_string('rad'), [reviewer])
+        self.assertEqual(committer_list.contributors_by_search_string('Two'), [reviewer])
+
+        self.assertEqual(committer_list.contributors_by_email_username("one"), [committer])
+        self.assertEqual(committer_list.contributors_by_email_username("four"), [])
+        self.assertEqual(committer_list.contributors_by_email_username("otherfour"), [contributor_with_two_nicknames, contributor_with_same_email_username])
+
+    def _assert_fuzz_match(self, text, name_of_expected_contributor, expected_distance):
+        committers = CommitterList()
+        contributors, distance = committers.contributors_by_fuzzy_match(text)
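+        # name_of_expected_contributor may be a single full name, a list of
+        # names (when the match is ambiguous), or None when no match is expected.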
+        if type(name_of_expected_contributor) is list:
+            expected_names = name_of_expected_contributor
+        else:
+            expected_names = [name_of_expected_contributor] if name_of_expected_contributor else []
+        self.assertEqual(([contributor.full_name for contributor in contributors], distance), (expected_names, expected_distance))
+
+    # Basic testing of the edit distance matching ...
+    def test_contributors_by_fuzzy_match(self):
+        self._assert_fuzz_match('Geoff Garen', 'Geoffrey Garen', 3)
+        self._assert_fuzz_match('Kenneth Christiansen', 'Kenneth Rohde Christiansen', 6)
+        self._assert_fuzz_match('Sam', 'Sam Weinig', 0)
+        self._assert_fuzz_match('me', None, 2)
+
+    # The remaining tests test that certain names are resolved in a specific way.
+    # We break this up into multiple tests so that each is faster and they can
+    # be run in parallel. Unfortunately each test scans the entire committers list,
+    # so these are inherently slow (see https://bugs.webkit.org/show_bug.cgi?id=79179).
+    #
+    # Commented-out lines are test cases imported from bug 26533 that do not yet pass.
+
+    def integration_test_contributors__none(self):
+        self._assert_fuzz_match('myself', None, 6)
+        self._assert_fuzz_match('others', None, 6)
+        self._assert_fuzz_match('BUILD FIX', None, 9)
+
+    def integration_test_contributors__none_2(self):
+        self._assert_fuzz_match('but Dan Bernstein also reviewed', None, 31)
+        self._assert_fuzz_match('asked thoughtful questions', None, 26)
+        self._assert_fuzz_match('build fix of mac', None, 16)
+
+    def integration_test_contributors__none_3(self):
+        self._assert_fuzz_match('a spell checker', None, 15)
+        self._assert_fuzz_match('nobody, build fix', None, 17)
+        self._assert_fuzz_match('NOBODY (chromium build fix)', None, 27)
+
+    def integration_test_contributors_ada_chan(self):
+        self._assert_fuzz_match('Ada', 'Ada Chan', 0)
+
+    def integration_test_contributors_adele_peterson(self):
+        self._assert_fuzz_match('adele', 'Adele Peterson', 0)
+
+    def integration_test_contributors_adam_roben(self):
+        # self._assert_fuzz_match('Adam', 'Adam Roben', 0)
+        self._assert_fuzz_match('aroben', 'Adam Roben', 0)
+
+    def integration_test_contributors_alexey_proskuryakov(self):
+        # self._assert_fuzz_match('Alexey', 'Alexey Proskuryakov', 0)
+        self._assert_fuzz_match('ap', 'Alexey Proskuryakov', 0)
+        self._assert_fuzz_match('Alexey P', 'Alexey Proskuryakov', 0)
+
+    def integration_test_contributors_alice_liu(self):
+        # self._assert_fuzz_match('Alice', 'Alice Liu', 0)
+        self._assert_fuzz_match('aliu', 'Alice Liu', 0)
+        self._assert_fuzz_match('Liu', 'Alice Liu', 0)
+
+    def integration_test_contributors_alp_toker(self):
+        self._assert_fuzz_match('Alp', 'Alp Toker', 0)
+
+    def integration_test_contributors_anders_carlsson(self):
+        self._assert_fuzz_match('Anders', 'Anders Carlsson', 0)
+        self._assert_fuzz_match('andersca', 'Anders Carlsson', 0)
+        self._assert_fuzz_match('anders', 'Anders Carlsson', 0)
+        self._assert_fuzz_match('Andersca', 'Anders Carlsson', 0)
+
+    def integration_test_contributors_antti_koivisto(self):
+        self._assert_fuzz_match('Antti "printf" Koivisto', 'Antti Koivisto', 9)
+        self._assert_fuzz_match('Antti', 'Antti Koivisto', 0)
+
+    def integration_test_contributors_beth_dakin(self):
+        self._assert_fuzz_match('Beth', 'Beth Dakin', 0)
+        self._assert_fuzz_match('beth', 'Beth Dakin', 0)
+        self._assert_fuzz_match('bdakin', 'Beth Dakin', 0)
+
+    def integration_test_contributors_brady_eidson(self):
+        self._assert_fuzz_match('Brady', 'Brady Eidson', 0)
+        self._assert_fuzz_match('bradee-oh', 'Brady Eidson', 0)
+        self._assert_fuzz_match('Brady', 'Brady Eidson', 0)
+
+    def integration_test_contributors_cameron_zwarich(self):
+        pass  # self._assert_fuzz_match('Cameron', 'Cameron Zwarich', 0)
+        # self._assert_fuzz_match('cpst', 'Cameron Zwarich', 1)
+
+    def integration_test_contributors_chris_blumenberg(self):
+        # self._assert_fuzz_match('Chris', 'Chris Blumenberg', 0)
+        self._assert_fuzz_match('cblu', 'Chris Blumenberg', 0)
+
+    def integration_test_contributors_dan_bernstein(self):
+        self._assert_fuzz_match('Dan', ['Dan Winship', 'Dan Bernstein'], 0)
+        self._assert_fuzz_match('Dan B', 'Dan Bernstein', 0)
+        # self._assert_fuzz_match('mitz', 'Dan Bernstein', 0)
+        self._assert_fuzz_match('Mitz Pettel', 'Dan Bernstein', 1)
+        self._assert_fuzz_match('Mitzpettel', 'Dan Bernstein', 0)
+        self._assert_fuzz_match('Mitz Pettel RTL', 'Dan Bernstein', 5)
+
+    def integration_test_contributors_dan_bernstein_2(self):
+        self._assert_fuzz_match('Teh Mitzpettel', 'Dan Bernstein', 4)
+        # self._assert_fuzz_match('The Mitz', 'Dan Bernstein', 0)
+        self._assert_fuzz_match('Dr Dan Bernstein', 'Dan Bernstein', 3)
+
+    def integration_test_contributors_darin_adler(self):
+        self._assert_fuzz_match('Darin Adler\'', 'Darin Adler', 1)
+        self._assert_fuzz_match('Darin', 'Darin Adler', 0)  # Thankfully "Fisher" is longer than "Adler"
+        self._assert_fuzz_match('darin', 'Darin Adler', 0)
+
+    def integration_test_contributors_david_harrison(self):
+        self._assert_fuzz_match('Dave Harrison', 'David Harrison', 2)
+        self._assert_fuzz_match('harrison', 'David Harrison', 0)
+        self._assert_fuzz_match('Dr. Harrison', 'David Harrison', 4)
+
+    def integration_test_contributors_david_harrison_2(self):
+        self._assert_fuzz_match('Dave Harrson', 'David Harrison', 3)
+        self._assert_fuzz_match('Dave Harrsion', 'David Harrison', 4)  # Damerau-Levenshtein distance is 3
+
+    def integration_test_contributors_david_hyatt(self):
+        self._assert_fuzz_match('Dave Hyatt', 'David Hyatt', 2)
+        self._assert_fuzz_match('Daddy Hyatt', 'David Hyatt', 3)
+        # self._assert_fuzz_match('Dave', 'David Hyatt', 0)  # 'Dave' could mean harrison.
+        self._assert_fuzz_match('hyatt', 'David Hyatt', 0)
+        # self._assert_fuzz_match('Haytt', 'David Hyatt', 0)  # Works if we had implemented Damerau-Levenshtein distance!
+
+    def integration_test_contributors_david_kilzer(self):
+        self._assert_fuzz_match('Dave Kilzer', 'David Kilzer', 2)
+        self._assert_fuzz_match('David D. Kilzer', 'David Kilzer', 3)
+        self._assert_fuzz_match('ddkilzer', 'David Kilzer', 0)
+
+    def integration_test_contributors_don_melton(self):
+        self._assert_fuzz_match('Don', 'Don Melton', 0)
+        self._assert_fuzz_match('Gramps', 'Don Melton', 0)
+
+    def integration_test_contributors_eric_seidel(self):
+        # self._assert_fuzz_match('eric', 'Eric Seidel', 0)
+        self._assert_fuzz_match('Eric S', 'Eric Seidel', 0)
+        # self._assert_fuzz_match('MacDome', 'Eric Seidel', 0)
+        self._assert_fuzz_match('eseidel', 'Eric Seidel', 0)
+
+    def integration_test_contributors_geoffrey_garen(self):
+        # self._assert_fuzz_match('Geof', 'Geoffrey Garen', 4)
+        # self._assert_fuzz_match('Geoff', 'Geoffrey Garen', 3)
+        self._assert_fuzz_match('Geoff Garen', 'Geoffrey Garen', 3)
+        self._assert_fuzz_match('ggaren', 'Geoffrey Garen', 0)
+        # self._assert_fuzz_match('geoff', 'Geoffrey Garen', 0)
+        self._assert_fuzz_match('Geoffrey', 'Geoffrey Garen', 0)
+        self._assert_fuzz_match('GGaren', 'Geoffrey Garen', 0)
+
+    def integration_test_contributors_greg_bolsinga(self):
+        pass  # self._assert_fuzz_match('Greg', 'Greg Bolsinga', 0)
+
+    def integration_test_contributors_holger_freyther(self):
+        self._assert_fuzz_match('Holger', 'Holger Freyther', 0)
+        self._assert_fuzz_match('Holger Hans Peter Freyther', 'Holger Freyther', 11)
+
+    def integration_test_contributors_jon_sullivan(self):
+        # self._assert_fuzz_match('john', 'John Sullivan', 0)
+        self._assert_fuzz_match('sullivan', 'John Sullivan', 0)
+
+    def integration_test_contributors_jon_honeycutt(self):
+        self._assert_fuzz_match('John Honeycutt', 'Jon Honeycutt', 1)
+        # self._assert_fuzz_match('Jon', 'Jon Honeycutt', 0)
+
+    def integration_test_contributors_justin_garcia(self):
+        # self._assert_fuzz_match('justin', 'Justin Garcia', 0)
+        self._assert_fuzz_match('justing', 'Justin Garcia', 0)
+
+    def integration_test_contributors_joseph_pecoraro(self):
+        self._assert_fuzz_match('Joe Pecoraro', 'Joseph Pecoraro', 3)
+
+    def integration_test_contributors_ken_kocienda(self):
+        self._assert_fuzz_match('ken', 'Ken Kocienda', 0)
+        self._assert_fuzz_match('kocienda', 'Ken Kocienda', 0)
+
+    def integration_test_contributors_kenneth_russell(self):
+        self._assert_fuzz_match('Ken Russell', 'Kenneth Russell', 4)
+
+    def integration_test_contributors_kevin_decker(self):
+        self._assert_fuzz_match('kdecker', 'Kevin Decker', 0)
+
+    def integration_test_contributors_kevin_mccullough(self):
+        self._assert_fuzz_match('Kevin M', 'Kevin McCullough', 0)
+        self._assert_fuzz_match('Kevin McCulough', 'Kevin McCullough', 1)
+        self._assert_fuzz_match('mccullough', 'Kevin McCullough', 0)
+
+    def integration_test_contributors_lars_knoll(self):
+        self._assert_fuzz_match('lars', 'Lars Knoll', 0)
+
+    def integration_test_contributors_levi_weintraub(self):
+        self._assert_fuzz_match('levi', 'Levi Weintraub', 0)
+
+    def integration_test_contributors_maciej_stachowiak(self):
+        self._assert_fuzz_match('Maciej', 'Maciej Stachowiak', 0)
+        # self._assert_fuzz_match('mjs', 'Maciej Stachowiak', 0)
+        self._assert_fuzz_match('Maciej S', 'Maciej Stachowiak', 0)
+
+    def integration_test_contributors_mark_rowe(self):
+        # self._assert_fuzz_match('Mark', 'Mark Rowe', 0)
+        self._assert_fuzz_match('bdash', 'Mark Rowe', 0)
+        self._assert_fuzz_match('mrowe', 'Mark Rowe', 0)
+        # self._assert_fuzz_match('Brian Dash', 'Mark Rowe', 0)
+
+    def integration_test_contributors_nikolas_zimmermann(self):
+        # self._assert_fuzz_match('Niko', 'Nikolas Zimmermann', 1)
+        self._assert_fuzz_match('Niko Zimmermann', 'Nikolas Zimmermann', 3)
+        self._assert_fuzz_match('Nikolas', 'Nikolas Zimmermann', 0)
+
+    def integration_test_contributors_oliver_hunt(self):
+        #  self._assert_fuzz_match('Oliver', 'Oliver Hunt', 0)
+        self._assert_fuzz_match('Ollie', 'Oliver Hunt', 1)
+        self._assert_fuzz_match('Olliej', 'Oliver Hunt', 0)
+        self._assert_fuzz_match('Olliej Hunt', 'Oliver Hunt', 3)
+        self._assert_fuzz_match('olliej', 'Oliver Hunt', 0)
+        self._assert_fuzz_match('ollie', 'Oliver Hunt', 1)
+        self._assert_fuzz_match('ollliej', 'Oliver Hunt', 1)
+
+    def integration_test_contributors_richard_williamson(self):
+        self._assert_fuzz_match('Richard', 'Richard Williamson', 0)
+        self._assert_fuzz_match('rjw', 'Richard Williamson', 0)
+
+    def integration_test_contributors_rob_buis(self):
+        self._assert_fuzz_match('Rob', 'Rob Buis', 0)
+        self._assert_fuzz_match('rwlbuis', 'Rob Buis', 0)
+
+    def integration_test_contributors_rniwa(self):
+        self._assert_fuzz_match('rniwa@webkit.org', 'Ryosuke Niwa', 0)
+
+    def disabled_integration_test_contributors_simon_fraser(self):
+        pass  # self._assert_fuzz_match('Simon', 'Simon Fraser', 0)
+
+    def integration_test_contributors_steve_falkenburg(self):
+        self._assert_fuzz_match('Sfalken', 'Steve Falkenburg', 0)
+        # self._assert_fuzz_match('Steve', 'Steve Falkenburg', 0)
+
+    def integration_test_contributors_sam_weinig(self):
+        self._assert_fuzz_match('Sam', 'Sam Weinig', 0)
+        # self._assert_fuzz_match('Weinig Sam', 'weinig', 0)
+        self._assert_fuzz_match('Weinig', 'Sam Weinig', 0)
+        self._assert_fuzz_match('Sam W', 'Sam Weinig', 0)
+        self._assert_fuzz_match('Sammy Weinig', 'Sam Weinig', 2)
+
+    def integration_test_contributors_tim_omernick(self):
+        # self._assert_fuzz_match('timo', 'Tim Omernick', 0)
+        self._assert_fuzz_match('TimO', 'Tim Omernick', 0)
+        # self._assert_fuzz_match('Timo O', 'Tim Omernick', 0)
+        # self._assert_fuzz_match('Tim O.', 'Tim Omernick', 0)
+        self._assert_fuzz_match('Tim O', 'Tim Omernick', 0)
+
+    def integration_test_contributors_timothy_hatcher(self):
+        # self._assert_fuzz_match('Tim', 'Timothy Hatcher', 0)
+        # self._assert_fuzz_match('Tim H', 'Timothy Hatcher', 0)
+        self._assert_fuzz_match('Tim Hatcher', 'Timothy Hatcher', 4)
+        self._assert_fuzz_match('Tim Hatcheri', 'Timothy Hatcher', 5)
+        self._assert_fuzz_match('timothy', 'Timothy Hatcher', 0)
+        self._assert_fuzz_match('thatcher', 'Timothy Hatcher', 1)
+        self._assert_fuzz_match('xenon', 'Timothy Hatcher', 0)
+        self._assert_fuzz_match('Hatcher', 'Timothy Hatcher', 0)
+        # self._assert_fuzz_match('TimH', 'Timothy Hatcher', 0)
+
+    def integration_test_contributors_tor_arne_vestbo(self):
+        self._assert_fuzz_match('Tor Arne', u"Tor Arne Vestb\u00f8", 1)  # Matches IRC nickname
+
+    def integration_test_contributors_vicki_murley(self):
+        self._assert_fuzz_match('Vicki', u"Vicki Murley", 0)
+
+    def integration_test_contributors_zack_rusin(self):
+        self._assert_fuzz_match('Zack', 'Zack Rusin', 0)
diff --git a/Tools/Scripts/webkitpy/common/config/committervalidator.py b/Tools/Scripts/webkitpy/common/config/committervalidator.py
new file mode 100644
index 0000000..6cec3da
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/config/committervalidator.py
@@ -0,0 +1,95 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+# Copyright (c) 2010 Research In Motion Limited. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.config import committers, urls
+
+
+class CommitterValidator(object):
+    def __init__(self, host):
+        self.host = host
+
+    def _committers_py_path(self):
+        # The extension can sometimes be .pyc; we always want .py.
+        committers_path = self.host.filesystem.path_to_module(committers.__name__)
+        (path, extension) = self.host.filesystem.splitext(committers_path)
+        path = self.host.filesystem.relpath(path, self.host.scm().checkout_root)
+        return ".".join([path, "py"])
+
+    def _flag_permission_rejection_message(self, setter_email, flag_name):
+        # This could be queried from the tool.
+        queue_name = "commit-queue"
+        committers_list = self._committers_py_path()
+        message = "%s does not have %s permissions according to %s." % (
+                        setter_email,
+                        flag_name,
+                        urls.view_source_url(committers_list))
+        message += "\n\n- If you do not have %s rights please read %s for instructions on how to use bugzilla flags." % (
+                        flag_name, urls.contribution_guidelines)
+        message += "\n\n- If you have %s rights please correct the error in %s by adding yourself to the file (no review needed).  " % (
+                        flag_name, committers_list)
+        message += "The %s restarts itself every 2 hours.  After restart the %s will correctly respect your %s rights." % (
+                        queue_name, queue_name, flag_name)
+        return message
+
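+    # Descriptive sketch of the validation flow below (not part of the
+    # original change): for each patch, the account that set the "reviewer"
+    # or "committer" flag is looked up; if the setter is not recognized, the
+    # flag is cleared ("-") and the rejection message above is posted.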
+    def _validate_setter_email(self, patch, result_key, rejection_function):
+        committer = getattr(patch, result_key)()
+        # If the flag is set, and we don't recognize the setter, reject the flag!
+        setter_email = patch._attachment_dictionary.get("%s_email" % result_key)
+        if setter_email and not committer:
+            rejection_function(patch.id(), self._flag_permission_rejection_message(setter_email, result_key))
+            return False
+        return True
+
+    def _reject_patch_if_flags_are_invalid(self, patch):
+        return (self._validate_setter_email(patch, "reviewer", self.reject_patch_from_review_queue)
+            and self._validate_setter_email(patch, "committer", self.reject_patch_from_commit_queue))
+
+    def patches_after_rejecting_invalid_commiters_and_reviewers(self, patches):
+        return [patch for patch in patches if self._reject_patch_if_flags_are_invalid(patch)]
+
+    def reject_patch_from_commit_queue(self,
+                                       attachment_id,
+                                       additional_comment_text=None):
+        comment_text = "Rejecting attachment %s from commit-queue." % attachment_id
+        self.host.bugs.set_flag_on_attachment(attachment_id,
+                                              "commit-queue",
+                                              "-",
+                                              comment_text,
+                                              additional_comment_text)
+
+    def reject_patch_from_review_queue(self,
+                                       attachment_id,
+                                       additional_comment_text=None):
+        comment_text = "Rejecting attachment %s from review queue." % attachment_id
+        self.host.bugs.set_flag_on_attachment(attachment_id,
+                                              'review',
+                                              '-',
+                                              comment_text,
+                                              additional_comment_text)
diff --git a/Tools/Scripts/webkitpy/common/config/committervalidator_unittest.py b/Tools/Scripts/webkitpy/common/config/committervalidator_unittest.py
new file mode 100644
index 0000000..232f077
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/config/committervalidator_unittest.py
@@ -0,0 +1,44 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.host_mock import MockHost
+from .committervalidator import CommitterValidator
+
+
+class CommitterValidatorTest(unittest.TestCase):
+    def test_flag_permission_rejection_message(self):
+        validator = CommitterValidator(MockHost())
+        self.assertEqual(validator._committers_py_path(), "Tools/Scripts/webkitpy/common/config/committers.py")
+        expected_message = """foo@foo.com does not have review permissions according to http://trac.webkit.org/browser/trunk/Tools/Scripts/webkitpy/common/config/committers.py.
+
+- If you do not have review rights please read http://webkit.org/coding/contributing.html for instructions on how to use bugzilla flags.
+
+- If you have review rights please correct the error in Tools/Scripts/webkitpy/common/config/committers.py by adding yourself to the file (no review needed).  The commit-queue restarts itself every 2 hours.  After restart the commit-queue will correctly respect your review rights."""
+        self.assertEqual(validator._flag_permission_rejection_message("foo@foo.com", "review"), expected_message)
diff --git a/Tools/Scripts/webkitpy/common/config/contributionareas.py b/Tools/Scripts/webkitpy/common/config/contributionareas.py
new file mode 100644
index 0000000..61a7488
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/config/contributionareas.py
@@ -0,0 +1,217 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import re
+
+
+class _Intersection(object):
+    def __init__(self, *tokens):
+        self._tokens = tokens
+
+    def matches(self, tokens):
+        for token in self._tokens:
+            if token not in tokens and (token + 's') not in tokens:
+                return False
+        return True
+
+
+class _Area(object):
+    def __init__(self, name, tokens=None):
+        self._name = name
+        self._tokens = tokens if tokens else [self._name_to_token(name)]
+
+    def _name_to_token(self, word):
+        token = word.lower()
+        return token[:-1] if word[-1] == 's' else token
+
+    def matches(self, tokens):
+        # FIXME: Support pluralization properly
+        for token in self._tokens:
+            if isinstance(token, _Intersection):
+                if token.matches(tokens):
+                    return True
+            elif token in tokens or (token + 's') in tokens:
+                return True
+        return False
+
+    def name(self):
+        return self._name
+
+    def tokens(self):
+        return self._tokens
+
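+# Illustrative examples (not part of the original change), mirroring the
+# unit tests:
+#
+#   _Area('CSS').tokens()                                   # ['css']
+#   _Area('Forms', ['forms', 'input']).matches(['input'])   # True
+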
+contribution_areas = [
+    _Area('ARM JIT', ['arm']),
+# FIXME: 'Accelerated compositing / GPU acceleration'
+    _Area('Accessibility'),
+    _Area('Android port', ['android']),
+    _Area('Animation', ['animation', 'animator']),
+    _Area('Apple\'s Windows port', ['win', 'windows']),  # FIXME: need to exclude chromium...
+    _Area('Autotools Build', ['autotools']),
+    _Area('Basic types and data structures (WTF)', ['wtf']),
+# FIXME: 'Bidirectional text'
+# FIXME: 'Build/test infrastructure (stuff under Tools/Scripts)'
+    _Area('CMake Build', ['cmakelist']),
+    _Area('CSS (Cascading Style Sheets)', ['css']),
+    _Area('CSS Transforms', [_Intersection('css', 'transforms')]),
+    _Area('CSS/SVG Filters', [_Intersection('css', 'filters'), _Intersection('svg', 'filters')]),
+    _Area('CURL HTTP Backend', ['CURL']),
+    _Area('Resource Cache', [_Intersection('loader', 'cache')]),
+    _Area('Memory Cache', [_Intersection('graphics', 'cache')]),
+    _Area('Cairo'),
+    _Area('Canvas'),
+    _Area('Chromium Linux', [_Intersection('chromium', 'linux')]),
+# FIXME: 'Commit Queue'
+    _Area('Core DOM', ['dom']),
+    _Area('Core Graphics', ['cg']),
+    _Area('Bindings'),
+    _Area('DOM Storage', ['storage']),
+    _Area('Drag and Drop', ['drag']),
+    _Area('DumpRenderTree'),
+    _Area('EFL', ['efl']),
+    _Area('Editing / Selection', ['editing']),
+    _Area('Event Handling', ['event']),
+    _Area('FastMalloc'),
+    _Area('File API', ['fileapi']),
+    _Area('Fonts'),
+    _Area('Forms'),
+# FIXME: 'Frame Flattening'
+    _Area('Frame Loader'),
+# FIXME: 'General' Maybe auto-detect people contributing to all subdirectories?
+    _Area('Geolocation API', ['geolocation']),
+    _Area('Graphics', ['graphics']),
+    _Area('HTML', ['html']),
+    _Area('HTML Parser', [_Intersection('html', 'parser')]),  # FIXME: matches html/track/WebVTTParser.cpp
+    _Area('HTML5 Media Support', ['media']),
+    _Area('History', ['history']),
+# FIXME: 'Hit testing'
+    _Area('Image Decoder', ['imagedecoder']),
+# FIXME: 'Input methods'
+    _Area('JSC Bindings', [_Intersection('bindings', 'js')]),
+    _Area('JavaScriptCore'),
+    _Area('JavaScriptCore Regular Expressions', [_Intersection('JavaScriptCore', 'regexp')]),
+# FIXME: 'Layout tests' but what does it mean to say you're an expert on layout tests? Maybe worked on tools?
+    _Area('Loader', ['loader']),
+    _Area('MathML'),
+    _Area('Memory Use / Leaks', ['leaks']),  # Probably need more tokens
+    _Area('MessagePorts'),
+    _Area('Network', [_Intersection('platform', 'network')]),
+    _Area('new-run-webkit-tests', ['layout_tests']),
+    _Area('OpenVG graphics backend', ['openvg']),
+# FIXME: 'Performance'
+    _Area('Plug-ins', ['plugins']),
+    _Area('Printing', ['printing', 'print']),
+    _Area('Rendering'),
+    _Area('SVG (Scalable Vector Graphics)', ['svg']),
+    _Area('Scrollbars', ['scroll']),
+    _Area('Security'),  # Probably need more tokens
+# FIXME: 'Shadow DOM'
+    _Area('Skia'),
+    _Area('Soup Network Backend', ['soup']),
+# FIXME: 'Spell Checking' just need tokens
+    _Area('Tables', ['htmltable', 'rendertable']),
+# FIXME: 'Text Encoding'
+# FIXME: 'Text Layout'
+    _Area('The Chromium Port', ['chromium']),
+    _Area('The EFLWebKit Port', ['efl']),
+    _Area('The WebKitGTK+ Port', ['gtk']),
+    _Area('The Haiku Port', ['haiku']),
+    _Area('The QtWebKit Port', ['qt']),
+    _Area('The WinCE Port', ['wince']),
+    _Area('The WinCairo Port', ['cairo']),
+    _Area('The wxWebKit Port', ['wx']),
+    _Area('Threading', ['thread']),
+    _Area('Tools'),
+    _Area('Touch Support', ['touch']),
+    _Area('Transforms', ['transforms']),  # There's also CSS transforms
+    _Area('Transitions', ['transitions']),  # This only matches transition events at the moment
+    _Area('URL Parsing', ['KURL']),  # Probably need more tokens
+    _Area('V8', ['v8']),
+    _Area('V8 Bindings', [_Intersection('bindings', 'v8')]),
+    _Area('Web Inspector / Developer Tools', ['inspector']),
+    _Area('Web Timing', ['PerformanceNavigation', 'PerformanceTiming']),  # more tokens?
+    _Area('WebArchive'),
+    _Area('WebCore Icon Database', ['icon']),
+    _Area('WebGL', ['webgl']),
+    _Area('WebKit Websites', ['websites']),
+    _Area('WebKit2'),
+    _Area('WebSQL Databases', [_Intersection('storage', 'database')]),
+    _Area('WebSockets'),
+    _Area('Workers'),
+    _Area('XML'),
+    _Area('XMLHttpRequest'),
+    _Area('XSLT'),
+    _Area('XSSAuditor'),
+    _Area('WebKit API Tests', ['TestWebKitAPI']),
+    _Area('webkit-patch', [_Intersection('webkitpy', 'commands')]),
+]
+
+
+class ContributionAreas(object):
+    def __init__(self, filesystem, table=contribution_areas):
+        self._filesystem = filesystem
+        self._contribution_areas = table
+
+    def names(self):
+        return [area.name() for area in self._contribution_areas]
+
+    def _split_path(self, path):
+        result = []
+        while path and len(path):
+            next_path, tail = self._filesystem.split(path)
+            if path == next_path:
+                break
+            if tail and len(tail):
+                result.append(tail)
+            path = next_path
+        return result
+
+    def _split_camelcase(self, name, transform=lambda x: x):
+        result = []
+        while name and len(name):
+            m = re.match('^([A-Z][a-z0-9]+)|([A-Z0-9]+(?=([A-Z][a-z0-9]|\.|$)))', name)
+            if m:
+                result.append(transform(m.group(0)))
+                name = name[m.end():]
+            else:
+                return result
+        return result
+
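+    # Sketch (not part of the original change): a touched path such as
+    # 'WebCore/html/HTMLInputElement.cpp' is split into its path components
+    # and into camel-case tokens ('html', 'input', 'element', ...), and each
+    # area above is matched against both token lists.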
+    def areas_for_touched_files(self, touched_files):
+        areas = set()
+        for file_path in touched_files:
+            split_file_path = self._split_path(file_path)
+            tokenized_file_path = sum([self._split_camelcase(token, lambda x: x.lower()) for token in split_file_path], [])
+            for area in self._contribution_areas:
+                if area.matches(split_file_path) or area.matches(tokenized_file_path):
+                    areas.add(area.name())
+        return areas
diff --git a/Tools/Scripts/webkitpy/common/config/contributionareas_unittest.py b/Tools/Scripts/webkitpy/common/config/contributionareas_unittest.py
new file mode 100644
index 0000000..c3960d9
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/config/contributionareas_unittest.py
@@ -0,0 +1,61 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from .contributionareas import _Intersection
+from .contributionareas import _Area
+from .contributionareas import ContributionAreas
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+
+
+class ContributionAreasTest(unittest.TestCase):
+
+    def test_contribution(self):
+        self.assertEqual(_Area('CSS').tokens(), ['css'])
+        self.assertEqual(_Area('Forms', ['input']).tokens(), ['input'])
+
+    def _assert_areas_for_touched_files(self, areas, files, expected_areas):
+        self.assertEqual(areas.areas_for_touched_files(files), set(expected_areas))
+
+    def test_areas_for_touched_files(self):
+        areas = ContributionAreas(MockFileSystem(), [
+            _Area('CSS'),
+            _Area('HTML'),
+            _Area('Forms', ['forms', 'input']),
+            _Area('CSS Transforms', [_Intersection('css', 'transforms')]),
+        ])
+        self._assert_areas_for_touched_files(areas, [], [])
+        self._assert_areas_for_touched_files(areas, ['WebCore/css'], ['CSS'])
+        self._assert_areas_for_touched_files(areas, ['WebCore/html/'], ['HTML'])
+        self._assert_areas_for_touched_files(areas, ['WebCore/css/CSSStyleSelector.cpp', 'WebCore/html/HTMLIFrameElement.h'], ['CSS', 'HTML'])
+        self._assert_areas_for_touched_files(areas, ['WebCore'], [])
+        self._assert_areas_for_touched_files(areas, ['WebCore/html2'], [])
+        self._assert_areas_for_touched_files(areas, ['WebCore/html/HTMLInputElement.cpp'], ['HTML', 'Forms'])
+        self._assert_areas_for_touched_files(areas, ['WebCore/svg/transforms'], [])
+        self._assert_areas_for_touched_files(areas, ['WebCore/css/transforms'], ['CSS', 'CSS Transforms'])
diff --git a/Tools/Scripts/webkitpy/common/config/irc.py b/Tools/Scripts/webkitpy/common/config/irc.py
new file mode 100644
index 0000000..950c573
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/config/irc.py
@@ -0,0 +1,31 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+server="irc.freenode.net"
+port=6667
+channel="#webkit"
diff --git a/Tools/Scripts/webkitpy/common/config/orderfile b/Tools/Scripts/webkitpy/common/config/orderfile
new file mode 100644
index 0000000..9fb4977
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/config/orderfile
@@ -0,0 +1,8 @@
+Source*ChangeLog
+Source*
+Tools*ChangeLog
+Tools*
+Websites*ChangeLog
+Websites*
+LayoutTests*ChangeLog
+LayoutTests*
diff --git a/Tools/Scripts/webkitpy/common/config/ports.py b/Tools/Scripts/webkitpy/common/config/ports.py
new file mode 100644
index 0000000..884380e
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/config/ports.py
@@ -0,0 +1,216 @@
+# Copyright (C) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# WebKit's Python module for understanding the various ports
+
+import os
+import platform
+import sys
+
+from webkitpy.common.system.executive import Executive
+
+
+class DeprecatedPort(object):
+    results_directory = "/tmp/layout-test-results"
+
+    # FIXME: This is only used by BotInfo.
+    def name(self):
+        return self.__class__
+
+    def flag(self):
+        if self.port_flag_name:
+            return "--port=%s" % self.port_flag_name
+        return None
+
+    # We might need to pass scm into this function for scm.checkout_root
+    def script_path(self, script_name):
+        return os.path.join("Tools", "Scripts", script_name)
+
+    def script_shell_command(self, script_name):
+        script_path = self.script_path(script_name)
+        return Executive.shell_command_for_script(script_path)
+
+    @staticmethod
+    def port(port_name):
+        ports = {
+            "chromium": ChromiumPort,
+            "chromium-android": ChromiumAndroidPort,
+            "chromium-xvfb": ChromiumXVFBPort,
+            "gtk": GtkPort,
+            "mac": MacPort,
+            "win": WinPort,
+            "qt": QtPort,
+            "efl": EflPort,
+        }
+        default_port = {
+            "Windows": WinPort,
+            "Darwin": MacPort,
+        }
+        # Do we really need MacPort as the ultimate default?
+        return ports.get(port_name, default_port.get(platform.system(), MacPort))()
+
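+    # Illustrative usage of the factory above (not part of the original
+    # change), mirroring the unit tests:
+    #
+    #   DeprecatedPort.port("mac").flag()    # "--port=mac"
+    #   DeprecatedPort.port("gtk").build_webkit_command()
+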
+    def makeArgs(self):
+        # FIXME: This shouldn't use a static Executive().
+        args = '--makeargs="-j%s"' % Executive().cpu_count()
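+        # An explicit MAKEFLAGS environment variable overrides the default -jN.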
+        if 'MAKEFLAGS' in os.environ:
+            args = '--makeargs="%s"' % os.environ['MAKEFLAGS']
+        return args
+
+    def update_webkit_command(self, non_interactive=False):
+        return self.script_shell_command("update-webkit")
+
+    def check_webkit_style_command(self):
+        return self.script_shell_command("check-webkit-style")
+
+    def prepare_changelog_command(self):
+        return self.script_shell_command("prepare-ChangeLog")
+
+    def build_webkit_command(self, build_style=None):
+        command = self.script_shell_command("build-webkit")
+        if build_style == "debug":
+            command.append("--debug")
+        if build_style == "release":
+            command.append("--release")
+        return command
+
+    def run_javascriptcore_tests_command(self):
+        return self.script_shell_command("run-javascriptcore-tests")
+
+    def run_webkit_unit_tests_command(self):
+        return None
+
+    def run_webkit_tests_command(self):
+        return self.script_shell_command("run-webkit-tests")
+
+    def run_python_unittests_command(self):
+        return self.script_shell_command("test-webkitpy")
+
+    def run_perl_unittests_command(self):
+        return self.script_shell_command("test-webkitperl")
+
+    def layout_tests_results_path(self):
+        return os.path.join(self.results_directory, "full_results.json")
+
+    def unit_tests_results_path(self):
+        return os.path.join(self.results_directory, "webkit_unit_tests_output.xml")
+
+
+class MacPort(DeprecatedPort):
+    port_flag_name = "mac"
+
+
+class WinPort(DeprecatedPort):
+    port_flag_name = "win"
+
+
+class GtkPort(DeprecatedPort):
+    port_flag_name = "gtk"
+
+    def build_webkit_command(self, build_style=None):
+        command = super(GtkPort, self).build_webkit_command(build_style=build_style)
+        command.append("--gtk")
+        command.append("--update-gtk")
+        command.append(super(GtkPort, self).makeArgs())
+        return command
+
+    def run_webkit_tests_command(self):
+        command = super(GtkPort, self).run_webkit_tests_command()
+        command.append("--gtk")
+        return command
+
+
+class QtPort(DeprecatedPort):
+    port_flag_name = "qt"
+
+    def build_webkit_command(self, build_style=None):
+        command = super(QtPort, self).build_webkit_command(build_style=build_style)
+        command.append("--qt")
+        command.append(super(QtPort, self).makeArgs())
+        return command
+
+
+class EflPort(DeprecatedPort):
+    port_flag_name = "efl"
+
+    def build_webkit_command(self, build_style=None):
+        command = super(EflPort, self).build_webkit_command(build_style=build_style)
+        command.append("--efl")
+        command.append("--update-efl")
+        command.append(super(EflPort, self).makeArgs())
+        return command
+
+
+class ChromiumPort(DeprecatedPort):
+    port_flag_name = "chromium"
+
+    def update_webkit_command(self, non_interactive=False):
+        command = super(ChromiumPort, self).update_webkit_command(non_interactive=non_interactive)
+        command.append("--chromium")
+        if non_interactive:
+            command.append("--force-update")
+        return command
+
+    def build_webkit_command(self, build_style=None):
+        command = super(ChromiumPort, self).build_webkit_command(build_style=build_style)
+        command.append("--chromium")
+        command.append("--update-chromium")
+        return command
+
+    def run_webkit_tests_command(self):
+        # Note: This could be run-webkit-tests now.
+        command = self.script_shell_command("new-run-webkit-tests")
+        command.append("--chromium")
+        command.append("--skip-failing-tests")
+        return command
+
+    def run_webkit_unit_tests_command(self):
+        return self.script_shell_command("run-chromium-webkit-unit-tests")
+
+    def run_javascriptcore_tests_command(self):
+        return None
+
+
+class ChromiumAndroidPort(ChromiumPort):
+    port_flag_name = "chromium-android"
+
+    def update_webkit_command(self, non_interactive=False):
+        command = super(ChromiumAndroidPort, self).update_webkit_command(non_interactive=non_interactive)
+        command.append("--chromium-android")
+        return command
+
+    def build_webkit_command(self, build_style=None):
+        command = super(ChromiumAndroidPort, self).build_webkit_command(build_style=build_style)
+        command.append("--chromium-android")
+        return command
+
+
+class ChromiumXVFBPort(ChromiumPort):
+    port_flag_name = "chromium-xvfb"
+
+    def run_webkit_tests_command(self):
+        return ["xvfb-run"] + super(ChromiumXVFBPort, self).run_webkit_tests_command()
diff --git a/Tools/Scripts/webkitpy/common/config/ports_mock.py b/Tools/Scripts/webkitpy/common/config/ports_mock.py
new file mode 100644
index 0000000..1d14311
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/config/ports_mock.py
@@ -0,0 +1,67 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class MockPort(object):
+    results_directory = "/mock-results"
+
+    def name(self):
+        return "MockPort"
+
+    def layout_tests_results_path(self):
+        return "/mock-results/full_results.json"
+
+    def unit_tests_results_path(self):
+        return "/mock-results/webkit_unit_tests_output.xml"
+
+    def check_webkit_style_command(self):
+        return ["mock-check-webkit-style"]
+
+    def update_webkit_command(self, non_interactive=False):
+        return ["mock-update-webkit"]
+
+    def build_webkit_command(self, build_style=None):
+        return ["mock-build-webkit"]
+
+    def prepare_changelog_command(self):
+        return ['mock-prepare-ChangeLog']
+
+    def run_python_unittests_command(self):
+        return ['mock-test-webkitpy']
+
+    def run_perl_unittests_command(self):
+        return ['mock-test-webkitperl']
+
+    def run_javascriptcore_tests_command(self):
+        return ['mock-run-javascriptcore-tests']
+
+    def run_webkit_unit_tests_command(self):
+        return ['mock-run-webkit-unit-tests']
+
+    def run_webkit_tests_command(self):
+        return ['mock-run-webkit-tests']
diff --git a/Tools/Scripts/webkitpy/common/config/ports_unittest.py b/Tools/Scripts/webkitpy/common/config/ports_unittest.py
new file mode 100644
index 0000000..2720523
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/config/ports_unittest.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python
+# Copyright (c) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.config.ports import *
+
+
+class DeprecatedPortTest(unittest.TestCase):
+    def test_mac_port(self):
+        self.assertEquals(MacPort().flag(), "--port=mac")
+        self.assertEquals(MacPort().run_webkit_tests_command(), DeprecatedPort().script_shell_command("run-webkit-tests"))
+        self.assertEquals(MacPort().build_webkit_command(), DeprecatedPort().script_shell_command("build-webkit"))
+        self.assertEquals(MacPort().build_webkit_command(build_style="debug"), DeprecatedPort().script_shell_command("build-webkit") + ["--debug"])
+        self.assertEquals(MacPort().build_webkit_command(build_style="release"), DeprecatedPort().script_shell_command("build-webkit") + ["--release"])
+
+    def test_gtk_port(self):
+        self.assertEquals(GtkPort().flag(), "--port=gtk")
+        self.assertEquals(GtkPort().run_webkit_tests_command(), DeprecatedPort().script_shell_command("run-webkit-tests") + ["--gtk"])
+        self.assertEquals(GtkPort().build_webkit_command(), DeprecatedPort().script_shell_command("build-webkit") + ["--gtk", "--update-gtk", DeprecatedPort().makeArgs()])
+        self.assertEquals(GtkPort().build_webkit_command(build_style="debug"), DeprecatedPort().script_shell_command("build-webkit") + ["--debug", "--gtk", "--update-gtk", DeprecatedPort().makeArgs()])
+
+    def test_efl_port(self):
+        self.assertEquals(EflPort().flag(), "--port=efl")
+        self.assertEquals(EflPort().build_webkit_command(), DeprecatedPort().script_shell_command("build-webkit") + ["--efl", "--update-efl", DeprecatedPort().makeArgs()])
+        self.assertEquals(EflPort().build_webkit_command(build_style="debug"), DeprecatedPort().script_shell_command("build-webkit") + ["--debug", "--efl", "--update-efl", DeprecatedPort().makeArgs()])
+
+    def test_qt_port(self):
+        self.assertEquals(QtPort().flag(), "--port=qt")
+        self.assertEquals(QtPort().run_webkit_tests_command(), DeprecatedPort().script_shell_command("run-webkit-tests"))
+        self.assertEquals(QtPort().build_webkit_command(), DeprecatedPort().script_shell_command("build-webkit") + ["--qt", DeprecatedPort().makeArgs()])
+        self.assertEquals(QtPort().build_webkit_command(build_style="debug"), DeprecatedPort().script_shell_command("build-webkit") + ["--debug", "--qt", DeprecatedPort().makeArgs()])
+
+    def test_chromium_port(self):
+        self.assertEquals(ChromiumPort().flag(), "--port=chromium")
+        self.assertEquals(ChromiumPort().run_webkit_tests_command(), DeprecatedPort().script_shell_command("new-run-webkit-tests") + ["--chromium", "--skip-failing-tests"])
+        self.assertEquals(ChromiumPort().build_webkit_command(), DeprecatedPort().script_shell_command("build-webkit") + ["--chromium", "--update-chromium"])
+        self.assertEquals(ChromiumPort().build_webkit_command(build_style="debug"), DeprecatedPort().script_shell_command("build-webkit") + ["--debug", "--chromium", "--update-chromium"])
+        self.assertEquals(ChromiumPort().update_webkit_command(), DeprecatedPort().script_shell_command("update-webkit") + ["--chromium"])
+
+    def test_chromium_android_port(self):
+        self.assertEquals(ChromiumAndroidPort().build_webkit_command(), ChromiumPort().build_webkit_command() + ["--chromium-android"])
+        self.assertEquals(ChromiumAndroidPort().update_webkit_command(), ChromiumPort().update_webkit_command() + ["--chromium-android"])
+
+    def test_chromium_xvfb_port(self):
+        self.assertEquals(ChromiumXVFBPort().run_webkit_tests_command(), ['xvfb-run'] + DeprecatedPort().script_shell_command('new-run-webkit-tests') + ['--chromium', '--skip-failing-tests'])
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/config/urls.py b/Tools/Scripts/webkitpy/common/config/urls.py
new file mode 100644
index 0000000..88ad373
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/config/urls.py
@@ -0,0 +1,82 @@
+# Copyright (c) 2010, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import re
+
+
+def view_source_url(local_path):
+    return "http://trac.webkit.org/browser/trunk/%s" % local_path
+
+
+def view_revision_url(revision_number):
+    return "http://trac.webkit.org/changeset/%s" % revision_number
+
+
+def chromium_results_zip_url(builder_name):
+    return 'http://build.chromium.org/f/chromium/layout_test_results/%s/layout-test-results.zip' % builder_name
+
+chromium_lkgr_url = "http://chromium-status.appspot.com/lkgr"
+contribution_guidelines = "http://webkit.org/coding/contributing.html"
+
+bug_server_domain = "webkit.org"
+bug_server_host = "bugs." + bug_server_domain
+_bug_server_regex = "https?://%s/" % re.sub('\.', '\\.', bug_server_host)
+bug_server_url = "https://%s/" % bug_server_host
+bug_url_long = _bug_server_regex + r"show_bug\.cgi\?id=(?P<bug_id>\d+)(&ctype=xml|&excludefield=attachmentdata)*"
+bug_url_short = r"https?\://%s/b/(?P<bug_id>\d+)" % bug_server_domain
+
+attachment_url = _bug_server_regex + r"attachment\.cgi\?id=(?P<attachment_id>\d+)(&action=(?P<action>\w+))?"
+direct_attachment_url = r"https?://bug-(?P<bug_id>\d+)-attachments.%s/attachment\.cgi\?id=(?P<attachment_id>\d+)" % bug_server_domain
+
+buildbot_url = "http://build.webkit.org"
+chromium_buildbot_url = "http://build.chromium.org/p/chromium.webkit"
+
+omahaproxy_url = "http://omahaproxy.appspot.com/"
+
+def parse_bug_id(string):
+    if not string:
+        return None
+    match = re.search(bug_url_short, string)
+    if match:
+        return int(match.group('bug_id'))
+    match = re.search(bug_url_long, string)
+    if match:
+        return int(match.group('bug_id'))
+    return None
+
+
+def parse_attachment_id(string):
+    if not string:
+        return None
+    match = re.search(attachment_url, string)
+    if match:
+        return int(match.group('attachment_id'))
+    match = re.search(direct_attachment_url, string)
+    if match:
+        return int(match.group('attachment_id'))
+    return None
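
As a quick illustration of how the two parsers above behave (a minimal sketch; the ids and strings are made-up examples, not real bugs or attachments):

    from webkitpy.common.config.urls import parse_bug_id, parse_attachment_id

    # Both helpers scan an arbitrary string for a recognized bugs.webkit.org URL
    # and return the numeric id, or None when nothing matches.
    assert parse_bug_id("See https://bugs.webkit.org/show_bug.cgi?id=12345 for details.") == 12345
    assert parse_bug_id("no bug URL here") is None
    assert parse_attachment_id("https://bugs.webkit.org/attachment.cgi?id=67890&action=review") == 67890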
diff --git a/Tools/Scripts/webkitpy/common/config/urls_unittest.py b/Tools/Scripts/webkitpy/common/config/urls_unittest.py
new file mode 100644
index 0000000..2b94b86
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/config/urls_unittest.py
@@ -0,0 +1,61 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from .urls import parse_bug_id, parse_attachment_id
+
+
+class URLsTest(unittest.TestCase):
+    def test_parse_bug_id(self):
+        # FIXME: These would all be better as doctests.
+        self.assertEquals(12345, parse_bug_id("http://webkit.org/b/12345"))
+        self.assertEquals(12345, parse_bug_id("foo\n\nhttp://webkit.org/b/12345\nbar\n\n"))
+        self.assertEquals(12345, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?id=12345"))
+        self.assertEquals(12345, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?id=12345&ctype=xml"))
+        self.assertEquals(12345, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?id=12345&ctype=xml&excludefield=attachmentdata"))
+        self.assertEquals(12345, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?id=12345excludefield=attachmentdata&ctype=xml"))
+
+        # Our url parser is super-fragile, but at least we're testing it.
+        self.assertEquals(None, parse_bug_id("http://www.webkit.org/b/12345"))
+        self.assertEquals(None, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?ctype=xml&id=12345"))
+        self.assertEquals(None, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?ctype=xml&id=12345&excludefield=attachmentdata"))
+        self.assertEquals(None, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?ctype=xml&excludefield=attachmentdata&id=12345"))
+        self.assertEquals(None, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?excludefield=attachmentdata&ctype=xml&id=12345"))
+        self.assertEquals(None, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?excludefield=attachmentdata&id=12345&ctype=xml"))
+
+    def test_parse_attachment_id(self):
+        self.assertEquals(12345, parse_attachment_id("https://bugs.webkit.org/attachment.cgi?id=12345&action=review"))
+        self.assertEquals(12345, parse_attachment_id("https://bugs.webkit.org/attachment.cgi?id=12345&action=edit"))
+        self.assertEquals(12345, parse_attachment_id("https://bugs.webkit.org/attachment.cgi?id=12345&action=prettypatch"))
+        self.assertEquals(12345, parse_attachment_id("https://bugs.webkit.org/attachment.cgi?id=12345&action=diff"))
+
+        # Direct attachment links are hosted from per-bug subdomains:
+        self.assertEquals(12345, parse_attachment_id("https://bug-23456-attachments.webkit.org/attachment.cgi?id=12345"))
+        # Make sure attachment URLs that carry an access token work too.
+        self.assertEquals(12345, parse_attachment_id("https://bug-23456-attachments.webkit.org/attachment.cgi?id=12345&t=Bqnsdkl9fs"))
diff --git a/Tools/Scripts/webkitpy/common/config/watchlist b/Tools/Scripts/webkitpy/common/config/watchlist
new file mode 100755
index 0000000..854a812
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/config/watchlist
@@ -0,0 +1,357 @@
+#  -*- mode: Python;-*-
+#
+# When editing this file, please run the following command to make sure you
+# haven't introduced any syntax errors:
+#
+# ./Tools/Scripts/check-webkit-style
+#
+# If you want to test your regular expressions, you can edit various files and
+# then try the following command:
+#
+# ./Tools/Scripts/webkit-patch apply-watchlist-local
+#
+{
+    "DEFINITIONS": {
+        "ChromiumGraphics": {
+            "filename": r"Source/WebCore/platform/graphics/chromium/",
+        },
+        "ChromiumPublicApi": {
+            "filename": r"Source/WebKit/chromium/public/"
+                        r"|Source/Platform/chromium/public/",
+        },
+        "AppleMacPublicApi": {
+            "filename": r"Source/WebCore/bindings/objc/PublicDOMInterfaces.h"
+        },
+        "Forms": {
+            "filename": r"Source/WebCore/html/HTML(DataList|FieldSet|Input|Keygen|Label|Legend|OptGroup|Option|Output|Select|TextArea)Element\."
+                        r"|Source/WebCore/html/.*Form[A-Z].*\."
+                        r"|Source/WebCore/html/\w*InputType\."
+                        r"|Source/WebCore/html/shadow/(SliderThumbElement|TextControlInnerElements)\."
+                        r"|Source/WebCore/rendering/Render(FileUploadControl|ListBox|MenuList|Slider|TextControl.*)\."
+        },
+        "GStreamerGraphics": {
+            "filename": r"Source/WebCore/platform/graphics/gstreamer/",
+        },
+        "WebIDL": {
+            "filename": r"Source/WebCore/(?!inspector)(?!testing).*\.idl"
+        },
+        "ThreadingFiles": {
+            "filename": r"Source/JavaScriptCore/wtf/ThreadSpecific\."
+                        r"|Source/JavaScriptCore/wtf/ThreadSafeRefCounted\."
+                        r"|Source/JavaScriptCore/wtf/ThreadingPrimitives\."
+                        r"|Source/JavaScriptCore/wtf/Threading\."
+                        r"|Source/WebCore/dom/CrossThreadTask\."
+                        r"|Source/WebCore/platform/CrossThreadCopier\.",
+        },
+        "ThreadingUsage": {
+            # The intention of this regex is to detect places where people are using common threading mechanisms,
+            # so that one can look them over for common mistakes. This list is long and likely to get longer over time.
+            # Note the negative look-ahead, which keeps mentions of the files themselves (e.g. in build files or #includes) from matching.
+            "more": r"(AllowCrossThreadAccess|AtomicallyInitialize|CrossThreadCopier|CrossThreadRefCounted|Mutex|ReadWriteLock|ThreadCondition|ThreadSafeRefCounted|ThreadSpecific"
+                    r"|createCallbackTask|crossThreadString|deprecatedTurnOffVerifier|threadsafeCopy)(?!\.(h|cpp))",
+        },
+        "WatchListScript": {
+            "filename": r"Tools/Scripts/webkitpy/common/watchlist/",
+        },
+        "webkitpy": {
+            "filename": r"Tools/Scripts/webkitpy/",
+        },
+        "webkitperl": {
+            "filename": r"Tools/Scripts/webkitperl/"
+                        r"|Tools/Scripts/webkitdirs.pm"
+                        r"|Tools/Scripts/VCSUtils.pm"
+                        r"|Tools/Scripts/test-webkitperl",
+        },
+        "SVNScripts": {
+            "filename": r"Tools/Scripts/svn-.*",
+        },
+        "TestFailures": {
+            "filename": r"Tools/BuildSlaveSupport/build.webkit.org-config/public_html/TestFailures/",
+        },
+        "SecurityCritical": {
+            "more": r"[Ss]ecurityOrigin(?!\.(h|cpp))",
+            "less": r"[Ss]ecurityOrigin(?!\.(h|cpp))",
+            "filename": r"XSS|[Ss]ecurity",
+        },
+        "XSS": {
+            "filename": r".*XSS",
+        },
+        "SkiaGraphics": {
+            "filename": r"Source/WebCore/platform/graphics/skia/"
+                        r"|Source/WebCore/platform/graphics/filters/skia/",
+        },
+        "V8Bindings": {
+            "filename": r"Source/WebCore/bindings/v8/",
+        },
+        "BindingsScripts": {
+            "filename": r"Source/WebCore/bindings/scripts/",
+        },
+        "FrameLoader": {
+            "more": r"FrameLoader\.(cpp|h)",
+        },
+        "Loader": {
+            "filename": r"Source/WebCore/loader/",
+        },
+        "Rendering": {
+            "filename": r"Source/WebCore/rendering/",
+        },
+        "StyleChecker": {
+            "filename": r"Tools/Scripts/webkitpy/style/",
+        },
+        "GtkWebKit2PublicAPI": {
+            "filename": r"Source/WebKit2/UIProcess/API/gtk/",
+        },
+        "QtBuildSystem": {
+            # Project files for each target are intentionally left out, as those
+            # mostly list source and header files, which would just add noise.
+            "filename": r"Tools/qmake/"
+                        r"|WebKit.pro",
+        },
+        "QtWebKit2PublicAPI": {
+            "filename": r"Source/WebKit2/UIProcess/API/qt/"
+                        r"|Source/WebKit2/UIProcess/API/cpp/qt/"
+                        r"|Source/WebKit2/UIProcess/API/C/qt/",
+        },
+        "QtGraphics": {
+            "filename": r"Source/WebCore/platform/graphics/qt/"
+                        r"|Source/WebKit2/WebProcess/WebPage/CoordinatedGraphics/"
+                        r"|Source/WebKit2/UIProcess/CoordinatedGraphics",
+        },
+        "TextureMapper": {
+            "filename": r"Source/WebCore/platform/graphics/texmap/",
+        },
+        "OpenGL": {
+            "filename": r"Source/WebCore/platform/graphics/opengl/",
+        },
+        "QtWebKit2PlatformSpecific": {
+            "filename": r"Source/WebKit2/.*\.(pri|pro)"
+                        r"|Source/WebKit2/Platform/qt/"
+                        r"|Source/WebKit2/qt/"
+                        r"|Source/WebKit2/PluginProcess/qt/"
+                        r"|Source/WebKit2/Platform/qt/"
+                        r"|Source/WebKit2/Shared/API/c/qt/"
+                        r"|Source/WebKit2/Shared/qt/"
+                        r"|Source/WebKit2/WebProcess/InjectedBundle/qt/"
+                        r"|Source/WebKit2/WebProcess/FullScreen/qt/"
+                        r"|Source/WebKit2/WebProcess/WebPage/qt/"
+                        r"|Source/WebKit2/WebProcess/qt/"
+                        r"|Source/WebKit2/WebProcess/Plugins/Netscape/qt/"
+                        r"|Source/WebKit2/WebProcess/Downloads/qt/"
+                        r"|Source/WebKit2/WebProcess/WebCoreSupport/qt/"
+                        r"|Source/WebKit2/WebProcess/Cookies/qt/"
+                        r"|Source/WebKit2/UIProcess/qt/"
+                        r"|Source/WebKit2/UIProcess/Plugins/qt/"
+                        r"|Source/WebKit2/UIProcess/Launcher/qt/",
+        },
+        "CSS": {
+            "filename": r"Source/WebCore/css/",
+        },
+        "DOMAttributes": {
+            "filename": r"Source/WebCore/dom/.*Attr.*"
+                        r"|Source/WebCore/dom/NamedNodeMap\.(cpp|h|idl)"
+                        r"|Source/WebCore/dom/Element\.(cpp|h|idl)",
+        },
+        "EFL": {
+            "filename": r"Source/WebKit/efl/"
+                        r"|Source/WebCore/platform/efl/"
+                        r"|Source/WTF/wtf/efl/"
+                        r"|Tools/EWebLauncher"
+                        r"|Tools/DumpRenderTree/efl/"
+                        r"|LayoutTests/platform/efl/",
+        },
+        "EFLWebKit2PublicAPI": {
+            "filename": r"Source/WebKit2/UIProcess/API/efl/"
+                        r"|Source/WebKit2/UIProcess/API/C/efl/",
+        },
+        "EFLWebKit2PlatformSpecific": {
+            "filename": r"Source/WebKit2/.*\.(cmake|txt)"
+                        r"|Source/WebKit2/Platform/efl/"
+                        r"|Source/WebKit2/efl/"
+                        r"|Source/WebKit2/Shared/API/c/efl/"
+                        r"|Source/WebKit2/Shared/efl/"
+                        r"|Source/WebKit2/WebProcess/InjectedBundle/efl/"
+                        r"|Source/WebKit2/WebProcess/WebPage/efl/"
+                        r"|Source/WebKit2/WebProcess/efl/"
+                        r"|Source/WebKit2/WebProcess/Downloads/efl/"
+                        r"|Source/WebKit2/WebProcess/WebCoreSupport/efl/"
+                        r"|Source/WebKit2/UIProcess/efl/"
+                        r"|Source/WebKit2/UIProcess/Launcher/efl/",
+        },
+        "CMake": {
+            "filename": r".*CMakeLists\w*\.txt"
+                        r"|.*\w+\.cmake"
+                        r"|Source/cmake/",
+        },
+        "Selectors": {
+            "filename": r"Source/WebCore/css/CSSSelector*"
+                        r"|Source/WebCore/css/SelectorChecker.*"
+                        r"|Source/WebCore/css/StyleResolver.*"
+                        r"|Source/WebCore/dom/SelectorQuery.*",
+        },
+        "SoupNetwork": {
+            "filename": r"Source/WebCore/platform/network/soup/",
+        },
+        "ScrollingCoordinator": {
+            "filename": r"Source/WebCore/page/scrolling/",
+        },
+        "WebKitGTKTranslations": {
+            "filename": r"Source/WebKit/gtk/po/",
+        },
+        "Media": {
+            "filename": r"(Source|LayoutTests)/.*([Mm]edia|[Aa]udio|[Vv]ideo)",
+        },
+        "MathML": {
+            "filename": r"(Source|LayoutTests|Websites)/.*mathml",
+        },
+        "Editing": {
+            "filename": r"Source/WebCore/editing/",
+        },
+        "BlackBerry": {
+            "filename": r"Source/WebKit/blackberry/"
+                        r"|Source/WebCore/page/blackberry"
+                        r"|Source/WebCore/history/blackberry"
+                        r"|Source/WebCore/plugins/blackberry"
+                        r"|Source/WebCore/editing/blackberry"
+                        r"|Source/WebCore/Resources/blackberry"
+                        r"|Source/WebCore/platform/image-decoders/blackberry"
+                        r"|Source/WebCore/platform/blackberry"
+                        r"|Source/WebCore/platform/text/blackberry"
+                        r"|Source/WebCore/platform/network/blackberry"
+                        r"|Source/WebCore/platform/graphics/blackberry"
+                        r"|Source/WTF/wtf/blackberry"
+                        r"|ManualTests/blackberry"
+                        r"|Tools/DumpRenderTree/blackberry"
+                        r"|LayoutTests/platform/blackberry",
+        },
+        "NetworkInfo": {
+            "filename": r"Source/WebCore/Modules/networkinfo",
+        },
+        "Battery": {
+            "filename": r"Source/WebCore/Modules/battery",
+        },
+        "WTF": {
+            "filename": r"Source/WTF/wtf",
+        },
+        "WebGL": {
+            "filename": r"Source/WebCore/html/canvas/.*WebGL.*"
+                        r"|Source/WebCore/bindings/js/.*WebGL.*"
+                        r"|Source/WebCore/platform/graphics/gpu"
+                        r"|Source/WebCore/platform/graphics/opengl"
+                        r"|Source/WebCore/platform/graphics/ANGLE.*"
+                        r"|Source/WebCore/platform/graphics/.*GraphicsContext3D.*"
+                        r"|Source/ThirdParty/ANGLE",
+        },
+        "Filters": {
+            "filename": r"Source/WebCore/platform/graphics/filters"
+                        r"|Source/WebCore/rendering/.*Filter.*"
+                        r"|Source/WebCore/rendering/style/.*Filter.*"
+                        r"|Source/WebCore/rendering/svg/.*Filter.*"
+                        r"|Source/WebCore/svg/graphics/filters"
+                        r"|Source/WebCore/svg/graphics/.*Filter.*",
+        },
+        "TouchAdjustment": {
+            "filename": r"Source/WebCore/page/TouchAdjustment.*"
+                        r"|LayoutTests/touchadjustment"
+                        r"|Source/WebKit/blackberry/WebKitSupport/FatFingers.*",
+        },
+        "SVG": {
+            "filename": r"Source/WebCore/svg"
+                        r"|Source/WebCore/rendering/svg",
+        },
+        "WebInspectorAPI": {
+            "filename": r"Source/WebCore/inspector/*.json"
+                        r"|Source/WebCore/inspector/*.idl",
+        },
+        "WebSocket": {
+            "filename": r"Source/WebCore/Modules/websockets"
+                        r"|Source/WebCore/platform/network/(|.+/)SocketStream.*",
+        },
+        "MediaStream": {
+            "filename": r"Source/WebCore/Modules/mediastream"
+                        r"|Source/WebCore/platform/mediastream"
+                        r"|LayoutTests/fast/mediastream",
+        },
+        "Accessibility": {
+            "filename": r"Source/WebCore/accessibility"
+                        r"|LayoutTests/.*accessibility",
+        },
+        "Cairo": {
+            "filename": r"Source/WebCore/platform/graphics/cairo",
+        },
+        "Harfbuzz": {
+            "filename": r"Source/WebCore/platform/graphics/harfbuzz",
+        }
+    },
+    "CC_RULES": {
+        # Note: All email addresses listed must be registered with bugzilla.
+        # Specifically, levin@chromium.org and levin+threading@chromium.org are
+        # two different accounts as far as bugzilla is concerned.
+        "Accessibility": [ "cfleizach@apple.com", "dmazzoni@google.com", "apinheiro@igalia.com", "jdiggs@igalia.com" ],
+        "AppleMacPublicApi": [ "timothy@apple.com" ],
+        "Battery": [ "gyuyoung.kim@samsung.com" ],
+        "BlackBerry": [ "mifenton@rim.com", "rwlbuis@gmail.com", "tonikitoo@webkit.org" ],
+        "Cairo": [ "dominik.rottsches@intel.com" ],
+        "CMake": [ "rakuco@webkit.org", "gyuyoung.kim@samsung.com" ],
+        "CSS": [ "alexis@webkit.org", "macpherson@chromium.org", "cmarcelo@webkit.org"],
+        "ChromiumGraphics": [ "jamesr@chromium.org", "cc-bugs@chromium.org" ],
+        "ChromiumPublicApi": [ "abarth@webkit.org", "dglazkov@chromium.org", "fishd@chromium.org", "jamesr@chromium.org", "tkent+wkapi@chromium.org" ],
+        "DOMAttributes": [ "cmarcelo@webkit.org", ],
+        "EFL": [ "rakuco@webkit.org", "gyuyoung.kim@samsung.com" ],
+        "EFLWebKit2PlatformSpecific": [ "gyuyoung.kim@samsung.com", "rakuco@webkit.org" ],
+        "EFLWebKit2PublicAPI": [ "gyuyoung.kim@samsung.com", "rakuco@webkit.org" ],
+        "Editing": [ "mifenton@rim.com" ],
+        "Filters": [ "dino@apple.com" ],
+        "Forms": [ "tkent@chromium.org", "mifenton@rim.com" ],
+        "FrameLoader": [ "abarth@webkit.org", "japhet@chromium.org" ],
+        "GStreamerGraphics": [ "alexis@webkit.org", "pnormand@igalia.com", "gns@gnome.org", "mrobinson@webkit.org" ],
+        "GtkWebKit2PublicAPI": [ "cgarcia@igalia.com", "gns@gnome.org", "mrobinson@webkit.org" ],
+        "Harfbuzz": [ "dominik.rottsches@intel.com" ],
+        "Loader": [ "japhet@chromium.org" ],
+        "MathML": [ "dbarton@mathscribe.com" ],
+        "Media": [ "feature-media-reviews@chromium.org", "eric.carlson@apple.com" ],
+        "MediaStream": [ "tommyw@google.com", "hta@google.com" ],
+        "NetworkInfo": [ "gyuyoung.kim@samsung.com" ],
+        "OpenGL" : [ "noam.rosenthal@nokia.com", "dino@apple.com" ],
+        "QtBuildSystem" : [ "vestbo@webkit.org", "abecsi@webkit.org" ],
+        "QtGraphics" : [ "noam.rosenthal@nokia.com" ],
+        "QtWebKit2PlatformSpecific": [ "alexis@webkit.org", "cmarcelo@webkit.org", "abecsi@webkit.org" ],
+        "QtWebKit2PublicAPI": [ "alexis@webkit.org", "cmarcelo@webkit.org", "abecsi@webkit.org" ],
+        "Rendering": [ "eric@webkit.org" ],
+        "SVG": ["schenney@chromium.org", "pdr@google.com", "fmalita@chromium.org", "dominik.rottsches@intel.com" ],
+        "SVNScripts": [ "dbates@webkit.org" ],
+        "ScrollingCoordinator": [ "andersca@apple.com", "jamesr@chromium.org", "tonikitoo@webkit.org" ],
+        "SecurityCritical": [ "abarth@webkit.org" ],
+        "SkiaGraphics": [ "senorblanco@chromium.org" ],
+        "Selectors": [ "allan.jensen@digia.com" ],
+        "SoupNetwork": [ "rakuco@webkit.org", "gns@gnome.org", "mrobinson@webkit.org", "danw@gnome.org" ],
+        "StyleChecker": [ "levin@chromium.org", ],
+        "TestFailures": [ "abarth@webkit.org", "dglazkov@chromium.org" ],
+        "TextureMapper" : [ "noam.rosenthal@nokia.com" ],
+        "ThreadingFiles|ThreadingUsage": [ "levin+threading@chromium.org", ],
+        "TouchAdjustment" : [ "allan.jensen@digia.com" ],
+        "V8Bindings|BindingsScripts": [ "abarth@webkit.org", "japhet@chromium.org", "haraken@chromium.org" ],
+        "WTF": [ "benjamin@webkit.org",],
+        "WatchListScript": [ "levin+watchlist@chromium.org", ],
+        "WebGL": [ "dino@apple.com" ],
+        "WebIDL": [ "abarth@webkit.org", "ojan@chromium.org" ],
+        "WebInspectorAPI": [ "timothy@apple.com", "joepeck@webkit.org" ],
+        "WebKitGTKTranslations": [ "gns@gnome.org", "mrobinson@webkit.org" ],
+        "WebSocket": [ "yutak@chromium.org" ],
+        "XSS": [ "dbates@webkit.org" ],
+        "webkitperl": [ "dbates@webkit.org" ],
+        "webkitpy": [ "abarth@webkit.org", "ojan@chromium.org", "dpranke@chromium.org" ],
+    },
+    "MESSAGE_RULES": {
+        "ChromiumPublicApi": [ "Please wait for approval from abarth@webkit.org, dglazkov@chromium.org, "
+                               "fishd@chromium.org, jamesr@chromium.org or tkent@chromium.org before "
+                               "submitting, as this patch contains changes to the Chromium public API. "
+                               "See also https://trac.webkit.org/wiki/ChromiumWebKitAPI." ],
+        "AppleMacPublicApi": [ "Please wait for approval from timothy@apple.com (or another member "
+                               "of the Apple Safari Team) before submitting "
+                               "because this patch contains changes to the Apple Mac "
+                               "WebKit.framework public API.", ],
+        "GtkWebKit2PublicAPI": [ "Thanks for the patch. If this patch contains new public API "
+                                 "please make sure it follows the guidelines for new WebKit2 GTK+ API. "
+                                 "See http://trac.webkit.org/wiki/WebKitGTK/AddingNewWebKit2API", ],
+    },
+}
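
The three sections above tie together by name: each key under CC_RULES and MESSAGE_RULES refers to one or more DEFINITIONS entries, optionally combined with "|" (as in "ThreadingFiles|ThreadingUsage"), and a definition can match on "filename" (paths touched by the patch) and/or "more"/"less" (text on added or removed lines). A hypothetical new rule would follow the same shape (the definition name and address below are placeholders, not part of the real watchlist):

    "DEFINITIONS": {
        "MyFeature": {
            "filename": r"Source/WebCore/myfeature/",  # files touched by the patch
            "more": r"MyFeatureFlag",                   # text appearing on added lines
        },
    },
    "CC_RULES": {
        "MyFeature": [ "someone@example.com" ],
    },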
diff --git a/Tools/Scripts/webkitpy/common/editdistance.py b/Tools/Scripts/webkitpy/common/editdistance.py
new file mode 100644
index 0000000..2eccca8
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/editdistance.py
@@ -0,0 +1,47 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from array import array
+
+
+def edit_distance(str1, str2):
+    unsignedShort = 'H'  # array type code for unsigned short
+    distances = [array(unsignedShort, (0,) * (len(str2) + 1)) for i in range(0, len(str1) + 1)]
+    # distances[0][0] = 0 since distance between str1[:0] and str2[:0] is 0
+    for i in range(1, len(str1) + 1):
+        distances[i][0] = i  # Distance between str1[:i] and str2[:0] is i
+
+    for j in range(1, len(str2) + 1):
+        distances[0][j] = j  # Distance between str1[:0] and str2[:j] is j
+
+    for i in range(0, len(str1)):
+        for j in range(0, len(str2)):
+            diff = 0 if str1[i] == str2[j] else 1
+            # Deletion, Insertion, Identical / Replacement
+            distances[i + 1][j + 1] = min(distances[i + 1][j] + 1, distances[i][j + 1] + 1, distances[i][j] + diff)
+    return distances[len(str1)][len(str2)]
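
As a sanity check of the recurrence above (a small sketch; the strings are arbitrary examples):

    from webkitpy.common.editdistance import edit_distance

    # distances[i][j] ends up holding the edit distance between str1[:i] and str2[:j];
    # each cell takes the cheapest of a deletion, an insertion, or a replacement that
    # costs nothing when the characters already match.
    assert edit_distance('kitten', 'sitting') == 3  # two substitutions plus one insertion
    assert edit_distance('', 'abc') == 3            # three insertions
    assert edit_distance('same', 'same') == 0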
diff --git a/Tools/Scripts/webkitpy/common/editdistance_unittest.py b/Tools/Scripts/webkitpy/common/editdistance_unittest.py
new file mode 100644
index 0000000..4ae6441
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/editdistance_unittest.py
@@ -0,0 +1,43 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.editdistance import edit_distance
+
+
+class EditDistanceTest(unittest.TestCase):
+    def test_edit_distance(self):
+        self.assertEqual(edit_distance('', 'aa'), 2)
+        self.assertEqual(edit_distance('aa', ''), 2)
+        self.assertEqual(edit_distance('a', 'ab'), 1)
+        self.assertEqual(edit_distance('ab', 'a'), 1)
+        self.assertEqual(edit_distance('ab', 'aa'), 1)
+        self.assertEqual(edit_distance('aa', 'ab'), 1)
+        self.assertEqual(edit_distance('abd', 'abcdef'), 3)
+        self.assertEqual(edit_distance('abcdef', 'abd'), 3)
diff --git a/Tools/Scripts/webkitpy/common/find_files.py b/Tools/Scripts/webkitpy/common/find_files.py
new file mode 100644
index 0000000..32ce4d1
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/find_files.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""This module is used to find files used by run-webkit-tests and
+perftestrunner. It exposes one public function - find() - which takes
+an optional list of paths, optional set of skipped directories and optional
+filter callback.
+
+If a list is passed in, the returned list of files is constrained to those
+found under the paths passed in. i.e. calling find(["LayoutTests/fast"])
+will only return files under that directory.
+
+If a set of skipped directories is passed in, the function will filter out
+the files lying in these directories i.e. find(["LayoutTests"], set(["fast"]))
+will return everything except files in fast subfolder.
+
+If a callback is passed in, it will be called for the each file and the file
+will be included into the result if the callback returns True.
+The callback has to take three arguments: filesystem, dirname and filename."""
+
+
+def find(filesystem, base_dir, paths=None, skipped_directories=None, file_filter=None):
+    """Finds the set of tests under a given list of sub-paths.
+
+    Args:
+      paths: a list of path expressions relative to base_dir
+          to search. Glob patterns are ok, as are path expressions with
+          forward slashes on Windows. If paths is empty, we look at
+          everything under the base_dir.
+    """
+
+    paths = paths or ['*']
+    skipped_directories = skipped_directories or set(['.svn', '_svn'])
+    return _normalized_find(filesystem, _normalize(filesystem, base_dir, paths), skipped_directories, file_filter)
+
+
+def _normalize(filesystem, base_dir, paths):
+    return [filesystem.normpath(filesystem.join(base_dir, path)) for path in paths]
+
+
+def _normalized_find(filesystem, paths, skipped_directories, file_filter):
+    """Finds the set of tests under the list of paths.
+
+    Args:
+      paths: a list of absolute path expressions to search.
+          Glob patterns are ok.
+    """
+    paths_to_walk = set()
+
+    for path in paths:
+        # If there's an * in the name, assume it's a glob pattern.
+        if path.find('*') > -1:
+            filenames = filesystem.glob(path)
+            paths_to_walk.update(filenames)
+        else:
+            paths_to_walk.add(path)
+
+    # FIXME: I'm not sure there's much point in this being a set. A list would probably be faster.
+    all_files = set()
+    for path in paths_to_walk:
+        files = filesystem.files_under(path, skipped_directories, file_filter)
+        all_files.update(set(files))
+
+    return all_files
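
A minimal usage sketch of find() (the base directory, glob and filter below are illustrative arguments rather than values from a real caller; any callable with the (filesystem, dirname, filename) signature works as the filter):

    from webkitpy.common import find_files
    from webkitpy.common.system.filesystem import FileSystem

    def _is_html(filesystem, dirname, filename):
        # A file is included in the result only when this returns True.
        return filename.endswith('.html')

    filesystem = FileSystem()
    tests = find_files.find(filesystem, 'LayoutTests',
                            paths=['fast/*'],
                            skipped_directories=set(['.svn', '_svn']),
                            file_filter=_is_html)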
diff --git a/Tools/Scripts/webkitpy/common/find_files_unittest.py b/Tools/Scripts/webkitpy/common/find_files_unittest.py
new file mode 100644
index 0000000..75beaf0
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/find_files_unittest.py
@@ -0,0 +1,65 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+import unittest
+
+from webkitpy.common.system.filesystem import FileSystem
+import find_files
+
+
+class MockWinFileSystem(object):
+    def join(self, *paths):
+        return '\\'.join(paths)
+
+    def normpath(self, path):
+        return path.replace('/', '\\')
+
+
+class TestWinNormalize(unittest.TestCase):
+    def assert_filesystem_normalizes(self, filesystem):
+        self.assertEquals(find_files._normalize(filesystem, "c:\\foo",
+            ['fast/html', 'fast/canvas/*', 'compositing/foo.html']),
+            ['c:\\foo\\fast\\html', 'c:\\foo\\fast\\canvas\\*', 'c:\\foo\\compositing\\foo.html'])
+
+    def test_mocked_win(self):
+        # This tests find_files._normalize, using portable behavior emulating
+        # what we think Windows is supposed to do. This test will run on all
+        # platforms.
+        self.assert_filesystem_normalizes(MockWinFileSystem())
+
+    def test_win(self):
+        # This tests the actual windows platform, to ensure we get the same
+        # results that we get in test_mocked_win().
+        if sys.platform != 'win32':
+            return
+        self.assert_filesystem_normalizes(FileSystem())
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/host.py b/Tools/Scripts/webkitpy/common/host.py
new file mode 100644
index 0000000..7dd5ad0
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/host.py
@@ -0,0 +1,152 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import os
+import sys
+
+from webkitpy.common.checkout import Checkout
+from webkitpy.common.checkout.scm.detection import SCMDetector
+from webkitpy.common.memoized import memoized
+from webkitpy.common.net import bugzilla, buildbot, web
+from webkitpy.common.net.buildbot.chromiumbuildbot import ChromiumBuildBot
+from webkitpy.common.system.systemhost import SystemHost
+from webkitpy.common.watchlist.watchlistloader import WatchListLoader
+from webkitpy.layout_tests.port.factory import PortFactory
+
+
+_log = logging.getLogger(__name__)
+
+
+class Host(SystemHost):
+    def __init__(self):
+        SystemHost.__init__(self)
+        self.web = web.Web()
+
+        # FIXME: Checkout should own the scm object.
+        self._scm = None
+        self._checkout = None
+
+        # Everything below this line is WebKit-specific and belongs on a higher-level object.
+        self.bugs = bugzilla.Bugzilla()
+        self.buildbot = buildbot.BuildBot()
+
+        # FIXME: Unfortunately Port objects are currently the central-dispatch objects of the NRWT world.
+        # In order to instantiate a port correctly, we have to pass it at least an executive, user, scm, and filesystem
+        # so for now we just pass along the whole Host object.
+        # FIXME: PortFactory doesn't belong on this Host object if Port is going to have a Host (circular dependency).
+        self.port_factory = PortFactory(self)
+
+        self._engage_awesome_locale_hacks()
+
+    # We call this from the Host constructor, as it's one of the
+    # earliest calls made for all webkitpy-based programs.
+    def _engage_awesome_locale_hacks(self):
+        # To make life easier on our non-English users, we override
+        # the locale environment variables inside webkitpy.
+        # If we don't do this, programs like SVN will output localized
+        # messages and svn.py will fail to parse them.
+        # FIXME: We should do these overrides *only* for the subprocesses we know need them!
+        # This hack only works in unix environments.
+        os.environ['LANGUAGE'] = 'en'
+        os.environ['LANG'] = 'en_US.UTF-8'
+        os.environ['LC_MESSAGES'] = 'en_US.UTF-8'
+        os.environ['LC_ALL'] = ''
+
+    # FIXME: This is a horrible, horrible hack for ChromiumWin and should be removed.
+    # Maybe this belongs in SVN in some more generic "find the svn binary" codepath?
+    # Or possibly Executive should have a way to emulate shell path-lookups?
+    # FIXME: Unclear how to test this, since it currently mutates global state on SVN.
+    def _engage_awesome_windows_hacks(self):
+        try:
+            self.executive.run_command(['svn', 'help'])
+        except OSError, e:
+            try:
+                self.executive.run_command(['svn.bat', 'help'])
+                # Chromium Win uses the depot_tools package, which contains a number
+                # of development tools, including Python and svn. Instead of using a
+                # real svn executable, depot_tools indirects via a batch file, called
+                # svn.bat. This batch file allows depot_tools to auto-update the real
+                # svn executable, which is contained in a subdirectory.
+                #
+                # That's all fine and good, except that subprocess.Popen can tell
+                # the difference between a real svn executable and a batch file
+                # unless we pass shell=True. Rather than use shell=True on Windows,
+                # we hack the svn.bat name into the SVN class.
+                _log.debug('Engaging svn.bat Windows hack.')
+                from webkitpy.common.checkout.scm.svn import SVN
+                SVN.executable_name = 'svn.bat'
+            except OSError, e:
+                _log.debug('Failed to engage svn.bat Windows hack.')
+        try:
+            self.executive.run_command(['git', 'help'])
+        except OSError, e:
+            try:
+                self.executive.run_command(['git.bat', 'help'])
+                # Chromium Win uses the depot_tools package, which contains a number
+                # of development tools, including Python and git. Instead of using a
+                # real git executable, depot_tools indirects via a batch file, called
+                # git.bat. This batch file allows depot_tools to auto-update the real
+                # git executable, which is contained in a subdirectory.
+                #
+                # That's all fine and good, except that subprocess.Popen can tell
+                # the difference between a real git executable and a batch file
+                # unless we pass shell=True. Rather than use shell=True on Windows,
+                # we hack the git.bat name into the Git class.
+                _log.debug('Engaging git.bat Windows hack.')
+                from webkitpy.common.checkout.scm.git import Git
+                Git.executable_name = 'git.bat'
+            except OSError, e:
+                _log.debug('Failed to engage git.bat Windows hack.')
+
+    def initialize_scm(self, patch_directories=None):
+        if sys.platform == "win32":
+            self._engage_awesome_windows_hacks()
+        detector = SCMDetector(self.filesystem, self.executive)
+        self._scm = detector.default_scm(patch_directories)
+        self._checkout = Checkout(self.scm())
+
+    def scm(self):
+        return self._scm
+
+    def checkout(self):
+        return self._checkout
+
+    def buildbot_for_builder_name(self, name):
+        if self.port_factory.get_from_builder_name(name).is_chromium():
+            return self.chromium_buildbot()
+        return self.buildbot
+
+    @memoized
+    def chromium_buildbot(self):
+        return ChromiumBuildBot()
+
+    @memoized
+    def watch_list(self):
+        return WatchListLoader(self.filesystem).load()
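
For reference, the intended call pattern is to construct a Host and then explicitly initialize the SCM before touching checkout state (a sketch of typical usage, not code lifted from a specific caller):

    from webkitpy.common.host import Host

    host = Host()                  # Sets up web, bugs, buildbot, port_factory and the locale hacks.
    host.initialize_scm()          # Detects SVN or Git, applying the Windows .bat hacks if needed.
    scm = host.scm()               # Would be None until initialize_scm() has run.
    checkout = host.checkout()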
diff --git a/Tools/Scripts/webkitpy/common/host_mock.py b/Tools/Scripts/webkitpy/common/host_mock.py
new file mode 100644
index 0000000..8b508bf
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/host_mock.py
@@ -0,0 +1,81 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.checkout.checkout_mock import MockCheckout
+from webkitpy.common.checkout.scm.scm_mock import MockSCM
+from webkitpy.common.net.bugzilla.bugzilla_mock import MockBugzilla
+from webkitpy.common.net.buildbot.buildbot_mock import MockBuildBot
+from webkitpy.common.net.web_mock import MockWeb
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+from webkitpy.common.watchlist.watchlist_mock import MockWatchList
+
+# New-style ports need to move down into webkitpy.common.
+from webkitpy.layout_tests.port.factory import PortFactory
+from webkitpy.layout_tests.port.test import add_unit_tests_to_mock_filesystem
+
+
+class MockHost(MockSystemHost):
+    def __init__(self, log_executive=False, executive_throws_when_run=None, initialize_scm_by_default=True):
+        MockSystemHost.__init__(self, log_executive, executive_throws_when_run)
+        add_unit_tests_to_mock_filesystem(self.filesystem)
+        self.web = MockWeb()
+
+        self._checkout = MockCheckout()
+        self._scm = None
+        # FIXME: we should never initialize the SCM by default, since the real
+        # object doesn't either. This has caused at least one bug (see bug 89498).
+        if initialize_scm_by_default:
+            self.initialize_scm()
+        self.bugs = MockBugzilla()
+        self.buildbot = MockBuildBot()
+        self._chromium_buildbot = MockBuildBot()
+
+        # Note: We're using a real PortFactory here.  Tests which don't wish to depend
+        # on the list of known ports should override this with a MockPortFactory.
+        self.port_factory = PortFactory(self)
+
+        self._watch_list = MockWatchList()
+
+    def initialize_scm(self, patch_directories=None):
+        self._scm = MockSCM(filesystem=self.filesystem, executive=self.executive)
+        # Various pieces of code (wrongly) call filesystem.chdir(checkout_root).
+        # Making the checkout_root exist in the mock filesystem makes that chdir not raise.
+        self.filesystem.maybe_make_directory(self._scm.checkout_root)
+
+    def scm(self):
+        return self._scm
+
+    def checkout(self):
+        return self._checkout
+
+    def chromium_buildbot(self):
+        return self._chromium_buildbot
+
+    def watch_list(self):
+        return self._watch_list
+
diff --git a/Tools/Scripts/webkitpy/common/lru_cache.py b/Tools/Scripts/webkitpy/common/lru_cache.py
new file mode 100644
index 0000000..4178d0f
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/lru_cache.py
@@ -0,0 +1,135 @@
+#!/usr/bin/env python
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class Node():
+    def __init__(self, key, value):
+        self.key = key
+        self.value = value
+        self.prev = None
+        self.next = None
+
+
+class LRUCache():
+    """An implementation of Least Recently Used (LRU) Cache."""
+
+    def __init__(self, capacity):
+        """Initializes a lru cache with the given capacity.
+
+        Args:
+            capacity: The capacity of the cache.
+        """
+        assert capacity > 0, "capacity (%s) must be greater than zero." % capacity
+        self._first = None
+        self._last = None
+        self._dict = {}
+        self._capacity = capacity
+
+    def __setitem__(self, key, value):
+        if key in self._dict:
+            self.__delitem__(key)
+        if not self._first:
+            self._one_node(key, value)
+            return
+        if len(self._dict) >= self._capacity:
+            del self._dict[self._last.key]
+            if self._capacity == 1:
+                self._one_node(key, value)
+                return
+            self._last = self._last.next
+            self._last.prev = None
+        node = Node(key, value)
+        node.prev = self._first
+        self._first.next = node
+        self._first = node
+        self._dict[key] = node
+
+    def _one_node(self, key, value):
+        node = Node(key, value)
+        self._dict[key] = node
+        self._first = node
+        self._last = node
+
+    def __getitem__(self, key):
+        if not self._first:
+            raise KeyError(str(key))
+        if self._first.key == key:
+            return self._first.value
+
+        if self._last.key == key:
+            next_last = self._last.next
+            next_last.prev = None
+            next_first = self._last
+            next_first.prev = self._first
+            next_first.next = None
+            self._first.next = next_first
+            self._first = next_first
+            self._last = next_last
+            return self._first.value
+
+        node = self._dict[key]
+        node.next.prev = node.prev
+        node.prev.next = node.next
+        node.prev = self._first
+        node.next = None
+        self._first.next = node
+        self._first = node
+        return self._first.value
+
+    def __delitem__(self, key):
+        node = self._dict[key]
+        del self._dict[key]
+        if self._first is self._last:
+            self._last = None
+            self._first = None
+            return
+        if self._first is node:
+            self._first = node.prev
+            self._first.next = None
+            return
+        if self._last is node:
+            self._last = node.next
+            self._last.prev = None
+            return
+        node.next.prev = node.prev
+        node.prev.next = node.next
+
+    def __len__(self):
+        return len(self._dict)
+
+    def __contains__(self, key):
+        return key in self._dict
+
+    def __iter__(self):
+        return iter(self._dict)
+
+    def items(self):
+        return [(key, node.value) for key, node in self._dict.items()]
+
+    def values(self):
+        return [node.value for node in self._dict.values()]
+
+    def keys(self):
+        return self._dict.keys()
diff --git a/Tools/Scripts/webkitpy/common/lru_cache_unittest.py b/Tools/Scripts/webkitpy/common/lru_cache_unittest.py
new file mode 100644
index 0000000..44a09e6
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/lru_cache_unittest.py
@@ -0,0 +1,98 @@
+#!/usr/bin/env python
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+import unittest
+
+from webkitpy.common import lru_cache
+
+
+class LRUCacheTest(unittest.TestCase):
+
+    def setUp(self):
+        self.lru = lru_cache.LRUCache(3)
+        self.lru['key_1'] = 'item_1'
+        self.lru['key_2'] = 'item_2'
+        self.lru['key_3'] = 'item_3'
+
+        self.lru2 = lru_cache.LRUCache(1)
+        self.lru2['key_1'] = 'item_1'
+
+    def test_items(self):
+        self.assertEqual(set(self.lru.items()), set([('key_1', 'item_1'), ('key_3', 'item_3'), ('key_2', 'item_2')]))
+
+    def test_put(self):
+        self.lru['key_4'] = 'item_4'
+        self.assertEqual(set(self.lru.items()), set([('key_4', 'item_4'), ('key_3', 'item_3'), ('key_2', 'item_2')]))
+
+    def test_update(self):
+        self.lru['key_1']
+        self.lru['key_5'] = 'item_5'
+        self.assertEqual(set(self.lru.items()), set([('key_1', 'item_1'), ('key_3', 'item_3'), ('key_5', 'item_5')]))
+
+    def test_keys(self):
+        self.assertEqual(set(self.lru.keys()), set(['key_1', 'key_2', 'key_3']))
+
+    def test_delete(self):
+        del self.lru['key_1']
+        self.assertFalse('key_1' in self.lru)
+
+    def test_contain(self):
+        self.assertTrue('key_1' in self.lru)
+        self.assertFalse('key_4' in self.lru)
+
+    def test_values(self):
+        self.assertEqual(set(self.lru.values()), set(['item_1', 'item_2', 'item_3']))
+
+    def test_len(self):
+        self.assertEqual(len(self.lru), 3)
+
+    def test_size_one_pop(self):
+        self.lru2['key_2'] = 'item_2'
+        self.assertEqual(self.lru2.keys(), ['key_2'])
+
+    def test_size_one_delete(self):
+        del self.lru2['key_1']
+        self.assertFalse('key_1' in self.lru2)
+
+    def test_pop_error(self):
+        self.assertRaises(KeyError, self.lru2.__getitem__, 'key_2')
+        del self.lru2['key_1']
+        self.assertRaises(KeyError, self.lru2.__getitem__, 'key_2')
+
+    def test_get_middle_item(self):
+        self.lru['key_2']
+        self.lru['key_4'] = 'item_4'
+        self.lru['key_5'] = 'item_5'
+        self.assertEqual(set(self.lru.keys()), set(['key_2', 'key_4', 'key_5']))
+
+    def test_set_again(self):
+        self.lru['key_1'] = 'item_4'
+        self.assertEqual(set(self.lru.items()), set([('key_1', 'item_4'), ('key_3', 'item_3'), ('key_2', 'item_2')]))
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/memoized.py b/Tools/Scripts/webkitpy/common/memoized.py
new file mode 100644
index 0000000..dc844a5
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/memoized.py
@@ -0,0 +1,55 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Python does not (yet) seem to provide automatic memoization.  So we've
+# written a small decorator to do so.
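+#
+# Illustrative usage (hypothetical class, not part of this module):
+#
+#     class Calculator(object):
+#         @memoized
+#         def square(self, value):
+#             return value * value
+#
+# Repeated calls with the same arguments return the cached result without
+# re-running the method body. The cache key is the full positional-argument
+# tuple (including self for methods), so all arguments must be hashable.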
+
+import functools
+
+
+class memoized(object):
+    def __init__(self, function):
+        self._function = function
+        self._results_cache = {}
+
+    def __call__(self, *args):
+        try:
+            return self._results_cache[args]
+        except KeyError:
+            # If we didn't find the args in our cache, call and save the results.
+            result = self._function(*args)
+            self._results_cache[args] = result
+            return result
+        # FIXME: We may need to handle TypeError here in the case
+        # that "args" is not a valid dictionary key.
+
+    # Use python "descriptor" protocol __get__ to appear
+    # invisible during property access.
+    def __get__(self, instance, owner):
+        # Return a function partial with obj already bound as self.
+        return functools.partial(self.__call__, instance)
diff --git a/Tools/Scripts/webkitpy/common/memoized_unittest.py b/Tools/Scripts/webkitpy/common/memoized_unittest.py
new file mode 100644
index 0000000..dd7c793
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/memoized_unittest.py
@@ -0,0 +1,65 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.memoized import memoized
+
+
+class _TestObject(object):
+    def __init__(self):
+        self.callCount = 0
+
+    @memoized
+    def memoized_add(self, argument):
+        """testing docstring"""
+        self.callCount += 1
+        if argument is None:
+            return None  # Avoid the TypeError from None + 1
+        return argument + 1
+
+
+class MemoizedTest(unittest.TestCase):
+    def test_caching(self):
+        test = _TestObject()
+        test.callCount = 0
+        self.assertEqual(test.memoized_add(1), 2)
+        self.assertEqual(test.callCount, 1)
+        self.assertEqual(test.memoized_add(1), 2)
+        self.assertEqual(test.callCount, 1)
+
+        # Validate that callCount is working as expected.
+        self.assertEqual(test.memoized_add(2), 3)
+        self.assertEqual(test.callCount, 2)
+
+    def test_tearoff(self):
+        test = _TestObject()
+        # Make sure that get()/tear-offs work:
+        tearoff = test.memoized_add
+        self.assertEqual(tearoff(4), 5)
+        self.assertEqual(test.callCount, 1)
diff --git a/Tools/Scripts/webkitpy/common/message_pool.py b/Tools/Scripts/webkitpy/common/message_pool.py
new file mode 100644
index 0000000..2e1e85e
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/message_pool.py
@@ -0,0 +1,324 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Module for handling messages and concurrency for run-webkit-tests
+and test-webkitpy. This module follows the design of multiprocessing.Pool
+and concurrent.futures.ProcessPoolExecutor, with the following differences:
+
+* Tasks are executed in stateful subprocesses via objects that implement the
+  Worker interface - this allows the workers to share state across tasks.
+* The pool provides an asynchronous event-handling interface so the caller
+  may receive events as tasks are processed.
+
+If you don't need these features, use multiprocessing.Pool or
+concurrent.futures instead.
+
+"""
+
+import cPickle
+import logging
+import multiprocessing
+import Queue
+import sys
+import time
+import traceback
+
+
+from webkitpy.common.host import Host
+from webkitpy.common.system import stack_utils
+
+
+_log = logging.getLogger(__name__)
+
+
+def get(caller, worker_factory, num_workers, worker_startup_delay_secs=0.0, host=None):
+    """Returns an object that exposes a run() method that takes a list of test shards and runs them in parallel."""
+    return _MessagePool(caller, worker_factory, num_workers, worker_startup_delay_secs, host)
+
+
+class _MessagePool(object):
+    def __init__(self, caller, worker_factory, num_workers, worker_startup_delay_secs=0.0, host=None):
+        self._caller = caller
+        self._worker_factory = worker_factory
+        self._num_workers = num_workers
+        self._worker_startup_delay_secs = worker_startup_delay_secs
+        self._workers = []
+        self._workers_stopped = set()
+        self._host = host
+        self._name = 'manager'
+        self._running_inline = (self._num_workers == 1)
+        if self._running_inline:
+            self._messages_to_worker = Queue.Queue()
+            self._messages_to_manager = Queue.Queue()
+        else:
+            self._messages_to_worker = multiprocessing.Queue()
+            self._messages_to_manager = multiprocessing.Queue()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, exc_traceback):
+        self._close()
+        return False
+
+    def run(self, shards):
+        """Posts a list of messages to the pool and waits for them to complete."""
+        for message in shards:
+            self._messages_to_worker.put(_Message(self._name, message[0], message[1:], from_user=True, logs=()))
+
+        for _ in xrange(self._num_workers):
+            self._messages_to_worker.put(_Message(self._name, 'stop', message_args=(), from_user=False, logs=()))
+
+        self.wait()
+
+    def _start_workers(self):
+        assert not self._workers
+        self._workers_stopped = set()
+        host = None
+        if self._running_inline or self._can_pickle(self._host):
+            host = self._host
+
+        for worker_number in xrange(self._num_workers):
+            worker = _Worker(host, self._messages_to_manager, self._messages_to_worker, self._worker_factory, worker_number, self._running_inline, self if self._running_inline else None, self._worker_log_level())
+            self._workers.append(worker)
+            worker.start()
+            if self._worker_startup_delay_secs:
+                time.sleep(self._worker_startup_delay_secs)
+
+    def _worker_log_level(self):
+        log_level = logging.NOTSET
+        for handler in logging.root.handlers:
+            if handler.level != logging.NOTSET:
+                if log_level == logging.NOTSET:
+                    log_level = handler.level
+                else:
+                    log_level = min(log_level, handler.level)
+        return log_level
+
+    def wait(self):
+        try:
+            self._start_workers()
+            if self._running_inline:
+                self._workers[0].run()
+                self._loop(block=False)
+            else:
+                self._loop(block=True)
+        finally:
+            self._close()
+
+    def _close(self):
+        for worker in self._workers:
+            if worker.is_alive():
+                worker.terminate()
+                worker.join()
+        self._workers = []
+        if not self._running_inline:
+            # FIXME: This is a hack to get multiprocessing to not log tracebacks during shutdown :(.
+            multiprocessing.util._exiting = True
+            if self._messages_to_worker:
+                self._messages_to_worker.close()
+                self._messages_to_worker = None
+            if self._messages_to_manager:
+                self._messages_to_manager.close()
+                self._messages_to_manager = None
+
+    def _log_messages(self, messages):
+        for message in messages:
+            logging.root.handle(message)
+
+    def _handle_done(self, source):
+        self._workers_stopped.add(source)
+
+    @staticmethod
+    def _handle_worker_exception(source, exception_type, exception_value, _):
+        if exception_type == KeyboardInterrupt:
+            raise exception_type(exception_value)
+        raise WorkerException(str(exception_value))
+
+    def _can_pickle(self, host):
+        try:
+            cPickle.dumps(host)
+            return True
+        except TypeError:
+            return False
+
+    def _loop(self, block):
+        try:
+            while True:
+                if len(self._workers_stopped) == len(self._workers):
+                    block = False
+                message = self._messages_to_manager.get(block)
+                self._log_messages(message.logs)
+                if message.from_user:
+                    self._caller.handle(message.name, message.src, *message.args)
+                    continue
+                method = getattr(self, '_handle_' + message.name)
+                assert method, 'bad message %s' % repr(message)
+                method(message.src, *message.args)
+        except Queue.Empty:
+            pass
+
+
+class WorkerException(Exception):
+    """Raised when we receive an unexpected/unknown exception from a worker."""
+    pass
+
+
+class _Message(object):
+    def __init__(self, src, message_name, message_args, from_user, logs):
+        self.src = src
+        self.name = message_name
+        self.args = message_args
+        self.from_user = from_user
+        self.logs = logs
+
+    def __repr__(self):
+        return '_Message(src=%s, name=%s, args=%s, from_user=%s, logs=%s)' % (self.src, self.name, self.args, self.from_user, self.logs)
+
+
+class _Worker(multiprocessing.Process):
+    def __init__(self, host, messages_to_manager, messages_to_worker, worker_factory, worker_number, running_inline, manager, log_level):
+        super(_Worker, self).__init__()
+        self.host = host
+        self.worker_number = worker_number
+        self.name = 'worker/%d' % worker_number
+        self.log_messages = []
+        self.log_level = log_level
+        self._running_inline = running_inline
+        self._manager = manager
+
+        self._messages_to_manager = messages_to_manager
+        self._messages_to_worker = messages_to_worker
+        self._worker = worker_factory(self)
+        self._logger = None
+        self._log_handler = None
+
+    def terminate(self):
+        if self._worker:
+            if hasattr(self._worker, 'stop'):
+                self._worker.stop()
+            self._worker = None
+        if self.is_alive():
+            super(_Worker, self).terminate()
+
+    def _close(self):
+        if self._log_handler and self._logger:
+            self._logger.removeHandler(self._log_handler)
+        self._log_handler = None
+        self._logger = None
+
+    def start(self):
+        if not self._running_inline:
+            super(_Worker, self).start()
+
+    def run(self):
+        if not self.host:
+            self.host = Host()
+        if not self._running_inline:
+            self._set_up_logging()
+
+        worker = self._worker
+        exception_msg = ""
+        _log.debug("%s starting" % self.name)
+
+        try:
+            if hasattr(worker, 'start'):
+                worker.start()
+            while True:
+                message = self._messages_to_worker.get()
+                if message.from_user:
+                    worker.handle(message.name, message.src, *message.args)
+                    self._yield_to_manager()
+                else:
+                    assert message.name == 'stop', 'bad message %s' % repr(message)
+                    break
+
+            _log.debug("%s exiting" % self.name)
+        except Queue.Empty:
+            assert False, '%s: ran out of messages in worker queue.' % self.name
+        except KeyboardInterrupt, e:
+            self._raise(sys.exc_info())
+        except Exception, e:
+            self._raise(sys.exc_info())
+        finally:
+            try:
+                if hasattr(worker, 'stop'):
+                    worker.stop()
+            finally:
+                self._post(name='done', args=(), from_user=False)
+            self._close()
+
+    def post(self, name, *args):
+        self._post(name, args, from_user=True)
+        self._yield_to_manager()
+
+    def _yield_to_manager(self):
+        if self._running_inline:
+            self._manager._loop(block=False)
+
+    def _post(self, name, args, from_user):
+        log_messages = self.log_messages
+        self.log_messages = []
+        self._messages_to_manager.put(_Message(self.name, name, args, from_user, log_messages))
+
+    def _raise(self, exc_info):
+        exception_type, exception_value, exception_traceback = exc_info
+        if self._running_inline:
+            raise exception_type, exception_value, exception_traceback
+
+        if exception_type == KeyboardInterrupt:
+            _log.debug("%s: interrupted, exiting" % self.name)
+            stack_utils.log_traceback(_log.debug, exception_traceback)
+        else:
+            _log.error("%s: %s('%s') raised:" % (self.name, exception_value.__class__.__name__, str(exception_value)))
+            stack_utils.log_traceback(_log.error, exception_traceback)
+        # Since tracebacks aren't picklable, send the extracted stack instead.
+        stack = traceback.extract_tb(exception_traceback)
+        self._post(name='worker_exception', args=(exception_type, exception_value, stack), from_user=False)
+
+    def _set_up_logging(self):
+        self._logger = logging.getLogger()
+
+        # The unix multiprocessing implementation clones any log handlers into the child process,
+        # so we remove them to avoid duplicate logging.
+        for h in self._logger.handlers:
+            self._logger.removeHandler(h)
+
+        self._log_handler = _WorkerLogHandler(self)
+        self._logger.addHandler(self._log_handler)
+        self._logger.setLevel(self.log_level)
+
+
+class _WorkerLogHandler(logging.Handler):
+    def __init__(self, worker):
+        logging.Handler.__init__(self)
+        self._worker = worker
+        self.setLevel(worker.log_level)
+
+    def emit(self, record):
+        self._worker.log_messages.append(record)
diff --git a/Tools/Scripts/webkitpy/common/multiprocessing_bootstrap.py b/Tools/Scripts/webkitpy/common/multiprocessing_bootstrap.py
new file mode 100755
index 0000000..1189776
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/multiprocessing_bootstrap.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""In order for the multiprocessing module to spawn children correctly on
+Windows, we need to be running a Python module that can be imported
+(which means a .py file in a directory on sys.path). In addition, we need to
+ensure that sys.path / PYTHONPATH is set and propagated correctly.
+
+This module enforces that."""
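+
+# For example, a thin top-level wrapper script might do something like this
+# (illustrative only; the module path is just an example):
+#
+#     from webkitpy.common import multiprocessing_bootstrap
+#     multiprocessing_bootstrap.run('webkitpy', 'layout_tests', 'run_webkit_tests.py')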
+
+import os
+import subprocess
+import sys
+
+from webkitpy.common import version_check   # 'unused import' pylint: disable=W0611
+
+
+def run(*parts):
+    up = os.path.dirname
+    script_dir = up(up(up(os.path.abspath(__file__))))
+    env = os.environ
+    if 'PYTHONPATH' in env:
+        if script_dir not in env['PYTHONPATH']:
+            env['PYTHONPATH'] = env['PYTHONPATH'] + os.pathsep + script_dir
+    else:
+        env['PYTHONPATH'] = script_dir
+    module_path = os.path.join(script_dir, *parts)
+    cmd = [sys.executable, module_path] + sys.argv[1:]
+
+    # Wrap this process in the jhbuild environment so that DRT or WKTR
+    # doesn't need to do it, and so that their process ids, as reported
+    # by subprocess.Popen, are their own rather than jhbuild's.
+    if '--gtk' in sys.argv[1:] and os.path.exists(os.path.join(script_dir, '..', '..', 'WebKitBuild', 'Dependencies')):
+        cmd.insert(1, os.path.join(script_dir, '..', 'gtk', 'run-with-jhbuild'))
+
+    proc = subprocess.Popen(cmd, env=env)
+    try:
+        proc.wait()
+    except KeyboardInterrupt:
+        # We need a second wait in order to make sure the subprocess exits fully.
+        # FIXME: It would be nice if we could put a timeout on this.
+        proc.wait()
+    sys.exit(proc.returncode)
diff --git a/Tools/Scripts/webkitpy/common/net/__init__.py b/Tools/Scripts/webkitpy/common/net/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/Tools/Scripts/webkitpy/common/net/bugzilla/__init__.py b/Tools/Scripts/webkitpy/common/net/bugzilla/__init__.py
new file mode 100644
index 0000000..c427b18
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/bugzilla/__init__.py
@@ -0,0 +1,7 @@
+# Required for Python to search this directory for module files
+
+# We only export public API here.
+from .bugzilla import Bugzilla
+# Unclear if Bug and Attachment need to be public classes.
+from .bug import Bug
+from .attachment import Attachment
diff --git a/Tools/Scripts/webkitpy/common/net/bugzilla/attachment.py b/Tools/Scripts/webkitpy/common/net/bugzilla/attachment.py
new file mode 100644
index 0000000..6e10d65
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/bugzilla/attachment.py
@@ -0,0 +1,118 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+# Copyright (c) 2010 Research In Motion Limited. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.memoized import memoized
+from webkitpy.common.system.deprecated_logging import log
+
+
+class Attachment(object):
+
+    rollout_preamble = "ROLLOUT of r"
+
+    def __init__(self, attachment_dictionary, bug):
+        self._attachment_dictionary = attachment_dictionary
+        self._bug = bug
+        # FIXME: These should be replaced with @memoized after updating mocks.
+        self._reviewer = None
+        self._committer = None
+
+    def _bugzilla(self):
+        return self._bug._bugzilla
+
+    def id(self):
+        return int(self._attachment_dictionary.get("id"))
+
+    @memoized
+    def attacher(self):
+        return self._bugzilla().committers.contributor_by_email(self.attacher_email())
+
+    def attacher_email(self):
+        return self._attachment_dictionary.get("attacher_email")
+
+    def bug(self):
+        return self._bug
+
+    def bug_id(self):
+        return int(self._attachment_dictionary.get("bug_id"))
+
+    def is_patch(self):
+        return not not self._attachment_dictionary.get("is_patch")
+
+    def is_obsolete(self):
+        return not not self._attachment_dictionary.get("is_obsolete")
+
+    def is_rollout(self):
+        return self.name().startswith(self.rollout_preamble)
+
+    def name(self):
+        return self._attachment_dictionary.get("name")
+
+    def attach_date(self):
+        return self._attachment_dictionary.get("attach_date")
+
+    def review(self):
+        return self._attachment_dictionary.get("review")
+
+    def commit_queue(self):
+        return self._attachment_dictionary.get("commit-queue")
+
+    def url(self):
+        # FIXME: This should just return
+        # self._bugzilla().attachment_url_for_id(self.id()). scm_unittest.py
+        # depends on the current behavior.
+        return self._attachment_dictionary.get("url")
+
+    def contents(self):
+        # FIXME: We shouldn't be grabbing at _bugzilla.
+        return self._bug._bugzilla.fetch_attachment_contents(self.id())
+
+    def _validate_flag_value(self, flag):
+        email = self._attachment_dictionary.get("%s_email" % flag)
+        if not email:
+            return None
+        # FIXME: This is not a robust way to call committer_by_email
+        committer = getattr(self._bugzilla().committers,
+                            "%s_by_email" % flag)(email)
+        if committer:
+            return committer
+        log("Warning, attachment %s on bug %s has invalid %s (%s)" % (
+                 self._attachment_dictionary['id'],
+                 self._attachment_dictionary['bug_id'], flag, email))
+
+    # FIXME: These could use @memoized like attacher(), but unit tests would need updates.
+    def reviewer(self):
+        if not self._reviewer:
+            self._reviewer = self._validate_flag_value("reviewer")
+        return self._reviewer
+
+    def committer(self):
+        if not self._committer:
+            self._committer = self._validate_flag_value("committer")
+        return self._committer
diff --git a/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py b/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
new file mode 100644
index 0000000..4bf8ec6
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
@@ -0,0 +1,125 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+# Copyright (c) 2010 Research In Motion Limited. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from .attachment import Attachment
+
+
+class Bug(object):
+    # FIXME: This class is kind of a hack for now.  It exists so we have one
+    # place to hold bug logic, even if much of the code still deals with
+    # dictionaries.
+
+    def __init__(self, bug_dictionary, bugzilla):
+        self.bug_dictionary = bug_dictionary
+        self._bugzilla = bugzilla
+
+    def id(self):
+        return self.bug_dictionary["id"]
+
+    def title(self):
+        # FIXME: Do we need to HTML unescape the title?
+        return self.bug_dictionary["title"]
+
+    def reporter_email(self):
+        return self.bug_dictionary["reporter_email"]
+
+    def assigned_to_email(self):
+        return self.bug_dictionary["assigned_to_email"]
+
+    def cc_emails(self):
+        return self.bug_dictionary["cc_emails"]
+
+    # FIXME: This information should be stored in some sort of webkit_config.py instead of here.
+    unassigned_emails = frozenset([
+        "webkit-unassigned@lists.webkit.org",
+        "webkit-qt-unassigned@trolltech.com",
+    ])
+
+    def is_unassigned(self):
+        return self.assigned_to_email() in self.unassigned_emails
+
+    def status(self):
+        return self.bug_dictionary["bug_status"]
+
+    # Bugzilla has many status states we don't really use in WebKit:
+    # https://bugs.webkit.org/page.cgi?id=fields.html#status
+    _open_states = ["UNCONFIRMED", "NEW", "ASSIGNED", "REOPENED"]
+    _closed_states = ["RESOLVED", "VERIFIED", "CLOSED"]
+
+    def is_open(self):
+        return self.status() in self._open_states
+
+    def is_closed(self):
+        return not self.is_open()
+
+    def duplicate_of(self):
+        return self.bug_dictionary.get('dup_id', None)
+
+    # Rarely do we actually want obsolete attachments
+    def attachments(self, include_obsolete=False):
+        attachments = self.bug_dictionary["attachments"]
+        if not include_obsolete:
+            attachments = filter(lambda attachment:
+                                 not attachment["is_obsolete"], attachments)
+        return [Attachment(attachment, self) for attachment in attachments]
+
+    def patches(self, include_obsolete=False):
+        return [patch for patch in self.attachments(include_obsolete)
+                                   if patch.is_patch()]
+
+    def unreviewed_patches(self):
+        return [patch for patch in self.patches() if patch.review() == "?"]
+
+    def reviewed_patches(self, include_invalid=False):
+        patches = [patch for patch in self.patches() if patch.review() == "+"]
+        if include_invalid:
+            return patches
+        # Checking reviewer() ensures that it was both reviewed and has a valid
+        # reviewer.
+        return filter(lambda patch: patch.reviewer(), patches)
+
+    def commit_queued_patches(self, include_invalid=False):
+        patches = [patch for patch in self.patches()
+                                      if patch.commit_queue() == "+"]
+        if include_invalid:
+            return patches
+        # Checking committer() ensures that it was both commit-queue+'d and has
+        # a valid committer.
+        return filter(lambda patch: patch.committer(), patches)
+
+    def comments(self):
+        return self.bug_dictionary["comments"]
+
+    def is_in_comments(self, message):
+        for comment in self.comments():
+            if message in comment["text"]:
+                return True
+        return False
+
diff --git a/Tools/Scripts/webkitpy/common/net/bugzilla/bug_unittest.py b/Tools/Scripts/webkitpy/common/net/bugzilla/bug_unittest.py
new file mode 100644
index 0000000..f20c601
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/bugzilla/bug_unittest.py
@@ -0,0 +1,47 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from .bug import Bug
+
+
+class BugTest(unittest.TestCase):
+    def test_is_unassigned(self):
+        for email in Bug.unassigned_emails:
+            bug = Bug({"assigned_to_email": email}, bugzilla=None)
+            self.assertTrue(bug.is_unassigned())
+        bug = Bug({"assigned_to_email": "test@test.com"}, bugzilla=None)
+        self.assertFalse(bug.is_unassigned())
+
+    def test_is_in_comments(self):
+        bug = Bug({"comments": [{"text": "Message1."},
+                                {"text": "Message2. Message3. Message4."}, ], },
+                  bugzilla=None)
+        self.assertTrue(bug.is_in_comments("Message3."))
+        self.assertFalse(bug.is_in_comments("Message."))
diff --git a/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla.py b/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla.py
new file mode 100644
index 0000000..651e1b3
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla.py
@@ -0,0 +1,856 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+# Copyright (c) 2010 Research In Motion Limited. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# WebKit's Python module for interacting with Bugzilla
+
+import mimetypes
+import re
+import StringIO
+import socket
+import urllib
+
+from datetime import datetime # used in timestamp()
+
+from .attachment import Attachment
+from .bug import Bug
+
+from webkitpy.common.system.deprecated_logging import log
+from webkitpy.common.config import committers
+import webkitpy.common.config.urls as config_urls
+from webkitpy.common.net.credentials import Credentials
+from webkitpy.common.system.user import User
+from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup, BeautifulStoneSoup, SoupStrainer
+
+
+class EditUsersParser(object):
+    def __init__(self):
+        self._group_name_to_group_string_cache = {}
+
+    def _login_and_uid_from_row(self, row):
+        first_cell = row.find("td")
+        # The first row is just headers, we skip it.
+        if not first_cell:
+            return None
+        # When there are no results, the table contains a fake "<none>" entry.
+        if first_cell.find(text="<none>"):
+            return None
+        # Otherwise the <td> contains a single <a> which contains the login name or a single <i> with the string "<none>".
+        anchor_tag = first_cell.find("a")
+        login = unicode(anchor_tag.string).strip()
+        user_id = int(re.search(r"userid=(\d+)", str(anchor_tag['href'])).group(1))
+        return (login, user_id)
+
+    def login_userid_pairs_from_edit_user_results(self, results_page):
+        soup = BeautifulSoup(results_page, convertEntities=BeautifulStoneSoup.HTML_ENTITIES)
+        results_table = soup.find(id="admin_table")
+        login_userid_pairs = [self._login_and_uid_from_row(row) for row in results_table('tr')]
+        # Filter out None from the logins.
+        return filter(lambda pair: bool(pair), login_userid_pairs)
+
+    def _group_name_and_string_from_row(self, row):
+        label_element = row.find('label')
+        group_string = unicode(label_element['for'])
+        group_name = unicode(label_element.find('strong').string).rstrip(':')
+        return (group_name, group_string)
+
+    def user_dict_from_edit_user_page(self, page):
+        soup = BeautifulSoup(page, convertEntities=BeautifulStoneSoup.HTML_ENTITIES)
+        user_table = soup.find("table", {'class': 'main'})
+        user_dict = {}
+        for row in user_table('tr'):
+            label_element = row.find('label')
+            if not label_element:
+                continue  # This must not be a row we know how to parse.
+            if row.find('table'):
+                continue  # Skip the <tr> holding the groups table.
+
+            key = label_element['for']
+            if "group" in key:
+                key = "groups"
+                value = user_dict.get('groups', set())
+                # We must be parsing a "tr" inside the inner group table.
+                (group_name, _) = self._group_name_and_string_from_row(row)
+                if row.find('input', {'type': 'checkbox', 'checked': 'checked'}):
+                    value.add(group_name)
+            else:
+                value = unicode(row.find('td').string).strip()
+            user_dict[key] = value
+        return user_dict
+
+    def _group_rows_from_edit_user_page(self, edit_user_page):
+        soup = BeautifulSoup(edit_user_page, convertEntities=BeautifulSoup.HTML_ENTITIES)
+        return soup('td', {'class': 'groupname'})
+
+    def group_string_from_name(self, edit_user_page, group_name):
+        # Bugzilla uses "group_NUMBER" strings, which may be different per install
+        # so we just look them up once and cache them.
+        if not self._group_name_to_group_string_cache:
+            rows = self._group_rows_from_edit_user_page(edit_user_page)
+            name_string_pairs = map(self._group_name_and_string_from_row, rows)
+            self._group_name_to_group_string_cache = dict(name_string_pairs)
+        return self._group_name_to_group_string_cache[group_name]
+
+
+def timestamp():
+    return datetime.now().strftime("%Y%m%d%H%M%S")
+
+
+# A container for all of the logic for making and parsing bugzilla queries.
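+#
+# Typical (illustrative) usage, via the Bugzilla object defined below:
+#
+#     bugzilla = Bugzilla()
+#     attachment_ids = bugzilla.queries.fetch_attachment_ids_from_review_queue()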
+class BugzillaQueries(object):
+
+    def __init__(self, bugzilla):
+        self._bugzilla = bugzilla
+
+    def _is_xml_bugs_form(self, form):
+        # ClientForm.HTMLForm.find_control throws if the control is not found,
+        # so we do a manual search instead:
+        return "xml" in [control.id for control in form.controls]
+
+    # This is kind of a hack.  There is probably a better way to get this information from Bugzilla.
+    def _parse_result_count(self, results_page):
+        result_count_text = BeautifulSoup(results_page).find(attrs={'class': 'bz_result_count'}).string
+        result_count_parts = result_count_text.strip().split(" ")
+        if result_count_parts[0] == "Zarro":
+            return 0
+        if result_count_parts[0] == "One":
+            return 1
+        return int(result_count_parts[0])
+
+    # Note: _load_query, _fetch_bug and _fetch_bugs_from_advanced_query
+    # are the only methods which access self._bugzilla.
+
+    def _load_query(self, query):
+        self._bugzilla.authenticate()
+        full_url = "%s%s" % (config_urls.bug_server_url, query)
+        return self._bugzilla.browser.open(full_url)
+
+    def _fetch_bugs_from_advanced_query(self, query):
+        results_page = self._load_query(query)
+        # Some simple searches can return a single result.
+        results_url = results_page.geturl()
+        if results_url.find("/show_bug.cgi?id=") != -1:
+            bug_id = int(results_url.split("=")[-1])
+            return [self._fetch_bug(bug_id)]
+        if not self._parse_result_count(results_page):
+            return []
+        # Bugzilla results pages have an "XML" submit button at the bottom
+        # which can be used to get an XML page containing all of the <bug> elements.
+        # It is slightly lame that this assumes _load_query used
+        # self._bugzilla.browser and that the browser is in an acceptable state.
+        self._bugzilla.browser.select_form(predicate=self._is_xml_bugs_form)
+        bugs_xml = self._bugzilla.browser.submit()
+        return self._bugzilla._parse_bugs_from_xml(bugs_xml)
+
+    def _fetch_bug(self, bug_id):
+        return self._bugzilla.fetch_bug(bug_id)
+
+    def _fetch_bug_ids_advanced_query(self, query):
+        soup = BeautifulSoup(self._load_query(query))
+        # The contents of the <a> inside the cells in the first column happen
+        # to be the bug id.
+        return [int(bug_link_cell.find("a").string)
+                for bug_link_cell in soup('td', "first-child")]
+
+    def _parse_attachment_ids_request_query(self, page):
+        digits = re.compile(r"\d+")
+        attachment_href = re.compile(r"attachment.cgi\?id=\d+&action=review")
+        attachment_links = SoupStrainer("a", href=attachment_href)
+        return [int(digits.search(tag["href"]).group(0))
+                for tag in BeautifulSoup(page, parseOnlyThese=attachment_links)]
+
+    def _fetch_attachment_ids_request_query(self, query):
+        return self._parse_attachment_ids_request_query(self._load_query(query))
+
+    def _parse_quips(self, page):
+        soup = BeautifulSoup(page, convertEntities=BeautifulSoup.HTML_ENTITIES)
+        quips = soup.find(text=re.compile(r"Existing quips:")).findNext("ul").findAll("li")
+        return [unicode(quip_entry.string) for quip_entry in quips]
+
+    def fetch_quips(self):
+        return self._parse_quips(self._load_query("/quips.cgi?action=show"))
+
+    # List of all r+'d bugs.
+    def fetch_bug_ids_from_pending_commit_list(self):
+        needs_commit_query_url = "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=review%2B"
+        return self._fetch_bug_ids_advanced_query(needs_commit_query_url)
+
+    def fetch_bugs_matching_quicksearch(self, search_string):
+        # We may want to use a more explicit query than "quicksearch".
+        # If quicksearch changes we should probably change to use
+        # a normal buglist.cgi?query_format=advanced query.
+        quicksearch_url = "buglist.cgi?quicksearch=%s" % urllib.quote(search_string)
+        return self._fetch_bugs_from_advanced_query(quicksearch_url)
+
+    # Currently this returns all bugs across all components.
+    # In the future we may wish to extend this API to construct more restricted searches.
+    def fetch_bugs_matching_search(self, search_string):
+        query = "buglist.cgi?query_format=advanced"
+        if search_string:
+            query += "&short_desc_type=allwordssubstr&short_desc=%s" % urllib.quote(search_string)
+        return self._fetch_bugs_from_advanced_query(query)
+
+    def fetch_patches_from_pending_commit_list(self):
+        return sum([self._fetch_bug(bug_id).reviewed_patches()
+            for bug_id in self.fetch_bug_ids_from_pending_commit_list()], [])
+
+    def fetch_bugs_from_review_queue(self, cc_email=None):
+        query = "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=review?"
+
+        if cc_email:
+            query += "&emailcc1=1&emailtype1=substring&email1=%s" % urllib.quote(cc_email)
+
+        return self._fetch_bugs_from_advanced_query(query)
+
+    def fetch_bug_ids_from_commit_queue(self):
+        commit_queue_url = "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=commit-queue%2B&order=Last+Changed"
+        return self._fetch_bug_ids_advanced_query(commit_queue_url)
+
+    def fetch_patches_from_commit_queue(self):
+        # This function will only return patches which have valid committers
+        # set.  It won't reject patches with invalid committers/reviewers.
+        return sum([self._fetch_bug(bug_id).commit_queued_patches()
+                    for bug_id in self.fetch_bug_ids_from_commit_queue()], [])
+
+    def fetch_bug_ids_from_review_queue(self):
+        review_queue_url = "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=review?"
+        return self._fetch_bug_ids_advanced_query(review_queue_url)
+
+    # This method will make several requests to bugzilla.
+    def fetch_patches_from_review_queue(self, limit=None):
+        # Slicing with [:None] returns the whole list.
+        return sum([self._fetch_bug(bug_id).unreviewed_patches()
+            for bug_id in self.fetch_bug_ids_from_review_queue()[:limit]], [])
+
+    # NOTE: This is the only client of _fetch_attachment_ids_request_query
+    # This method only makes one request to bugzilla.
+    def fetch_attachment_ids_from_review_queue(self):
+        review_queue_url = "request.cgi?action=queue&type=review&group=type"
+        return self._fetch_attachment_ids_request_query(review_queue_url)
+
+    # This only works if your account has edituser privileges.
+    # We could easily parse https://bugs.webkit.org/userprefs.cgi?tab=permissions to
+    # check permissions, but bugzilla will just return an error if we don't have them.
+    def fetch_login_userid_pairs_matching_substring(self, search_string):
+        review_queue_url = "editusers.cgi?action=list&matchvalue=login_name&matchstr=%s&matchtype=substr" % urllib.quote(search_string)
+        results_page = self._load_query(review_queue_url)
+        # We could pull the EditUsersParser off Bugzilla if needed.
+        return EditUsersParser().login_userid_pairs_from_edit_user_results(results_page)
+
+    # FIXME: We should consider adding a BugzillaUser class.
+    def fetch_logins_matching_substring(self, search_string):
+        pairs = self.fetch_login_userid_pairs_matching_substring(search_string)
+        return map(lambda pair: pair[0], pairs)
+
+
+class Bugzilla(object):
+    def __init__(self, committers=committers.CommitterList()):
+        self.authenticated = False
+        self.queries = BugzillaQueries(self)
+        self.committers = committers
+        self.cached_quips = []
+        self.edit_user_parser = EditUsersParser()
+        self._browser = None
+
+    def _get_browser(self):
+        if not self._browser:
+            self.setdefaulttimeout(600)
+            from webkitpy.thirdparty.autoinstalled.mechanize import Browser
+            self._browser = Browser()
+            # Ignore bugs.webkit.org/robots.txt until we fix it to allow this script.
+            self._browser.set_handle_robots(False)
+        return self._browser
+
+    def _set_browser(self, value):
+        self._browser = value
+
+    browser = property(_get_browser, _set_browser)
+
+    def setdefaulttimeout(self, value):
+        socket.setdefaulttimeout(value)
+
+    def fetch_user(self, user_id):
+        self.authenticate()
+        edit_user_page = self.browser.open(self.edit_user_url_for_id(user_id))
+        return self.edit_user_parser.user_dict_from_edit_user_page(edit_user_page)
+
+    def add_user_to_groups(self, user_id, group_names):
+        self.authenticate()
+        user_edit_page = self.browser.open(self.edit_user_url_for_id(user_id))
+        self.browser.select_form(nr=1)
+        for group_name in group_names:
+            group_string = self.edit_user_parser.group_string_from_name(user_edit_page, group_name)
+            self.browser.find_control(group_string).items[0].selected = True
+        self.browser.submit()
+
+    def quips(self):
+        # We only fetch and parse the list of quips once per instantiation
+        # so that we do not burden bugs.webkit.org.
+        if not self.cached_quips:
+            self.cached_quips = self.queries.fetch_quips()
+        return self.cached_quips
+
+    def bug_url_for_bug_id(self, bug_id, xml=False):
+        if not bug_id:
+            return None
+        content_type = "&ctype=xml&excludefield=attachmentdata" if xml else ""
+        return "%sshow_bug.cgi?id=%s%s" % (config_urls.bug_server_url, bug_id, content_type)
+
+    def short_bug_url_for_bug_id(self, bug_id):
+        if not bug_id:
+            return None
+        return "http://webkit.org/b/%s" % bug_id
+
+    def add_attachment_url(self, bug_id):
+        return "%sattachment.cgi?action=enter&bugid=%s" % (config_urls.bug_server_url, bug_id)
+
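+    # For example, attachment_url_for_id(12345) yields
+    # config_urls.bug_server_url + "attachment.cgi?id=12345"; passing
+    # action="edit" appends "&action=edit".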
+    def attachment_url_for_id(self, attachment_id, action="view"):
+        if not attachment_id:
+            return None
+        action_param = ""
+        if action and action != "view":
+            action_param = "&action=%s" % action
+        return "%sattachment.cgi?id=%s%s" % (config_urls.bug_server_url,
+                                             attachment_id,
+                                             action_param)
+
+    def edit_user_url_for_id(self, user_id):
+        return "%seditusers.cgi?action=edit&userid=%s" % (config_urls.bug_server_url, user_id)
+
+    def _parse_attachment_flag(self,
+                               element,
+                               flag_name,
+                               attachment,
+                               result_key):
+        flag = element.find('flag', attrs={'name': flag_name})
+        if flag:
+            attachment[flag_name] = flag['status']
+            if flag['status'] == '+':
+                attachment[result_key] = flag['setter']
+        # Sadly show_bug.cgi?ctype=xml does not expose the flag modification date.
+
+    def _string_contents(self, soup):
+        # WebKit's bugzilla instance uses UTF-8.
+        # BeautifulStoneSoup always returns Unicode strings; however,
+        # the .string attribute returns a (unicode) NavigableString.
+        # NavigableString can confuse other parts of the code, so we
+        # convert from NavigableString to a real unicode() object using unicode().
+        return unicode(soup.string)
+
+    # Example: 2010-01-20 14:31 PST
+    # FIXME: Some bugzilla dates seem to have seconds in them?
+    # Python's datetime does not support timezones out of the box,
+    # so we assume that bugzilla always uses PST (which is true for bugs.webkit.org).
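+    # Concretely, _parse_date("2010-01-20 14:31 PST") below returns
+    # datetime(2010, 1, 20, 14, 31); the timezone token is discarded.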
+    _bugzilla_date_format = "%Y-%m-%d %H:%M:%S"
+
+    @classmethod
+    def _parse_date(cls, date_string):
+        (date, time, time_zone) = date_string.split(" ")
+        if time.count(':') == 1:
+            # Add seconds into the time.
+            time += ':0'
+        # Ignore the timezone because python doesn't understand timezones out of the box.
+        date_string = "%s %s" % (date, time)
+        return datetime.strptime(date_string, cls._bugzilla_date_format)
+
+    def _date_contents(self, soup):
+        return self._parse_date(self._string_contents(soup))
+
+    def _parse_attachment_element(self, element, bug_id):
+        attachment = {}
+        attachment['bug_id'] = bug_id
+        attachment['is_obsolete'] = (element.has_key('isobsolete') and element['isobsolete'] == "1")
+        attachment['is_patch'] = (element.has_key('ispatch') and element['ispatch'] == "1")
+        attachment['id'] = int(element.find('attachid').string)
+        # FIXME: No need to parse out the url here.
+        attachment['url'] = self.attachment_url_for_id(attachment['id'])
+        attachment["attach_date"] = self._date_contents(element.find("date"))
+        attachment['name'] = self._string_contents(element.find('desc'))
+        attachment['attacher_email'] = self._string_contents(element.find('attacher'))
+        attachment['type'] = self._string_contents(element.find('type'))
+        self._parse_attachment_flag(
+                element, 'review', attachment, 'reviewer_email')
+        self._parse_attachment_flag(
+                element, 'commit-queue', attachment, 'committer_email')
+        return attachment
+
+    def _parse_log_descr_element(self, element):
+        comment = {}
+        comment['comment_email'] = self._string_contents(element.find('who'))
+        comment['comment_date'] = self._date_contents(element.find('bug_when'))
+        comment['text'] = self._string_contents(element.find('thetext'))
+        return comment
+
+    def _parse_bugs_from_xml(self, page):
+        soup = BeautifulSoup(page)
+        # Without the unicode() call, BeautifulSoup occasionally complains of being
+        # passed None for no apparent reason.
+        return [Bug(self._parse_bug_dictionary_from_xml(unicode(bug_xml)), self) for bug_xml in soup('bug')]
+
+    def _parse_bug_dictionary_from_xml(self, page):
+        soup = BeautifulStoneSoup(page, convertEntities=BeautifulStoneSoup.XML_ENTITIES)
+        bug = {}
+        bug["id"] = int(soup.find("bug_id").string)
+        bug["title"] = self._string_contents(soup.find("short_desc"))
+        bug["bug_status"] = self._string_contents(soup.find("bug_status"))
+        dup_id = soup.find("dup_id")
+        if dup_id:
+            bug["dup_id"] = self._string_contents(dup_id)
+        bug["reporter_email"] = self._string_contents(soup.find("reporter"))
+        bug["assigned_to_email"] = self._string_contents(soup.find("assigned_to"))
+        bug["cc_emails"] = [self._string_contents(element) for element in soup.findAll('cc')]
+        bug["attachments"] = [self._parse_attachment_element(element, bug["id"]) for element in soup.findAll('attachment')]
+        bug["comments"] = [self._parse_log_descr_element(element) for element in soup.findAll('long_desc')]
+
+        return bug
+
+    # Makes testing fetch_*_from_bug() possible until we have a better
+    # BugzillaNetwork abstraction.
+
+    def _fetch_bug_page(self, bug_id):
+        bug_url = self.bug_url_for_bug_id(bug_id, xml=True)
+        log("Fetching: %s" % bug_url)
+        return self.browser.open(bug_url)
+
+    def fetch_bug_dictionary(self, bug_id):
+        try:
+            return self._parse_bug_dictionary_from_xml(self._fetch_bug_page(bug_id))
+        except KeyboardInterrupt:
+            raise
+        except:
+            self.authenticate()
+            return self._parse_bug_dictionary_from_xml(self._fetch_bug_page(bug_id))
+
+    # FIXME: A BugzillaCache object should provide all these fetch_ methods.
+
+    def fetch_bug(self, bug_id):
+        return Bug(self.fetch_bug_dictionary(bug_id), self)
+
+    def fetch_attachment_contents(self, attachment_id):
+        attachment_url = self.attachment_url_for_id(attachment_id)
+        # We need to authenticate to download patches from security bugs.
+        self.authenticate()
+        return self.browser.open(attachment_url).read()
+
+    def _parse_bug_id_from_attachment_page(self, page):
+        # The "Up" relation happens to point to the bug.
+        up_link = BeautifulSoup(page).find('link', rel='Up')
+        if not up_link:
+            # This attachment does not exist (or you don't have permissions to
+            # view it).
+            return None
+        match = re.search("show_bug.cgi\?id=(?P<bug_id>\d+)", up_link['href'])
+        return int(match.group('bug_id'))
+
+    def bug_id_for_attachment_id(self, attachment_id):
+        self.authenticate()
+
+        attachment_url = self.attachment_url_for_id(attachment_id, 'edit')
+        log("Fetching: %s" % attachment_url)
+        page = self.browser.open(attachment_url)
+        return self._parse_bug_id_from_attachment_page(page)
+
+    # FIXME: This should just return Attachment(id), which should be able to
+    # lazily fetch needed data.
+
+    def fetch_attachment(self, attachment_id):
+        # We could grab all the attachment details off of the attachment edit
+        # page but we already have working code to do so off of the bugs page,
+        # so re-use that.
+        bug_id = self.bug_id_for_attachment_id(attachment_id)
+        if not bug_id:
+            return None
+        attachments = self.fetch_bug(bug_id).attachments(include_obsolete=True)
+        for attachment in attachments:
+            if attachment.id() == int(attachment_id):
+                return attachment
+        return None # This should never be hit.
+
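+    # Logs in via the web form; a failed login is re-prompted (through
+    # Credentials) and only raises after five failed attempts.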
+    def authenticate(self):
+        if self.authenticated:
+            return
+
+        credentials = Credentials(config_urls.bug_server_host, git_prefix="bugzilla")
+
+        attempts = 0
+        while not self.authenticated:
+            attempts += 1
+            username, password = credentials.read_credentials()
+
+            log("Logging in as %s..." % username)
+            self.browser.open(config_urls.bug_server_url +
+                              "index.cgi?GoAheadAndLogIn=1")
+            self.browser.select_form(name="login")
+            self.browser['Bugzilla_login'] = username
+            self.browser['Bugzilla_password'] = password
+            self.browser.find_control("Bugzilla_restrictlogin").items[0].selected = False
+            response = self.browser.submit()
+
+            match = re.search("<title>(.+?)</title>", response.read())
+            # If the resulting page has a title and it contains the word
+            # "invalid", assume it's the login failure page.
+            if match and re.search("Invalid", match.group(1), re.IGNORECASE):
+                errorMessage = "Bugzilla login failed: %s" % match.group(1)
+                # raise an exception only if this was the last attempt
+                if attempts < 5:
+                    log(errorMessage)
+                else:
+                    raise Exception(errorMessage)
+            else:
+                self.authenticated = True
+                self.username = username
+
+    # FIXME: Use enum instead of two booleans
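+    # In short: '+' is returned only when mark_for_landing is set and the
+    # logged-in user has commit rights in committers.py; otherwise a requested
+    # flag degrades to '?' (cq?), and requesting neither yields 'X' (no flag).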
+    def _commit_queue_flag(self, mark_for_landing, mark_for_commit_queue):
+        if mark_for_landing:
+            user = self.committers.account_by_email(self.username)
+            mark_for_commit_queue = True
+            if not user:
+                log("Your Bugzilla login is not listed in committers.py. Uploading with cq? instead of cq+")
+                mark_for_landing = False
+            elif not user.can_commit:
+                log("You're not a committer yet or haven't updated committers.py yet. Uploading with cq? instead of cq+")
+                mark_for_landing = False
+
+        if mark_for_landing:
+            return '+'
+        if mark_for_commit_queue:
+            return '?'
+        return 'X'
+
+    # FIXME: mark_for_commit_queue and mark_for_landing should be joined into a single commit_flag argument.
+    def _fill_attachment_form(self,
+                              description,
+                              file_object,
+                              mark_for_review=False,
+                              mark_for_commit_queue=False,
+                              mark_for_landing=False,
+                              is_patch=False,
+                              filename=None,
+                              mimetype=None):
+        self.browser['description'] = description
+        if is_patch:
+            self.browser['ispatch'] = ("1",)
+        # FIXME: Should this use self._find_select_element_for_flag?
+        self.browser['flag_type-1'] = ('?',) if mark_for_review else ('X',)
+        self.browser['flag_type-3'] = (self._commit_queue_flag(mark_for_landing, mark_for_commit_queue),)
+
+        filename = filename or "%s.patch" % timestamp()
+        if not mimetype:
+            mimetypes.add_type('text/plain', '.patch')  # Make sure mimetypes knows about .patch
+            mimetype, _ = mimetypes.guess_type(filename)
+        if not mimetype:
+            mimetype = "text/plain"  # Bugzilla might auto-guess for us and we might not need this?
+        self.browser.add_file(file_object, mimetype, filename, 'data')
+
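+    # Unicode text is UTF-8 encoded and, like plain byte strings, wrapped in a
+    # StringIO; objects that already expose read() are passed through as-is.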
+    def _file_object_for_upload(self, file_or_string):
+        if hasattr(file_or_string, 'read'):
+            return file_or_string
+        # Only if file_or_string is not already encoded do we want to encode it.
+        if isinstance(file_or_string, unicode):
+            file_or_string = file_or_string.encode('utf-8')
+        return StringIO.StringIO(file_or_string)
+
+    # timestamp argument is just for unittests.
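+    # File objects without a name attribute fall back to
+    # "bug-%s-%s.%s" % (bug_id, timestamp(), extension), a .txt name by default.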
+    def _filename_for_upload(self, file_object, bug_id, extension="txt", timestamp=timestamp):
+        if hasattr(file_object, "name"):
+            return file_object.name
+        return "bug-%s-%s.%s" % (bug_id, timestamp(), extension)
+
+    def add_attachment_to_bug(self, bug_id, file_or_string, description, filename=None, comment_text=None, mimetype=None):
+        self.authenticate()
+        log('Adding attachment "%s" to %s' % (description, self.bug_url_for_bug_id(bug_id)))
+        self.browser.open(self.add_attachment_url(bug_id))
+        self.browser.select_form(name="entryform")
+        file_object = self._file_object_for_upload(file_or_string)
+        filename = filename or self._filename_for_upload(file_object, bug_id)
+        self._fill_attachment_form(description, file_object, filename=filename, mimetype=mimetype)
+        if comment_text:
+            log(comment_text)
+            self.browser['comment'] = comment_text
+        self.browser.submit()
+
+    # FIXME: The arguments to this function should be simplified and then
+    # this should be merged into add_attachment_to_bug
+    def add_patch_to_bug(self,
+                         bug_id,
+                         file_or_string,
+                         description,
+                         comment_text=None,
+                         mark_for_review=False,
+                         mark_for_commit_queue=False,
+                         mark_for_landing=False):
+        self.authenticate()
+        log('Adding patch "%s" to %s' % (description, self.bug_url_for_bug_id(bug_id)))
+
+        self.browser.open(self.add_attachment_url(bug_id))
+        self.browser.select_form(name="entryform")
+        file_object = self._file_object_for_upload(file_or_string)
+        filename = self._filename_for_upload(file_object, bug_id, extension="patch")
+        self._fill_attachment_form(description,
+                                   file_object,
+                                   mark_for_review=mark_for_review,
+                                   mark_for_commit_queue=mark_for_commit_queue,
+                                   mark_for_landing=mark_for_landing,
+                                   is_patch=True,
+                                   filename=filename)
+        if comment_text:
+            log(comment_text)
+            self.browser['comment'] = comment_text
+        self.browser.submit()
+
+    # FIXME: There has to be a more concise way to write this method.
+    def _check_create_bug_response(self, response_html):
+        match = re.search("<title>Bug (?P<bug_id>\d+) Submitted</title>",
+                          response_html)
+        if match:
+            return match.group('bug_id')
+
+        match = re.search(
+            '<div id="bugzilla-body">(?P<error_message>.+)<div id="footer">',
+            response_html,
+            re.DOTALL)
+        error_message = "FAIL"
+        if match:
+            text_lines = BeautifulSoup(
+                    match.group('error_message')).findAll(text=True)
+            error_message = "\n" + '\n'.join(
+                    ["  " + line.strip()
+                     for line in text_lines if line.strip()])
+        raise Exception("Bug not created: %s" % error_message)
+
+    def create_bug(self,
+                   bug_title,
+                   bug_description,
+                   component=None,
+                   diff=None,
+                   patch_description=None,
+                   cc=None,
+                   blocked=None,
+                   assignee=None,
+                   mark_for_review=False,
+                   mark_for_commit_queue=False):
+        self.authenticate()
+
+        log('Creating bug with title "%s"' % bug_title)
+        self.browser.open(config_urls.bug_server_url + "enter_bug.cgi?product=WebKit")
+        self.browser.select_form(name="Create")
+        component_items = self.browser.find_control('component').items
+        component_names = map(lambda item: item.name, component_items)
+        if not component:
+            component = "New Bugs"
+        if component not in component_names:
+            component = User.prompt_with_list("Please pick a component:", component_names)
+        self.browser["component"] = [component]
+        if cc:
+            self.browser["cc"] = cc
+        if blocked:
+            self.browser["blocked"] = unicode(blocked)
+        if not assignee:
+            assignee = self.username
+        if assignee and not self.browser.find_control("assigned_to").disabled:
+            self.browser["assigned_to"] = assignee
+        self.browser["short_desc"] = bug_title
+        self.browser["comment"] = bug_description
+
+        if diff:
+            # _fill_attachment_form expects a file-like object
+            # Patch files are already binary, so no encoding needed.
+            assert(isinstance(diff, str))
+            patch_file_object = StringIO.StringIO(diff)
+            self._fill_attachment_form(
+                    patch_description,
+                    patch_file_object,
+                    mark_for_review=mark_for_review,
+                    mark_for_commit_queue=mark_for_commit_queue,
+                    is_patch=True)
+
+        response = self.browser.submit()
+
+        bug_id = self._check_create_bug_response(response.read())
+        log("Bug %s created." % bug_id)
+        log("%sshow_bug.cgi?id=%s" % (config_urls.bug_server_url, bug_id))
+        return bug_id
+
+    def _find_select_element_for_flag(self, flag_name):
+        # FIXME: This will break if we ever re-order attachment flags
+        if flag_name == "review":
+            return self.browser.find_control(type='select', nr=0)
+        elif flag_name == "commit-queue":
+            return self.browser.find_control(type='select', nr=1)
+        raise Exception("Don't know how to find flag named \"%s\"" % flag_name)
+
+    def clear_attachment_flags(self,
+                               attachment_id,
+                               additional_comment_text=None):
+        self.authenticate()
+
+        comment_text = "Clearing flags on attachment: %s" % attachment_id
+        if additional_comment_text:
+            comment_text += "\n\n%s" % additional_comment_text
+        log(comment_text)
+
+        self.browser.open(self.attachment_url_for_id(attachment_id, 'edit'))
+        self.browser.select_form(nr=1)
+        self.browser.set_value(comment_text, name='comment', nr=0)
+        self._find_select_element_for_flag('review').value = ("X",)
+        self._find_select_element_for_flag('commit-queue').value = ("X",)
+        self.browser.submit()
+
+    def set_flag_on_attachment(self,
+                               attachment_id,
+                               flag_name,
+                               flag_value,
+                               comment_text=None,
+                               additional_comment_text=None):
+        # FIXME: We need a way to test this function on a live bugzilla
+        # instance.
+
+        self.authenticate()
+
+        # FIXME: additional_comment_text seems useless and should be merged into comment_text.
+        if additional_comment_text:
+            comment_text += "\n\n%s" % additional_comment_text
+        log(comment_text)
+
+        self.browser.open(self.attachment_url_for_id(attachment_id, 'edit'))
+        self.browser.select_form(nr=1)
+
+        if comment_text:
+            self.browser.set_value(comment_text, name='comment', nr=0)
+
+        self._find_select_element_for_flag(flag_name).value = (flag_value,)
+        self.browser.submit()
+
+    # FIXME: All of these bug editing methods have a ridiculous amount of
+    # copy/paste code.
+
+    def obsolete_attachment(self, attachment_id, comment_text=None):
+        self.authenticate()
+
+        log("Obsoleting attachment: %s" % attachment_id)
+        self.browser.open(self.attachment_url_for_id(attachment_id, 'edit'))
+        self.browser.select_form(nr=1)
+        self.browser.find_control('isobsolete').items[0].selected = True
+        # Also clear any review flag (to remove it from review/commit queues)
+        self._find_select_element_for_flag('review').value = ("X",)
+        self._find_select_element_for_flag('commit-queue').value = ("X",)
+        if comment_text:
+            log(comment_text)
+            # Bugzilla has two textareas named 'comment'; one is somehow
+            # hidden.  We want the first.
+            self.browser.set_value(comment_text, name='comment', nr=0)
+        self.browser.submit()
+
+    def add_cc_to_bug(self, bug_id, email_address_list):
+        self.authenticate()
+
+        log("Adding %s to the CC list for bug %s" % (email_address_list, bug_id))
+        self.browser.open(self.bug_url_for_bug_id(bug_id))
+        self.browser.select_form(name="changeform")
+        self.browser["newcc"] = ", ".join(email_address_list)
+        self.browser.submit()
+
+    def post_comment_to_bug(self, bug_id, comment_text, cc=None):
+        self.authenticate()
+
+        log("Adding comment to bug %s" % bug_id)
+        self.browser.open(self.bug_url_for_bug_id(bug_id))
+        self.browser.select_form(name="changeform")
+        self.browser["comment"] = comment_text
+        if cc:
+            self.browser["newcc"] = ", ".join(cc)
+        self.browser.submit()
+
+    def close_bug_as_fixed(self, bug_id, comment_text=None):
+        self.authenticate()
+
+        log("Closing bug %s as fixed" % bug_id)
+        self.browser.open(self.bug_url_for_bug_id(bug_id))
+        self.browser.select_form(name="changeform")
+        if comment_text:
+            self.browser['comment'] = comment_text
+        self.browser['bug_status'] = ['RESOLVED']
+        self.browser['resolution'] = ['FIXED']
+        self.browser.submit()
+
+    def _has_control(self, form, id):
+        return id in [control.id for control in form.controls]
+
+    def reassign_bug(self, bug_id, assignee=None, comment_text=None):
+        self.authenticate()
+
+        if not assignee:
+            assignee = self.username
+
+        log("Assigning bug %s to %s" % (bug_id, assignee))
+        self.browser.open(self.bug_url_for_bug_id(bug_id))
+        self.browser.select_form(name="changeform")
+
+        if not self._has_control(self.browser, "assigned_to"):
+            log("""Failed to assign bug to you (can't find assigned_to) control.
+Do you have EditBugs privileges at bugs.webkit.org?
+https://bugs.webkit.org/userprefs.cgi?tab=permissions
+
+If not, you should email webkit-committers@lists.webkit.org or ask in #webkit
+for someone to add EditBugs to your bugs.webkit.org account.""")
+            return
+
+        if comment_text:
+            log(comment_text)
+            self.browser["comment"] = comment_text
+        self.browser["assigned_to"] = assignee
+        self.browser.submit()
+
+    def reopen_bug(self, bug_id, comment_text):
+        self.authenticate()
+
+        log("Re-opening bug %s" % bug_id)
+        # Bugzilla requires a comment when re-opening a bug, so we know it will
+        # never be None.
+        log(comment_text)
+        self.browser.open(self.bug_url_for_bug_id(bug_id))
+        self.browser.select_form(name="changeform")
+        bug_status = self.browser.find_control("bug_status", type="select")
+        # This is a hack around the fact that ClientForm.ListControl seems to
+        # have no simpler way to ask if a control has an item named "REOPENED"
+        # without using exceptions for control flow.
+        possible_bug_statuses = map(lambda item: item.name, bug_status.items)
+        if "REOPENED" in possible_bug_statuses:
+            bug_status.value = ["REOPENED"]
+        # If the bug was never confirmed it will not have a "REOPENED"
+        # state, but only an "UNCONFIRMED" state.
+        elif "UNCONFIRMED" in possible_bug_statuses:
+            bug_status.value = ["UNCONFIRMED"]
+        else:
+            # FIXME: This logic is slightly backwards.  We won't print this
+            # message if the bug is already open with state "UNCONFIRMED".
+            log("Did not reopen bug %s, it appears to already be open with status %s." % (bug_id, bug_status.value))
+        self.browser['comment'] = comment_text
+        self.browser.submit()
diff --git a/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_mock.py b/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_mock.py
new file mode 100644
index 0000000..71b080c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_mock.py
@@ -0,0 +1,432 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import datetime
+
+from .bug import Bug
+from .attachment import Attachment
+from webkitpy.common.config.committers import CommitterList, Reviewer
+
+from webkitpy.common.system.deprecated_logging import log
+
+
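+# Builds a lookup table keyed by each dictionary's "id" field, e.g.
+# _id_to_object_dictionary(_patch1, _patch2) == {10000: _patch1, 10001: _patch2}.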
+def _id_to_object_dictionary(*objects):
+    dictionary = {}
+    for thing in objects:
+        dictionary[thing["id"]] = thing
+    return dictionary
+
+# Testing
+
+
+_patch1 = {
+    "id": 10000,
+    "bug_id": 50000,
+    "url": "http://example.com/10000",
+    "name": "Patch1",
+    "is_obsolete": False,
+    "is_patch": True,
+    "review": "+",
+    "reviewer_email": "foo@bar.com",
+    "commit-queue": "+",
+    "committer_email": "foo@bar.com",
+    "attacher_email": "Contributer1",
+}
+
+
+_patch2 = {
+    "id": 10001,
+    "bug_id": 50000,
+    "url": "http://example.com/10001",
+    "name": "Patch2",
+    "is_obsolete": False,
+    "is_patch": True,
+    "review": "+",
+    "reviewer_email": "reviewer2@webkit.org",
+    "commit-queue": "+",
+    "committer_email": "non-committer@example.com",
+    "attacher_email": "eric@webkit.org",
+}
+
+
+_patch3 = {
+    "id": 10002,
+    "bug_id": 50001,
+    "url": "http://example.com/10002",
+    "name": "Patch3",
+    "is_obsolete": False,
+    "is_patch": True,
+    "review": "?",
+    "commit-queue": "-",
+    "attacher_email": "eric@webkit.org",
+    "attach_date": datetime.datetime.today(),
+}
+
+
+_patch4 = {
+    "id": 10003,
+    "bug_id": 50003,
+    "url": "http://example.com/10002",
+    "name": "Patch3",
+    "is_obsolete": False,
+    "is_patch": True,
+    "review": "+",
+    "commit-queue": "?",
+    "reviewer_email": "foo@bar.com",
+    "attacher_email": "Contributer2",
+}
+
+
+_patch5 = {
+    "id": 10004,
+    "bug_id": 50003,
+    "url": "http://example.com/10002",
+    "name": "Patch5",
+    "is_obsolete": False,
+    "is_patch": True,
+    "review": "+",
+    "reviewer_email": "foo@bar.com",
+    "attacher_email": "eric@webkit.org",
+}
+
+
+_patch6 = {  # Valid committer, but no reviewer.
+    "id": 10005,
+    "bug_id": 50003,
+    "url": "http://example.com/10002",
+    "name": "ROLLOUT of r3489",
+    "is_obsolete": False,
+    "is_patch": True,
+    "commit-queue": "+",
+    "committer_email": "foo@bar.com",
+    "attacher_email": "eric@webkit.org",
+}
+
+
+_patch7 = {  # Valid review, patch is marked obsolete.
+    "id": 10006,
+    "bug_id": 50002,
+    "url": "http://example.com/10002",
+    "name": "Patch7",
+    "is_obsolete": True,
+    "is_patch": True,
+    "review": "+",
+    "reviewer_email": "foo@bar.com",
+    "attacher_email": "eric@webkit.org",
+}
+
+
+# This matches one of Bug.unassigned_emails
+_unassigned_email = "webkit-unassigned@lists.webkit.org"
+# This is needed for the FlakyTestReporter to believe the bug
+# was filed by one of the webkitpy bots.
+_commit_queue_email = "commit-queue@webkit.org"
+
+
+_bug1 = {
+    "id": 50000,
+    "title": "Bug with two r+'d and cq+'d patches, one of which has an "
+             "invalid commit-queue setter.",
+    "reporter_email": "foo@foo.com",
+    "assigned_to_email": _unassigned_email,
+    "cc_emails": [],
+    "attachments": [_patch1, _patch2],
+    "bug_status": "UNCONFIRMED",
+    "comments": [],
+}
+
+
+_bug2 = {
+    "id": 50001,
+    "title": "Bug with a patch needing review.",
+    "reporter_email": "eric@webkit.org",
+    "assigned_to_email": "foo@foo.com",
+    "cc_emails": ["abarth@webkit.org", ],
+    "attachments": [_patch3],
+    "bug_status": "ASSIGNED",
+    "comments": [{"comment_date":  datetime.datetime(2011, 6, 11, 9, 4, 3),
+                  "comment_email": "bar@foo.com",
+                  "text": "Message1.\nCommitted r35: <http://trac.webkit.org/changeset/35>",
+                  },
+                 ],
+}
+
+
+_bug3 = {
+    "id": 50002,
+    "title": "The third bug",
+    "reporter_email": "foo@foo.com",
+    "assigned_to_email": _unassigned_email,
+    "cc_emails": [],
+    "attachments": [_patch7],
+    "bug_status": "NEW",
+    "comments":  [{"comment_date":  datetime.datetime(2011, 6, 11, 9, 4, 3),
+                   "comment_email": "bar@foo.com",
+                   "text": "Committed r30: <http://trac.webkit.org/changeset/30>",
+                   },
+                  {"comment_date":  datetime.datetime(2011, 6, 11, 9, 4, 3),
+                   "comment_email": "bar@foo.com",
+                   "text": "Committed r31: <http://trac.webkit.org/changeset/31>",
+                   },
+                  ],
+}
+
+
+_bug4 = {
+    "id": 50003,
+    "title": "The fourth bug",
+    "reporter_email": "foo@foo.com",
+    "assigned_to_email": "foo@foo.com",
+    "cc_emails": [],
+    "attachments": [_patch4, _patch5, _patch6],
+    "bug_status": "REOPENED",
+    "comments": [{"comment_date":  datetime.datetime(2011, 6, 11, 9, 4, 3),
+                  "comment_email": "bar@foo.com",
+                  "text": "Committed r25: <http://trac.webkit.org/changeset/30>",
+                  },
+                 {"comment_date":  datetime.datetime(2011, 6, 11, 9, 4, 3),
+                  "comment_email": "bar@foo.com",
+                  "text": "Rolled out in <http://trac.webkit.org/changeset/26",
+                  },
+                 ],
+}
+
+
+_bug5 = {
+    "id": 50004,
+    "title": "The fifth bug",
+    "reporter_email": _commit_queue_email,
+    "assigned_to_email": "foo@foo.com",
+    "cc_emails": [],
+    "attachments": [],
+    "bug_status": "RESOLVED",
+    "dup_id": 50002,
+    "comments": [{"comment_date":  datetime.datetime(2011, 6, 11, 9, 4, 3),
+                  "comment_email": "bar@foo.com",
+                  "text": "Committed r15: <http://trac.webkit.org/changeset/15>",
+                  },
+                 ],
+
+}
+
+
+class MockBugzillaQueries(object):
+
+    def __init__(self, bugzilla):
+        self._bugzilla = bugzilla
+
+    def _all_bugs(self):
+        return map(lambda bug_dictionary: Bug(bug_dictionary, self._bugzilla),
+                   self._bugzilla.bug_cache.values())
+
+    def fetch_bug_ids_from_commit_queue(self):
+        bugs_with_commit_queued_patches = filter(
+                lambda bug: bug.commit_queued_patches(),
+                self._all_bugs())
+        return map(lambda bug: bug.id(), bugs_with_commit_queued_patches)
+
+    def fetch_attachment_ids_from_review_queue(self):
+        unreviewed_patches = sum([bug.unreviewed_patches()
+                                  for bug in self._all_bugs()], [])
+        return map(lambda patch: patch.id(), unreviewed_patches)
+
+    def fetch_patches_from_commit_queue(self):
+        return sum([bug.commit_queued_patches()
+                    for bug in self._all_bugs()], [])
+
+    def fetch_bug_ids_from_pending_commit_list(self):
+        bugs_with_reviewed_patches = filter(lambda bug: bug.reviewed_patches(),
+                                            self._all_bugs())
+        bug_ids = map(lambda bug: bug.id(), bugs_with_reviewed_patches)
+        # NOTE: This manual hack is here to allow testing logging in
+        # test_assign_to_committer; the real pending-commit query on bugzilla
+        # will return bugs with patches which have r+ but are also obsolete.
+        return bug_ids + [50002]
+
+    def fetch_bugs_from_review_queue(self, cc_email=None):
+        unreviewed_bugs = [bug for bug in self._all_bugs() if bug.unreviewed_patches()]
+
+        if cc_email:
+            return [bug for bug in unreviewed_bugs if cc_email in bug.cc_emails()]
+
+        return unreviewed_bugs
+
+    def fetch_patches_from_pending_commit_list(self):
+        return sum([bug.reviewed_patches() for bug in self._all_bugs()], [])
+
+    def fetch_bugs_matching_search(self, search_string):
+        return [self._bugzilla.fetch_bug(50004), self._bugzilla.fetch_bug(50003)]
+
+    def fetch_bugs_matching_quicksearch(self, search_string):
+        return [self._bugzilla.fetch_bug(50001), self._bugzilla.fetch_bug(50002),
+                self._bugzilla.fetch_bug(50003), self._bugzilla.fetch_bug(50004)]
+
+
+_mock_reviewers = [Reviewer("Foo Bar", "foo@bar.com"),
+                   Reviewer("Reviewer2", "reviewer2@webkit.org")]
+
+
+# FIXME: Bugzilla is the wrong mock point.  Once we have a BugzillaNetwork
+#        class, we should mock that instead.
+# Most of this class is just copy/paste from Bugzilla.
+class MockBugzilla(object):
+
+    bug_server_url = "http://example.com"
+
+    bug_cache = _id_to_object_dictionary(_bug1, _bug2, _bug3, _bug4, _bug5)
+
+    attachment_cache = _id_to_object_dictionary(_patch1,
+                                                _patch2,
+                                                _patch3,
+                                                _patch4,
+                                                _patch5,
+                                                _patch6,
+                                                _patch7)
+
+    def __init__(self):
+        self.queries = MockBugzillaQueries(self)
+        # FIXME: This should move onto the Host object, and we should use a MockCommitterList
+        self.committers = CommitterList(reviewers=_mock_reviewers)
+        self.username = None
+        self._override_patch = None
+
+    def authenticate(self):
+        self.username = "username@webkit.org"
+
+    def create_bug(self,
+                   bug_title,
+                   bug_description,
+                   component=None,
+                   diff=None,
+                   patch_description=None,
+                   cc=None,
+                   blocked=None,
+                   mark_for_review=False,
+                   mark_for_commit_queue=False):
+        log("MOCK create_bug")
+        log("bug_title: %s" % bug_title)
+        log("bug_description: %s" % bug_description)
+        if component:
+            log("component: %s" % component)
+        if cc:
+            log("cc: %s" % cc)
+        if blocked:
+            log("blocked: %s" % blocked)
+        return 60001
+
+    def quips(self):
+        return ["Good artists copy. Great artists steal. - Pablo Picasso"]
+
+    def fetch_bug(self, bug_id):
+        return Bug(self.bug_cache.get(int(bug_id)), self)
+
+    def set_override_patch(self, patch):
+        self._override_patch = patch
+
+    def fetch_attachment(self, attachment_id):
+        if self._override_patch:
+            return self._override_patch
+
+        attachment_dictionary = self.attachment_cache.get(attachment_id)
+        if not attachment_dictionary:
+            print "MOCK: fetch_attachment: %s is not a known attachment id" % attachment_id
+            return None
+        bug = self.fetch_bug(attachment_dictionary["bug_id"])
+        for attachment in bug.attachments(include_obsolete=True):
+            if attachment.id() == int(attachment_id):
+                return attachment
+
+    def bug_url_for_bug_id(self, bug_id):
+        return "%s/%s" % (self.bug_server_url, bug_id)
+
+    def fetch_bug_dictionary(self, bug_id):
+        return self.bug_cache.get(bug_id)
+
+    def attachment_url_for_id(self, attachment_id, action="view"):
+        action_param = ""
+        if action and action != "view":
+            action_param = "&action=%s" % action
+        return "%s/%s%s" % (self.bug_server_url, attachment_id, action_param)
+
+    def reassign_bug(self, bug_id, assignee=None, comment_text=None):
+        log("MOCK reassign_bug: bug_id=%s, assignee=%s" % (bug_id, assignee))
+        if comment_text:
+            log("-- Begin comment --")
+            log(comment_text)
+            log("-- End comment --")
+
+    def set_flag_on_attachment(self,
+                               attachment_id,
+                               flag_name,
+                               flag_value,
+                               comment_text=None,
+                               additional_comment_text=None):
+        log("MOCK setting flag '%s' to '%s' on attachment '%s' with comment '%s' and additional comment '%s'" % (
+            flag_name, flag_value, attachment_id, comment_text, additional_comment_text))
+
+    def post_comment_to_bug(self, bug_id, comment_text, cc=None):
+        log("MOCK bug comment: bug_id=%s, cc=%s\n--- Begin comment ---\n%s\n--- End comment ---\n" % (
+            bug_id, cc, comment_text))
+
+    def add_attachment_to_bug(self, bug_id, file_or_string, description, filename=None, comment_text=None, mimetype=None):
+        log("MOCK add_attachment_to_bug: bug_id=%s, description=%s filename=%s mimetype=%s" %
+            (bug_id, description, filename, mimetype))
+        if comment_text:
+            log("-- Begin comment --")
+            log(comment_text)
+            log("-- End comment --")
+
+    def add_patch_to_bug(self,
+                         bug_id,
+                         diff,
+                         description,
+                         comment_text=None,
+                         mark_for_review=False,
+                         mark_for_commit_queue=False,
+                         mark_for_landing=False):
+        log("MOCK add_patch_to_bug: bug_id=%s, description=%s, mark_for_review=%s, mark_for_commit_queue=%s, mark_for_landing=%s" %
+            (bug_id, description, mark_for_review, mark_for_commit_queue, mark_for_landing))
+        if comment_text:
+            log("-- Begin comment --")
+            log(comment_text)
+            log("-- End comment --")
+
+    def add_cc_to_bug(self, bug_id, ccs):
+        pass
+
+    def obsolete_attachment(self, attachment_id, message=None):
+        pass
+
+    def reopen_bug(self, bug_id, message):
+        log("MOCK reopen_bug %s with comment '%s'" % (bug_id, message))
+
+    def close_bug_as_fixed(self, bug_id, message):
+        pass
+
+    def clear_attachment_flags(self, attachment_id, message):
+        pass
diff --git a/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_unittest.py b/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_unittest.py
new file mode 100644
index 0000000..6108b5e
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_unittest.py
@@ -0,0 +1,550 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+import datetime
+import StringIO
+
+from .bugzilla import Bugzilla, BugzillaQueries, EditUsersParser
+
+from webkitpy.common.config import urls
+from webkitpy.common.config.committers import Reviewer, Committer, Contributor, CommitterList
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.net.web_mock import MockBrowser
+from webkitpy.thirdparty.mock import Mock
+from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
+
+
+class BugzillaTest(unittest.TestCase):
+    _example_attachment = '''
+        <attachment
+          isobsolete="1"
+          ispatch="1"
+          isprivate="0"
+        >
+        <attachid>33721</attachid>
+        <date>2009-07-29 10:23 PDT</date>
+        <desc>Fixed whitespace issue</desc>
+        <filename>patch</filename>
+        <type>text/plain</type>
+        <size>9719</size>
+        <attacher>christian.plesner.hansen@gmail.com</attacher>
+          <flag name="review"
+                id="17931"
+                status="+"
+                setter="one@test.com"
+           />
+          <flag name="commit-queue"
+                id="17932"
+                status="+"
+                setter="two@test.com"
+           />
+        </attachment>
+'''
+    _expected_example_attachment_parsing = {
+        'attach_date': datetime.datetime(2009, 07, 29, 10, 23),
+        'bug_id' : 100,
+        'is_obsolete' : True,
+        'is_patch' : True,
+        'id' : 33721,
+        'url' : "https://bugs.webkit.org/attachment.cgi?id=33721",
+        'name' : "Fixed whitespace issue",
+        'type' : "text/plain",
+        'review' : '+',
+        'reviewer_email' : 'one@test.com',
+        'commit-queue' : '+',
+        'committer_email' : 'two@test.com',
+        'attacher_email' : 'christian.plesner.hansen@gmail.com',
+    }
+
+    def test_url_creation(self):
+        # FIXME: These would all be better as doctests.
+        bugs = Bugzilla()
+        self.assertEquals(None, bugs.bug_url_for_bug_id(None))
+        self.assertEquals(None, bugs.short_bug_url_for_bug_id(None))
+        self.assertEquals(None, bugs.attachment_url_for_id(None))
+
+    def test_parse_bug_id(self):
+        # Test that we can parse the urls we produce.
+        bugs = Bugzilla()
+        self.assertEquals(12345, urls.parse_bug_id(bugs.short_bug_url_for_bug_id(12345)))
+        self.assertEquals(12345, urls.parse_bug_id(bugs.bug_url_for_bug_id(12345)))
+        self.assertEquals(12345, urls.parse_bug_id(bugs.bug_url_for_bug_id(12345, xml=True)))
+
+    _bug_xml = """
+    <bug>
+          <bug_id>32585</bug_id>
+          <creation_ts>2009-12-15 15:17 PST</creation_ts>
+          <short_desc>bug to test webkit-patch&apos;s and commit-queue&apos;s failures</short_desc>
+          <delta_ts>2009-12-27 21:04:50 PST</delta_ts>
+          <reporter_accessible>1</reporter_accessible>
+          <cclist_accessible>1</cclist_accessible>
+          <classification_id>1</classification_id>
+          <classification>Unclassified</classification>
+          <product>WebKit</product>
+          <component>Tools / Tests</component>
+          <version>528+ (Nightly build)</version>
+          <rep_platform>PC</rep_platform>
+          <op_sys>Mac OS X 10.5</op_sys>
+          <bug_status>NEW</bug_status>
+          <priority>P2</priority>
+          <bug_severity>Normal</bug_severity>
+          <target_milestone>---</target_milestone>
+          <everconfirmed>1</everconfirmed>
+          <reporter name="Eric Seidel">eric@webkit.org</reporter>
+          <assigned_to name="Nobody">webkit-unassigned@lists.webkit.org</assigned_to>
+          <cc>foo@bar.com</cc>
+    <cc>example@example.com</cc>
+          <long_desc isprivate="0">
+            <who name="Eric Seidel">eric@webkit.org</who>
+            <bug_when>2009-12-15 15:17:28 PST</bug_when>
+            <thetext>bug to test webkit-patch and commit-queue failures
+
+Ignore this bug.  Just for testing failure modes of webkit-patch and the commit-queue.</thetext>
+          </long_desc>
+          <attachment
+              isobsolete="0"
+              ispatch="1"
+              isprivate="0"
+          >
+            <attachid>45548</attachid>
+            <date>2009-12-27 23:51 PST</date>
+            <desc>Patch</desc>
+            <filename>bug-32585-20091228005112.patch</filename>
+            <type>text/plain</type>
+            <size>10882</size>
+            <attacher>mjs@apple.com</attacher>
+
+              <token>1261988248-dc51409e9c421a4358f365fa8bec8357</token>
+              <data encoding="base64">SW5kZXg6IFdlYktpdC9tYWMvQ2hhbmdlTG9nCj09PT09PT09PT09PT09PT09PT09PT09PT09PT09
+removed-because-it-was-really-long
+ZEZpbmlzaExvYWRXaXRoUmVhc29uOnJlYXNvbl07Cit9CisKIEBlbmQKIAogI2VuZGlmCg==
+</data>
+
+            <flag name="review"
+                id="27602"
+                status="?"
+                setter="mjs@apple.com"
+            />
+        </attachment>
+    </bug>
+"""
+
+    _single_bug_xml = """
+<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
+<!DOCTYPE bugzilla SYSTEM "https://bugs.webkit.org/bugzilla.dtd">
+<bugzilla version="3.2.3"
+          urlbase="https://bugs.webkit.org/"
+          maintainer="admin@webkit.org"
+          exporter="eric@webkit.org"
+>
+%s
+</bugzilla>
+""" % _bug_xml
+
+    _expected_example_bug_parsing = {
+        "id" : 32585,
+        "title" : u"bug to test webkit-patch's and commit-queue's failures",
+        "cc_emails" : ["foo@bar.com", "example@example.com"],
+        "reporter_email" : "eric@webkit.org",
+        "assigned_to_email" : "webkit-unassigned@lists.webkit.org",
+        "bug_status": "NEW",
+        "attachments" : [{
+            "attach_date": datetime.datetime(2009, 12, 27, 23, 51),
+            'name': u'Patch',
+            'url' : "https://bugs.webkit.org/attachment.cgi?id=45548",
+            'is_obsolete': False,
+            'review': '?',
+            'is_patch': True,
+            'attacher_email': 'mjs@apple.com',
+            'bug_id': 32585,
+            'type': 'text/plain',
+            'id': 45548
+        }],
+        "comments" : [{
+                'comment_date': datetime.datetime(2009, 12, 15, 15, 17, 28),
+                'comment_email': 'eric@webkit.org',
+                'text': """bug to test webkit-patch and commit-queue failures
+
+Ignore this bug.  Just for testing failure modes of webkit-patch and the commit-queue.""",
+        }]
+    }
+
+    # FIXME: This should move to a central location and be shared by more unit tests.
+    def _assert_dictionaries_equal(self, actual, expected):
+        # Make sure we aren't parsing more or less than we expect
+        self.assertEquals(sorted(actual.keys()), sorted(expected.keys()))
+
+        for key, expected_value in expected.items():
+            self.assertEquals(actual[key], expected_value, ("Failure for key: %s: Actual='%s' Expected='%s'" % (key, actual[key], expected_value)))
+
+    def test_parse_bug_dictionary_from_xml(self):
+        bug = Bugzilla()._parse_bug_dictionary_from_xml(self._single_bug_xml)
+        self._assert_dictionaries_equal(bug, self._expected_example_bug_parsing)
+
+    _sample_multi_bug_xml = """
+<bugzilla version="3.2.3" urlbase="https://bugs.webkit.org/" maintainer="admin@webkit.org" exporter="eric@webkit.org">
+    %s
+    %s
+</bugzilla>
+""" % (_bug_xml, _bug_xml)
+
+    def test_parse_bugs_from_xml(self):
+        bugzilla = Bugzilla()
+        bugs = bugzilla._parse_bugs_from_xml(self._sample_multi_bug_xml)
+        self.assertEquals(len(bugs), 2)
+        self.assertEquals(bugs[0].id(), self._expected_example_bug_parsing['id'])
+        bugs = bugzilla._parse_bugs_from_xml("")
+        self.assertEquals(len(bugs), 0)
+
+    # This could be combined into test_bug_parsing later if desired.
+    def test_attachment_parsing(self):
+        bugzilla = Bugzilla()
+        soup = BeautifulSoup(self._example_attachment)
+        attachment_element = soup.find("attachment")
+        attachment = bugzilla._parse_attachment_element(attachment_element, self._expected_example_attachment_parsing['bug_id'])
+        self.assertTrue(attachment)
+        self._assert_dictionaries_equal(attachment, self._expected_example_attachment_parsing)
+
+    _sample_attachment_detail_page = """
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
+                      "http://www.w3.org/TR/html4/loose.dtd">
+<html>
+  <head>
+    <title>
+  Attachment 41073 Details for Bug 27314</title>
+<link rel="Top" href="https://bugs.webkit.org/">
+    <link rel="Up" href="show_bug.cgi?id=27314">
+"""
+
+    def test_attachment_detail_bug_parsing(self):
+        bugzilla = Bugzilla()
+        self.assertEquals(27314, bugzilla._parse_bug_id_from_attachment_page(self._sample_attachment_detail_page))
+
+    def test_add_cc_to_bug(self):
+        bugzilla = Bugzilla()
+        bugzilla.browser = MockBrowser()
+        bugzilla.authenticate = lambda: None
+        expected_stderr = "Adding ['adam@example.com'] to the CC list for bug 42\n"
+        OutputCapture().assert_outputs(self, bugzilla.add_cc_to_bug, [42, ["adam@example.com"]], expected_stderr=expected_stderr)
+
+    def _mock_control_item(self, name):
+        mock_item = Mock()
+        mock_item.name = name
+        return mock_item
+
+    def _mock_find_control(self, item_names=[], selected_index=0):
+        mock_control = Mock()
+        mock_control.items = [self._mock_control_item(name) for name in item_names]
+        mock_control.value = [item_names[selected_index]] if item_names else None
+        return lambda name, type: mock_control
+
+    def _assert_reopen(self, item_names=None, selected_index=None, extra_stderr=None):
+        bugzilla = Bugzilla()
+        bugzilla.browser = MockBrowser()
+        bugzilla.authenticate = lambda: None
+
+        mock_find_control = self._mock_find_control(item_names, selected_index)
+        bugzilla.browser.find_control = mock_find_control
+        expected_stderr = "Re-opening bug 42\n['comment']\n"
+        if extra_stderr:
+            expected_stderr += extra_stderr
+        OutputCapture().assert_outputs(self, bugzilla.reopen_bug, [42, ["comment"]], expected_stderr=expected_stderr)
+
+    def test_reopen_bug(self):
+        self._assert_reopen(item_names=["REOPENED", "RESOLVED", "CLOSED"], selected_index=1)
+        self._assert_reopen(item_names=["UNCONFIRMED", "RESOLVED", "CLOSED"], selected_index=1)
+        extra_stderr = "Did not reopen bug 42, it appears to already be open with status ['NEW'].\n"
+        self._assert_reopen(item_names=["NEW", "RESOLVED"], selected_index=0, extra_stderr=extra_stderr)
+
+    def test_file_object_for_upload(self):
+        bugzilla = Bugzilla()
+        file_object = StringIO.StringIO()
+        unicode_tor = u"WebKit \u2661 Tor Arne Vestb\u00F8!"
+        utf8_tor = unicode_tor.encode("utf-8")
+        self.assertEqual(bugzilla._file_object_for_upload(file_object), file_object)
+        self.assertEqual(bugzilla._file_object_for_upload(utf8_tor).read(), utf8_tor)
+        self.assertEqual(bugzilla._file_object_for_upload(unicode_tor).read(), utf8_tor)
+
+    def test_filename_for_upload(self):
+        bugzilla = Bugzilla()
+        mock_file = Mock()
+        mock_file.name = "foo"
+        self.assertEqual(bugzilla._filename_for_upload(mock_file, 1234), 'foo')
+        mock_timestamp = lambda: "now"
+        filename = bugzilla._filename_for_upload(StringIO.StringIO(), 1234, extension="patch", timestamp=mock_timestamp)
+        self.assertEqual(filename, "bug-1234-now.patch")
+
+    def test_commit_queue_flag(self):
+        bugzilla = Bugzilla()
+
+        bugzilla.committers = CommitterList(reviewers=[Reviewer("WebKit Reviewer", "reviewer@webkit.org")],
+            committers=[Committer("WebKit Committer", "committer@webkit.org")],
+            contributors=[Contributor("WebKit Contributor", "contributor@webkit.org")],
+            watchers=[])
+
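+        # The assertions below encode the rules this test exercises: '+' only
+        # when mark_for_landing comes from a committer or reviewer, 'X' when
+        # neither flag is requested, and '?' in every other case.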
+        def assert_commit_queue_flag(mark_for_landing, mark_for_commit_queue, expected, username=None):
+            bugzilla.username = username
+            capture = OutputCapture()
+            capture.capture_output()
+            try:
+                self.assertEqual(bugzilla._commit_queue_flag(mark_for_landing=mark_for_landing, mark_for_commit_queue=mark_for_commit_queue), expected)
+            finally:
+                capture.restore_output()
+
+        assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=False, expected='X', username='unknown@webkit.org')
+        assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=True, expected='?', username='unknown@webkit.org')
+        assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=False, expected='?', username='unknown@webkit.org')
+        assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=True, expected='?', username='unknown@webkit.org')
+
+        assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=False, expected='X', username='contributor@webkit.org')
+        assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=True, expected='?', username='contributor@webkit.org')
+        assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=False, expected='?', username='contributor@webkit.org')
+        assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=True, expected='?', username='contributor@webkit.org')
+
+        assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=False, expected='X', username='committer@webkit.org')
+        assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=True, expected='?', username='committer@webkit.org')
+        assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=False, expected='+', username='committer@webkit.org')
+        assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=True, expected='+', username='committer@webkit.org')
+
+        assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=False, expected='X', username='reviewer@webkit.org')
+        assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=True, expected='?', username='reviewer@webkit.org')
+        assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=False, expected='+', username='reviewer@webkit.org')
+        assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=True, expected='+', username='reviewer@webkit.org')
+
+
+class BugzillaQueriesTest(unittest.TestCase):
+    _sample_request_page = """
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
+                      "http://www.w3.org/TR/html4/loose.dtd">
+<html>
+  <head>
+    <title>Request Queue</title>
+  </head>
+<body>
+
+<h3>Flag: review</h3>
+  <table class="requests" cellspacing="0" cellpadding="4" border="1">
+    <tr>
+        <th>Requester</th>
+        <th>Requestee</th>
+        <th>Bug</th>
+        <th>Attachment</th>
+        <th>Created</th>
+    </tr>
+    <tr>
+        <td>Shinichiro Hamaji &lt;hamaji&#64;chromium.org&gt;</td>
+        <td></td>
+        <td><a href="show_bug.cgi?id=30015">30015: text-transform:capitalize is failing in CSS2.1 test suite</a></td>
+        <td><a href="attachment.cgi?id=40511&amp;action=review">
+40511: Patch v0</a></td>
+        <td>2009-10-02 04:58 PST</td>
+    </tr>
+    <tr>
+        <td>Zan Dobersek &lt;zandobersek&#64;gmail.com&gt;</td>
+        <td></td>
+        <td><a href="show_bug.cgi?id=26304">26304: [GTK] Add controls for playing html5 video.</a></td>
+        <td><a href="attachment.cgi?id=40722&amp;action=review">
+40722: Media controls, the simple approach</a></td>
+        <td>2009-10-06 09:13 PST</td>
+    </tr>
+    <tr>
+        <td>Zan Dobersek &lt;zandobersek&#64;gmail.com&gt;</td>
+        <td></td>
+        <td><a href="show_bug.cgi?id=26304">26304: [GTK] Add controls for playing html5 video.</a></td>
+        <td><a href="attachment.cgi?id=40723&amp;action=review">
+40723: Adjust the media slider thumb size</a></td>
+        <td>2009-10-06 09:15 PST</td>
+    </tr>
+  </table>
+</body>
+</html>
+"""
+    _sample_quip_page = u"""
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
+                      "http://www.w3.org/TR/html4/loose.dtd">
+<html>
+  <head>
+    <title>Bugzilla Quip System</title>
+  </head>
+  <body>
+    <h2>
+
+      Existing quips:
+    </h2>
+    <ul>
+        <li>Everything should be made as simple as possible, but not simpler. - Albert Einstein</li>
+        <li>Good artists copy. Great artists steal. - Pablo Picasso</li>
+        <li>\u00c1gua mole em pedra dura, tanto bate at\u00e9 que fura.</li>
+
+    </ul>
+  </body>
+</html>
+"""
+
+    def _assert_result_count(self, queries, html, count):
+        self.assertEquals(queries._parse_result_count(html), count)
+
+    def test_parse_result_count(self):
+        queries = BugzillaQueries(None)
+        # Pages with results always list the count at least twice.
+        self._assert_result_count(queries, '<span class="bz_result_count">314 bugs found.</span><span class="bz_result_count">314 bugs found.</span>', 314)
+        self._assert_result_count(queries, '<span class="bz_result_count">Zarro Boogs found.</span>', 0)
+        self._assert_result_count(queries, '<span class="bz_result_count">\n \nOne bug found.</span>', 1)
+        self.assertRaises(Exception, queries._parse_result_count, ['Invalid'])
+
+    def test_request_page_parsing(self):
+        queries = BugzillaQueries(None)
+        self.assertEquals([40511, 40722, 40723], queries._parse_attachment_ids_request_query(self._sample_request_page))
+
+    def test_quip_page_parsing(self):
+        queries = BugzillaQueries(None)
+        expected_quips = ["Everything should be made as simple as possible, but not simpler. - Albert Einstein", "Good artists copy. Great artists steal. - Pablo Picasso", u"\u00c1gua mole em pedra dura, tanto bate at\u00e9 que fura."]
+        self.assertEquals(expected_quips, queries._parse_quips(self._sample_quip_page))
+
+    def test_load_query(self):
+        queries = BugzillaQueries(Mock())
+        queries._load_query("request.cgi?action=queue&type=review&group=type")
+
+
+class EditUsersParserTest(unittest.TestCase):
+    _example_user_results = """
+        <div id="bugzilla-body">
+        <p>1 user found.</p>
+        <table id="admin_table" border="1" cellpadding="4" cellspacing="0">
+          <tr bgcolor="#6666FF">
+              <th align="left">Edit user...
+              </th>
+              <th align="left">Real name
+              </th>
+              <th align="left">Account History
+              </th>
+          </tr>
+          <tr>
+              <td >
+                  <a href="editusers.cgi?action=edit&amp;userid=1234&amp;matchvalue=login_name&amp;groupid=&amp;grouprestrict=&amp;matchtype=substr&amp;matchstr=abarth%40webkit.org">
+                abarth&#64;webkit.org
+                  </a>
+              </td>
+              <td >
+                Adam Barth
+              </td>
+              <td >
+                  <a href="editusers.cgi?action=activity&amp;userid=1234&amp;matchvalue=login_name&amp;groupid=&amp;grouprestrict=&amp;matchtype=substr&amp;matchstr=abarth%40webkit.org">
+                View
+                  </a>
+              </td>
+          </tr>
+        </table>
+    """
+
+    _example_empty_user_results = """
+    <div id="bugzilla-body">
+    <p>0 users found.</p>
+    <table id="admin_table" border="1" cellpadding="4" cellspacing="0">
+      <tr bgcolor="#6666FF">
+          <th align="left">Edit user...
+          </th>
+          <th align="left">Real name
+          </th>
+          <th align="left">Account History
+          </th>
+      </tr>
+      <tr><td colspan="3" align="center"><i>&lt;none&gt;</i></td></tr>
+    </table>
+    """
+
+    def _assert_login_userid_pairs(self, results_page, expected_logins):
+        parser = EditUsersParser()
+        logins = parser.login_userid_pairs_from_edit_user_results(results_page)
+        self.assertEquals(logins, expected_logins)
+
+    def test_logins_from_editusers_results(self):
+        self._assert_login_userid_pairs(self._example_user_results, [("abarth@webkit.org", 1234)])
+        self._assert_login_userid_pairs(self._example_empty_user_results, [])
+
+    _example_user_page = """<table class="main"><tr>
+  <th><label for="login">Login name:</label></th>
+  <td>eric&#64;webkit.org
+  </td>
+</tr>
+<tr>
+  <th><label for="name">Real name:</label></th>
+  <td>Eric Seidel
+  </td>
+</tr>
+    <tr>
+      <th>Group access:</th>
+      <td>
+        <table class="groups">
+          <tr>
+          </tr>
+          <tr>
+            <th colspan="2">User is a member of these groups</th>
+          </tr>
+            <tr class="direct">
+              <td class="checkbox"><input type="checkbox"
+                           id="group_7"
+                           name="group_7"
+                           value="1" checked="checked" /></td>
+              <td class="groupname">
+                <label for="group_7">
+                  <strong>canconfirm:</strong>
+                  Can confirm a bug.
+                </label>
+              </td>
+            </tr>
+            <tr class="direct">
+              <td class="checkbox"><input type="checkbox"
+                           id="group_6"
+                           name="group_6"
+                           value="1" /></td>
+              <td class="groupname">
+                <label for="group_6">
+                  <strong>editbugs:</strong>
+                  Can edit all aspects of any bug.
+                </label>
+              </td>
+            </tr>
+        </table>
+      </td>
+    </tr>
+
+  <tr>
+    <th>Product responsibilities:</th>
+    <td>
+        <em>none</em>
+    </td>
+  </tr>
+</table>"""
+
+    def test_user_dict_from_edit_user_page(self):
+        parser = EditUsersParser()
+        user_dict = parser.user_dict_from_edit_user_page(self._example_user_page)
+        expected_user_dict = {u'login': u'eric@webkit.org', u'groups': set(['canconfirm']), u'name': u'Eric Seidel'}
+        self.assertEqual(expected_user_dict, user_dict)
diff --git a/Tools/Scripts/webkitpy/common/net/buildbot/__init__.py b/Tools/Scripts/webkitpy/common/net/buildbot/__init__.py
new file mode 100644
index 0000000..631ef6b
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/buildbot/__init__.py
@@ -0,0 +1,5 @@
+# Required for Python to search this directory for module files
+
+# We only export public API here.
+# It's unclear if Builder and Build need to be public.
+from .buildbot import BuildBot, Builder, Build
diff --git a/Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py b/Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py
new file mode 100644
index 0000000..adb5a3d
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py
@@ -0,0 +1,492 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import json
+import operator
+import re
+import urllib
+import urllib2
+
+import webkitpy.common.config.urls as config_urls
+from webkitpy.common.memoized import memoized
+from webkitpy.common.net.failuremap import FailureMap
+from webkitpy.common.net.layouttestresults import LayoutTestResults
+from webkitpy.common.net.networktransaction import NetworkTransaction
+from webkitpy.common.net.regressionwindow import RegressionWindow
+from webkitpy.common.system.logutils import get_logger
+from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
+
+
+_log = get_logger(__file__)
+
+
+class Builder(object):
+    def __init__(self, name, buildbot):
+        self._name = name
+        self._buildbot = buildbot
+        self._builds_cache = {}
+        self._revision_to_build_number = None
+        from webkitpy.thirdparty.autoinstalled.mechanize import Browser
+        self._browser = Browser()
+        self._browser.set_handle_robots(False) # The builder pages are excluded by robots.txt
+
+    def name(self):
+        return self._name
+
+    def results_url(self):
+        return "%s/results/%s" % (self._buildbot.buildbot_url, self.url_encoded_name())
+
+    # In addition to per-build results, the build.chromium.org builders also
+    # keep a directory that accumulates test results over many runs.
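+    #
+    # Illustrative sketch (hypothetical subclass and URL layout, comment only):
+    #
+    #   class AccumulatingResultsBuilder(Builder):
+    #       def accumulated_results_url(self):
+    #           return "%s/layout-test-results" % self.results_url()
+    #
+    # The base Builder has no such directory, so it returns None.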
+    def accumulated_results_url(self):
+        return None
+
+    def latest_layout_test_results_url(self):
+        return self.accumulated_results_url() or self.latest_cached_build().results_url()
+
+    @memoized
+    def latest_layout_test_results(self):
+        return self.fetch_layout_test_results(self.latest_layout_test_results_url())
+
+    def _fetch_file_from_results(self, results_url, file_name):
+        # It seems this can return None if the url redirects and then returns 404.
+        result = urllib2.urlopen("%s/%s" % (results_url, file_name))
+        if not result:
+            return None
+        # urlopen returns a file-like object which sometimes works fine with str()
+        # but is sometimes an addinfourl object.  In either case calling read() is correct.
+        return result.read()
+
+    def fetch_layout_test_results(self, results_url):
+        # FIXME: This should cache that the result was a 404 and stop hitting the network.
+        results_file = NetworkTransaction(convert_404_to_None=True).run(lambda: self._fetch_file_from_results(results_url, "full_results.json"))
+        if not results_file:
+            results_file = NetworkTransaction(convert_404_to_None=True).run(lambda: self._fetch_file_from_results(results_url, "results.html"))
+
+        # results_from_string accepts either ORWT html or NRWT json.
+        return LayoutTestResults.results_from_string(results_file)
+
+    def url_encoded_name(self):
+        return urllib.quote(self._name)
+
+    def url(self):
+        return "%s/builders/%s" % (self._buildbot.buildbot_url, self.url_encoded_name())
+
+    # This provides a single place to mock
+    def _fetch_build(self, build_number):
+        build_dictionary = self._buildbot._fetch_build_dictionary(self, build_number)
+        if not build_dictionary:
+            return None
+        revision_string = build_dictionary['sourceStamp']['revision']
+        return Build(self,
+            build_number=int(build_dictionary['number']),
+            # 'revision' may be None if a trunk build was started by the force-build button on the web page.
+            revision=(int(revision_string) if revision_string else None),
+            # Buildbot uses any number other than 0 to mean fail.  Since we fetch with
+            # filter=1, passing builds may contain no 'results' value.
+            is_green=(not build_dictionary.get('results')),
+        )
+
+    def build(self, build_number):
+        if not build_number:
+            return None
+        cached_build = self._builds_cache.get(build_number)
+        if cached_build:
+            return cached_build
+
+        build = self._fetch_build(build_number)
+        self._builds_cache[build_number] = build
+        return build
+
+    def latest_cached_build(self):
+        revision_build_pairs = self.revision_build_pairs_with_results()
+        revision_build_pairs.sort(key=lambda i: i[1])
+        latest_build_number = revision_build_pairs[-1][1]
+        return self.build(latest_build_number)
+
+    def force_build(self, username="webkit-patch", comments=None):
+        def predicate(form):
+            try:
+                return form.find_control("username")
+            except Exception, e:
+                return False
+        self._browser.open(self.url())
+        self._browser.select_form(predicate=predicate)
+        self._browser["username"] = username
+        if comments:
+            self._browser["comments"] = comments
+        return self._browser.submit()
+
+    file_name_regexp = re.compile(r"r(?P<revision>\d+) \((?P<build_number>\d+)\)")
+    def _revision_and_build_for_filename(self, filename):
+        # Example: "r47483 (1)/" or "r47483 (1).zip"
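+        # An illustrative mapping: "r47483 (1).zip" yields (47483, 1), while a
+        # filename that does not match the pattern yields None.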
+        match = self.file_name_regexp.match(filename)
+        if not match:
+            return None
+        return (int(match.group("revision")), int(match.group("build_number")))
+
+    def _fetch_revision_to_build_map(self):
+        # All _fetch requests go through _buildbot for easier mocking
+        # FIXME: This should use NetworkTransaction's 404 handling instead.
+        try:
+            # FIXME: This method is horribly slow due to the huge network load.
+            # FIXME: This is a poor way to do revision -> build mapping.
+            # Better would be to ask buildbot through some sort of API.
+            print "Loading revision/build list from %s." % self.results_url()
+            print "This may take a while..."
+            result_files = self._buildbot._fetch_twisted_directory_listing(self.results_url())
+        except urllib2.HTTPError, error:
+            if error.code != 404:
+                raise
+            _log.debug("Revision/build list failed to load.")
+            result_files = []
+        return dict(self._file_info_list_to_revision_to_build_list(result_files))
+
+    def _file_info_list_to_revision_to_build_list(self, file_info_list):
+        # This assumes there was only one build per revision, which is false but we don't care for now.
+        revisions_and_builds = []
+        for file_info in file_info_list:
+            revision_and_build = self._revision_and_build_for_filename(file_info["filename"])
+            if revision_and_build:
+                revisions_and_builds.append(revision_and_build)
+        return revisions_and_builds
+
+    def _revision_to_build_map(self):
+        if not self._revision_to_build_number:
+            self._revision_to_build_number = self._fetch_revision_to_build_map()
+        return self._revision_to_build_number
+
+    def revision_build_pairs_with_results(self):
+        return self._revision_to_build_map().items()
+
+    # This assumes there can be only one build per revision, which is false, but we don't care for now.
+    def build_for_revision(self, revision, allow_failed_lookups=False):
+        # NOTE: This lookup will fail if that exact revision was never built.
+        build_number = self._revision_to_build_map().get(int(revision))
+        if not build_number:
+            return None
+        build = self.build(build_number)
+        if not build and allow_failed_lookups:
+            # Builds for old revisions will fail to look up via buildbot's json api.
+            build = Build(self,
+                build_number=build_number,
+                revision=revision,
+                is_green=False,
+            )
+        return build
+
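+    # find_regression_window() walks back from red_build, intersecting failure
+    # sets, until it reaches a green build or a build with no failures in
+    # common.  For example, in buildbot_unittest.py, where builds 4..10 are red
+    # with identical failures, the window for build 10 spans the last green
+    # build and build 4.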
+    def find_regression_window(self, red_build, look_back_limit=30):
+        if not red_build or red_build.is_green():
+            return RegressionWindow(None, None)
+        common_failures = None
+        current_build = red_build
+        build_after_current_build = None
+        look_back_count = 0
+        while current_build:
+            if current_build.is_green():
+                # current_build can't possibly have any failures in common
+                # with red_build because it's green.
+                break
+            results = current_build.layout_test_results()
+            # We treat a lack of results as if all the tests failed.
+            # This occurs, for example, when we can't compile at all.
+            if results:
+                failures = set(results.failing_tests())
+                if common_failures is None:
+                    common_failures = failures
+                else:
+                    common_failures = common_failures.intersection(failures)
+                    if not common_failures:
+                        # current_build doesn't have any failures in common with
+                        # the red build we're worried about.  We assume that any
+                        # failures in current_build were due to flakiness.
+                        break
+            look_back_count += 1
+            if look_back_count > look_back_limit:
+                return RegressionWindow(None, current_build, failing_tests=common_failures)
+            build_after_current_build = current_build
+            current_build = current_build.previous_build()
+        # We must iterate at least once because red_build is red.
+        assert(build_after_current_build)
+        # Current build must either be green or have no failures in common
+        # with red build, so we've found our failure transition.
+        return RegressionWindow(current_build, build_after_current_build, failing_tests=common_failures)
+
+    def find_blameworthy_regression_window(self, red_build_number, look_back_limit=30, avoid_flakey_tests=True):
+        red_build = self.build(red_build_number)
+        regression_window = self.find_regression_window(red_build, look_back_limit)
+        if not regression_window.build_before_failure():
+            return None  # We ran off the limit of our search
+        # If avoid_flakey_tests, require at least 2 bad builds before we
+        # suspect a real failure transition.
+        if avoid_flakey_tests and regression_window.failing_build() == red_build:
+            return None
+        return regression_window
+
+
+class Build(object):
+    def __init__(self, builder, build_number, revision, is_green):
+        self._builder = builder
+        self._number = build_number
+        self._revision = revision
+        self._is_green = is_green
+
+    @staticmethod
+    def build_url(builder, build_number):
+        return "%s/builds/%s" % (builder.url(), build_number)
+
+    def url(self):
+        return self.build_url(self.builder(), self._number)
+
+    def results_url(self):
+        results_directory = "r%s (%s)" % (self.revision(), self._number)
+        return "%s/%s" % (self._builder.results_url(), urllib.quote(results_directory))
+
+    def results_zip_url(self):
+        return "%s.zip" % self.results_url()
+
+    @memoized
+    def layout_test_results(self):
+        return self._builder.fetch_layout_test_results(self.results_url())
+
+    def builder(self):
+        return self._builder
+
+    def revision(self):
+        return self._revision
+
+    def is_green(self):
+        return self._is_green
+
+    def previous_build(self):
+        # previous_build() allows callers to avoid assuming build numbers are sequential.
+        # They may not be sequential across all master changes, or when non-trunk builds are made.
+        return self._builder.build(self._number - 1)
+
+
+class BuildBot(object):
+    _builder_factory = Builder
+    _default_url = config_urls.buildbot_url
+
+    def __init__(self, url=None):
+        self.buildbot_url = url if url else self._default_url
+        self._builder_by_name = {}
+
+    def _parse_last_build_cell(self, builder, cell):
+        status_link = cell.find('a')
+        if status_link:
+            # Will be either a revision number or a build number
+            revision_string = status_link.string
+            # If revision_string has non-digits assume it's not a revision number.
+            builder['built_revision'] = int(revision_string) \
+                                        if not re.match('\D', revision_string) \
+                                        else None
+
+            # FIXME: We treat slave lost as green even though it is not, to
+            # work around the Qt bots being on a broken internet connection.
+            # The real fix is https://bugs.webkit.org/show_bug.cgi?id=37099
+            builder['is_green'] = not re.search('fail', cell.renderContents()) or \
+                                  bool(re.search('lost', cell.renderContents()))
+
+            status_link_regexp = r"builders/(?P<builder_name>.*)/builds/(?P<build_number>\d+)"
+            link_match = re.match(status_link_regexp, status_link['href'])
+            builder['build_number'] = int(link_match.group("build_number"))
+        else:
+            # We failed to find a link in the first cell, so just give up.  This
+            # can happen if a builder was just added; its first cell will just
+            # read "no build".
+            # Other parts of the code depend on is_green being present.
+            builder['is_green'] = False
+            builder['built_revision'] = None
+            builder['build_number'] = None
+
+    def _parse_current_build_cell(self, builder, cell):
+        activity_lines = cell.renderContents().split("<br />")
+        builder["activity"] = activity_lines[0] # normally "building" or "idle"
+        # The middle lines document how long is left for any current builds.
+        match = re.match("(?P<pending_builds>\d) pending", activity_lines[-1])
+        builder["pending_builds"] = int(match.group("pending_builds")) if match else 0
+
+    def _parse_builder_status_from_row(self, status_row):
+        status_cells = status_row.findAll('td')
+        builder = {}
+
+        # First cell is the name
+        name_link = status_cells[0].find('a')
+        builder["name"] = unicode(name_link.string)
+
+        self._parse_last_build_cell(builder, status_cells[1])
+        self._parse_current_build_cell(builder, status_cells[2])
+        return builder
+
+    def _matches_regexps(self, builder_name, name_regexps):
+        for name_regexp in name_regexps:
+            if re.match(name_regexp, builder_name):
+                return True
+        return False
+
+    # FIXME: This method needs to die, but is used by a unit test at the moment.
+    def _builder_statuses_with_names_matching_regexps(self, builder_statuses, name_regexps):
+        return [builder for builder in builder_statuses if self._matches_regexps(builder["name"], name_regexps)]
+
+    # FIXME: These _fetch methods should move to a networking class.
+    def _fetch_build_dictionary(self, builder, build_number):
+        # Note: filter=1 will remove None and {} and '', which cuts noise but can
+        # cause keys to be missing which you might otherwise expect.
+        # FIXME: The bot sends a *huge* amount of data for each request, we should
+        # find a way to reduce the response size further.
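+        # For example, a passing build fetched with filter=1 may come back with
+        # no 'results' key at all (just 'number' and 'sourceStamp'), which is
+        # why _fetch_build treats a missing 'results' value as green.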
+        json_url = "%s/json/builders/%s/builds/%s?filter=1" % (self.buildbot_url, urllib.quote(builder.name()), build_number)
+        try:
+            return json.load(urllib2.urlopen(json_url))
+        except urllib2.URLError, err:
+            build_url = Build.build_url(builder, build_number)
+            _log.error("Error fetching data for %s build %s (%s, json: %s): %s" % (builder.name(), build_number, build_url, json_url, err))
+            return None
+        except ValueError, err:
+            build_url = Build.build_url(builder, build_number)
+            _log.error("Error decoding json data from %s: %s" % (build_url, err))
+            return None
+
+    def _fetch_one_box_per_builder(self):
+        build_status_url = "%s/one_box_per_builder" % self.buildbot_url
+        return urllib2.urlopen(build_status_url)
+
+    def _file_cell_text(self, file_cell):
+        """Traverses down through firstChild elements until one containing a string is found, then returns that string"""
+        element = file_cell
+        while element.string is None and element.contents:
+            element = element.contents[0]
+        return element.string
+
+    def _parse_twisted_file_row(self, file_row):
+        string_or_empty = lambda string: unicode(string) if string else u""
+        file_cells = file_row.findAll('td')
+        return {
+            "filename": string_or_empty(self._file_cell_text(file_cells[0])),
+            "size": string_or_empty(self._file_cell_text(file_cells[1])),
+            "type": string_or_empty(self._file_cell_text(file_cells[2])),
+            "encoding": string_or_empty(self._file_cell_text(file_cells[3])),
+        }
+
+    def _parse_twisted_directory_listing(self, page):
+        soup = BeautifulSoup(page)
+        # HACK: Match only table rows with a class to ignore twisted header/footer rows.
+        file_rows = soup.find('table').findAll('tr', {'class': re.compile(r'\b(?:directory|file)\b')})
+        return [self._parse_twisted_file_row(file_row) for file_row in file_rows]
+
+    # FIXME: There should be a better way to get this information directly from twisted.
+    def _fetch_twisted_directory_listing(self, url):
+        return self._parse_twisted_directory_listing(urllib2.urlopen(url))
+
+    def builders(self):
+        return [self.builder_with_name(status["name"]) for status in self.builder_statuses()]
+
+    # This method pulls from /one_box_per_builder as an efficient way to get information about all builders in a single request.
+    def builder_statuses(self):
+        soup = BeautifulSoup(self._fetch_one_box_per_builder())
+        return [self._parse_builder_status_from_row(status_row) for status_row in soup.find('table').findAll('tr')]
+
+    def builder_with_name(self, name):
+        builder = self._builder_by_name.get(name)
+        if not builder:
+            builder = self._builder_factory(name, self)
+            self._builder_by_name[name] = builder
+        return builder
+
+    def failure_map(self):
+        failure_map = FailureMap()
+        revision_to_failing_bots = {}
+        for builder_status in self.builder_statuses():
+            if builder_status["is_green"]:
+                continue
+            builder = self.builder_with_name(builder_status["name"])
+            regression_window = builder.find_blameworthy_regression_window(builder_status["build_number"])
+            if regression_window:
+                failure_map.add_regression_window(builder, regression_window)
+        return failure_map
+
+    # This makes fewer requests than calling Builder.latest_build would.  It grabs all builder
+    # statuses in one request using self.builder_statuses (fetching /one_box_per_builder instead of builder pages).
+    def _latest_builds_from_builders(self):
+        builder_statuses = self.builder_statuses()
+        return [self.builder_with_name(status["name"]).build(status["build_number"]) for status in builder_statuses]
+
+    def _build_at_or_before_revision(self, build, revision):
+        while build:
+            if build.revision() <= revision:
+                return build
+            build = build.previous_build()
+
+    def _fetch_builder_page(self, builder):
+        builder_page_url = "%s/builders/%s?numbuilds=100" % (self.buildbot_url, urllib2.quote(builder.name()))
+        return urllib2.urlopen(builder_page_url)
+
+    def _revisions_for_builder(self, builder):
+        soup = BeautifulSoup(self._fetch_builder_page(builder))
+        revisions = []
+        for status_row in soup.find('table').findAll('tr'):
+            revision_anchor = status_row.find('a')
+            table_cells = status_row.findAll('td')
+            if not table_cells or len(table_cells) < 3 or not table_cells[2].string:
+                continue
+            if revision_anchor and revision_anchor.string and re.match(r'^\d+$', revision_anchor.string):
+                revisions.append((int(revision_anchor.string), 'success' in table_cells[2].string))
+        return revisions
+
+    def _find_green_revision(self, builder_revisions):
+        revision_statuses = {}
+        for builder in builder_revisions:
+            for revision, succeeded in builder_revisions[builder]:
+                revision_statuses.setdefault(revision, set())
+                if succeeded and revision_statuses[revision] is not None:
+                    revision_statuses[revision].add(builder)
+                else:
+                    revision_statuses[revision] = None
+
+        # In descending order, look for a revision X with successful builds.
+        # Once we find X, check whether every builder succeeded in the neighborhood of X.
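+        # For example (from the unit tests), given
+        #   {'Builder 1': [(1, True), (3, True)],
+        #    'Builder 2': [(1, True), (3, False)],
+        #    'Builder 3': [(1, True), (3, True)]}
+        # revision 3 is rejected because Builder 2 failed there, and revision 1,
+        # where every builder succeeded, is returned.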
+        revisions_in_order = sorted(revision_statuses.keys(), reverse=True)
+        for i, revision in enumerate(revisions_in_order):
+            if not revision_statuses[revision]:
+                continue
+
+            builders_succeeded_in_future = set()
+            for future_revision in sorted(revisions_in_order[:i + 1]):
+                if not revision_statuses[future_revision]:
+                    break
+                builders_succeeded_in_future = builders_succeeded_in_future.union(revision_statuses[future_revision])
+
+            builders_succeeded_in_past = set()
+            for past_revision in revisions_in_order[i:]:
+                if not revision_statuses[past_revision]:
+                    break
+                builders_succeeded_in_past = builders_succeeded_in_past.union(revision_statuses[past_revision])
+
+            if len(builders_succeeded_in_future) == len(builder_revisions) and len(builders_succeeded_in_past) == len(builder_revisions):
+                return revision
+        return None
diff --git a/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_mock.py b/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_mock.py
new file mode 100644
index 0000000..f8ec49b
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_mock.py
@@ -0,0 +1,109 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.system.logutils import get_logger
+
+_log = get_logger(__file__)
+
+class MockBuild(object):
+    def __init__(self, build_number, revision, is_green):
+        self._number = build_number
+        self._revision = revision
+        self._is_green = is_green
+
+class MockBuilder(object):
+    def __init__(self, name):
+        self._name = name
+
+    def name(self):
+        return self._name
+
+    def build(self, build_number):
+        return MockBuild(build_number=build_number, revision=1234, is_green=False)
+
+    def results_url(self):
+        return "http://example.com/builders/%s/results" % self.name()
+
+    def accumulated_results_url(self):
+        return "http://example.com/f/builders/%s/results/layout-test-results" % self.name()
+
+    def latest_layout_test_results_url(self):
+        return self.accumulated_results_url()
+
+    def force_build(self, username, comments):
+        _log.info("MOCK: force_build: name=%s, username=%s, comments=%s" % (
+            self._name, username, comments))
+
+
+class MockFailureMap(object):
+    def __init__(self, buildbot):
+        self._buildbot = buildbot
+
+    def is_empty(self):
+        return False
+
+    def filter_out_old_failures(self, is_old_revision):
+        pass
+
+    def failing_revisions(self):
+        return [29837]
+
+    def builders_failing_for(self, revision):
+        return [self._buildbot.builder_with_name("Builder1")]
+
+    def tests_failing_for(self, revision):
+        return ["mock-test-1"]
+
+    def failing_tests(self):
+        return set(["mock-test-1"])
+
+
+class MockBuildBot(object):
+    def __init__(self):
+        self._mock_builder1_status = {
+            "name": "Builder1",
+            "is_green": True,
+            "activity": "building",
+        }
+        self._mock_builder2_status = {
+            "name": "Builder2",
+            "is_green": True,
+            "activity": "idle",
+        }
+
+    def builder_with_name(self, name):
+        return MockBuilder(name)
+
+    def builder_statuses(self):
+        return [
+            self._mock_builder1_status,
+            self._mock_builder2_status,
+        ]
+
+    def light_tree_on_fire(self):
+        self._mock_builder2_status["is_green"] = False
+
+    def failure_map(self):
+        return MockFailureMap(self)
diff --git a/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py b/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py
new file mode 100644
index 0000000..69f8648
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py
@@ -0,0 +1,479 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.net.layouttestresults import LayoutTestResults
+from webkitpy.common.net.buildbot import BuildBot, Builder, Build
+from webkitpy.layout_tests.models import test_results
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
+
+
+class BuilderTest(unittest.TestCase):
+    def _mock_test_result(self, testname):
+        return test_results.TestResult(testname, [test_failures.FailureTextMismatch()])
+
+    def _install_fetch_build(self, failure):
+        def _mock_fetch_build(build_number):
+            build = Build(
+                builder=self.builder,
+                build_number=build_number,
+                revision=build_number + 1000,
+                is_green=build_number < 4
+            )
+            results = [self._mock_test_result(testname) for testname in failure(build_number)]
+            layout_test_results = LayoutTestResults(results)
+            def mock_layout_test_results():
+                return layout_test_results
+            build.layout_test_results = mock_layout_test_results
+            return build
+        self.builder._fetch_build = _mock_fetch_build
+
+    def setUp(self):
+        self.buildbot = BuildBot()
+        self.builder = Builder(u"Test Builder \u2661", self.buildbot)
+        self._install_fetch_build(lambda build_number: ["test1", "test2"])
+
+    def test_latest_layout_test_results(self):
+        self.builder.fetch_layout_test_results = lambda results_url: LayoutTestResults([self._mock_test_result(testname) for testname in ["test1", "test2"]])
+        self.builder.accumulated_results_url = lambda: "http://dummy_url.org"
+        self.assertTrue(self.builder.latest_layout_test_results())
+
+    def test_find_regression_window(self):
+        regression_window = self.builder.find_regression_window(self.builder.build(10))
+        self.assertEqual(regression_window.build_before_failure().revision(), 1003)
+        self.assertEqual(regression_window.failing_build().revision(), 1004)
+
+        regression_window = self.builder.find_regression_window(self.builder.build(10), look_back_limit=2)
+        self.assertEqual(regression_window.build_before_failure(), None)
+        self.assertEqual(regression_window.failing_build().revision(), 1008)
+
+    def test_none_build(self):
+        self.builder._fetch_build = lambda build_number: None
+        regression_window = self.builder.find_regression_window(self.builder.build(10))
+        self.assertEqual(regression_window.build_before_failure(), None)
+        self.assertEqual(regression_window.failing_build(), None)
+
+    def test_flaky_tests(self):
+        self._install_fetch_build(lambda build_number: ["test1"] if build_number % 2 else ["test2"])
+        regression_window = self.builder.find_regression_window(self.builder.build(10))
+        self.assertEqual(regression_window.build_before_failure().revision(), 1009)
+        self.assertEqual(regression_window.failing_build().revision(), 1010)
+
+    def test_failure_and_flaky(self):
+        self._install_fetch_build(lambda build_number: ["test1", "test2"] if build_number % 2 else ["test2"])
+        regression_window = self.builder.find_regression_window(self.builder.build(10))
+        self.assertEqual(regression_window.build_before_failure().revision(), 1003)
+        self.assertEqual(regression_window.failing_build().revision(), 1004)
+
+    def test_no_results(self):
+        self._install_fetch_build(lambda build_number: ["test1", "test2"] if build_number % 2 else ["test2"])
+        regression_window = self.builder.find_regression_window(self.builder.build(10))
+        self.assertEqual(regression_window.build_before_failure().revision(), 1003)
+        self.assertEqual(regression_window.failing_build().revision(), 1004)
+
+    def test_failure_after_flaky(self):
+        self._install_fetch_build(lambda build_number: ["test1", "test2"] if build_number > 6 else ["test3"])
+        regression_window = self.builder.find_regression_window(self.builder.build(10))
+        self.assertEqual(regression_window.build_before_failure().revision(), 1006)
+        self.assertEqual(regression_window.failing_build().revision(), 1007)
+
+    def test_find_blameworthy_regression_window(self):
+        self.assertEqual(self.builder.find_blameworthy_regression_window(10).revisions(), [1004])
+        self.assertEqual(self.builder.find_blameworthy_regression_window(10, look_back_limit=2), None)
+        # Flakey test avoidance requires at least 2 red builds:
+        self.assertEqual(self.builder.find_blameworthy_regression_window(4), None)
+        self.assertEqual(self.builder.find_blameworthy_regression_window(4, avoid_flakey_tests=False).revisions(), [1004])
+        # Green builder:
+        self.assertEqual(self.builder.find_blameworthy_regression_window(3), None)
+
+    def test_build_caching(self):
+        self.assertEqual(self.builder.build(10), self.builder.build(10))
+
+    def test_build_and_revision_for_filename(self):
+        expectations = {
+            "r47483 (1)/" : (47483, 1),
+            "r47483 (1).zip" : (47483, 1),
+            "random junk": None,
+        }
+        for filename, revision_and_build in expectations.items():
+            self.assertEqual(self.builder._revision_and_build_for_filename(filename), revision_and_build)
+
+    def test_file_info_list_to_revision_to_build_list(self):
+        file_info_list = [
+            {"filename": "r47483 (1)/"},
+            {"filename": "r47483 (1).zip"},
+            {"filename": "random junk"},
+        ]
+        builds_and_revisions_list = [(47483, 1), (47483, 1)]
+        self.assertEqual(self.builder._file_info_list_to_revision_to_build_list(file_info_list), builds_and_revisions_list)
+
+    def test_fetch_build(self):
+        buildbot = BuildBot()
+        builder = Builder(u"Test Builder \u2661", buildbot)
+
+        def mock_fetch_build_dictionary(self, build_number):
+            build_dictionary = {
+                "sourceStamp": {
+                    "revision": None,  # revision=None means a trunk build started from the force-build button on the builder page.
+                    },
+                "number": int(build_number),
+                # Intentionally missing the 'results' key, meaning it's a "pass" build.
+            }
+            return build_dictionary
+        buildbot._fetch_build_dictionary = mock_fetch_build_dictionary
+        self.assertNotEqual(builder._fetch_build(1), None)
+
+
+class BuildTest(unittest.TestCase):
+    def test_layout_test_results(self):
+        buildbot = BuildBot()
+        builder = Builder(u"Foo Builder (test)", buildbot)
+        builder._fetch_file_from_results = lambda results_url, file_name: None
+        build = Build(builder, None, None, None)
+        # Test that layout_test_results() returns None if the fetch fails.
+        self.assertEqual(build.layout_test_results(), None)
+
+
+class BuildBotTest(unittest.TestCase):
+
+    _example_one_box_status = '''
+    <table>
+    <tr>
+    <td class="box"><a href="builders/Windows%20Debug%20%28Tests%29">Windows Debug (Tests)</a></td>
+      <td align="center" class="LastBuild box success"><a href="builders/Windows%20Debug%20%28Tests%29/builds/3693">47380</a><br />build<br />successful</td>
+      <td align="center" class="Activity building">building<br />ETA in<br />~ 14 mins<br />at 13:40</td>
+    <tr>
+    <td class="box"><a href="builders/SnowLeopard%20Intel%20Release">SnowLeopard Intel Release</a></td>
+      <td class="LastBuild box" >no build</td>
+      <td align="center" class="Activity building">building<br />< 1 min</td>
+    <tr>
+    <td class="box"><a href="builders/Qt%20Linux%20Release">Qt Linux Release</a></td>
+      <td align="center" class="LastBuild box failure"><a href="builders/Qt%20Linux%20Release/builds/654">47383</a><br />failed<br />compile-webkit</td>
+      <td align="center" class="Activity idle">idle<br />3 pending</td>
+    <tr>
+    <td class="box"><a href="builders/Qt%20Windows%2032-bit%20Debug">Qt Windows 32-bit Debug</a></td>
+      <td align="center" class="LastBuild box failure"><a href="builders/Qt%20Windows%2032-bit%20Debug/builds/2090">60563</a><br />failed<br />failed<br />slave<br />lost</td>
+      <td align="center" class="Activity building">building<br />ETA in<br />~ 5 mins<br />at 08:25</td>
+    </table>
+'''
+    _expected_example_one_box_parsings = [
+        {
+            'is_green': True,
+            'build_number' : 3693,
+            'name': u'Windows Debug (Tests)',
+            'built_revision': 47380,
+            'activity': 'building',
+            'pending_builds': 0,
+        },
+        {
+            'is_green': False,
+            'build_number' : None,
+            'name': u'SnowLeopard Intel Release',
+            'built_revision': None,
+            'activity': 'building',
+            'pending_builds': 0,
+        },
+        {
+            'is_green': False,
+            'build_number' : 654,
+            'name': u'Qt Linux Release',
+            'built_revision': 47383,
+            'activity': 'idle',
+            'pending_builds': 3,
+        },
+        {
+            'is_green': True,
+            'build_number' : 2090,
+            'name': u'Qt Windows 32-bit Debug',
+            'built_revision': 60563,
+            'activity': 'building',
+            'pending_builds': 0,
+        },
+    ]
+
+    def test_status_parsing(self):
+        buildbot = BuildBot()
+
+        soup = BeautifulSoup(self._example_one_box_status)
+        status_table = soup.find("table")
+        input_rows = status_table.findAll('tr')
+
+        for x, status_row in enumerate(input_rows):
+            expected_parsing = self._expected_example_one_box_parsings[x]
+
+            builder = buildbot._parse_builder_status_from_row(status_row)
+
+            # Make sure we aren't parsing more or less than we expect
+            self.assertEquals(builder.keys(), expected_parsing.keys())
+
+            for key, expected_value in expected_parsing.items():
+                self.assertEquals(builder[key], expected_value, ("Builder %d parse failure for key: %s: Actual='%s' Expected='%s'" % (x, key, builder[key], expected_value)))
+
+    def test_builder_with_name(self):
+        buildbot = BuildBot()
+
+        builder = buildbot.builder_with_name("Test Builder")
+        self.assertEqual(builder.name(), "Test Builder")
+        self.assertEqual(builder.url(), "http://build.webkit.org/builders/Test%20Builder")
+        self.assertEqual(builder.url_encoded_name(), "Test%20Builder")
+        self.assertEqual(builder.results_url(), "http://build.webkit.org/results/Test%20Builder")
+
+        # Override _fetch_build_dictionary function to not touch the network.
+        def mock_fetch_build_dictionary(self, build_number):
+            build_dictionary = {
+                "sourceStamp": {
+                    "revision" : 2 * build_number,
+                    },
+                "number" : int(build_number),
+                "results" : build_number % 2, # 0 means pass
+            }
+            return build_dictionary
+        buildbot._fetch_build_dictionary = mock_fetch_build_dictionary
+
+        build = builder.build(10)
+        self.assertEqual(build.builder(), builder)
+        self.assertEqual(build.url(), "http://build.webkit.org/builders/Test%20Builder/builds/10")
+        self.assertEqual(build.results_url(), "http://build.webkit.org/results/Test%20Builder/r20%20%2810%29")
+        self.assertEqual(build.revision(), 20)
+        self.assertEqual(build.is_green(), True)
+
+        build = build.previous_build()
+        self.assertEqual(build.builder(), builder)
+        self.assertEqual(build.url(), "http://build.webkit.org/builders/Test%20Builder/builds/9")
+        self.assertEqual(build.results_url(), "http://build.webkit.org/results/Test%20Builder/r18%20%289%29")
+        self.assertEqual(build.revision(), 18)
+        self.assertEqual(build.is_green(), False)
+
+        self.assertEqual(builder.build(None), None)
+
+    _example_directory_listing = '''
+<h1>Directory listing for /results/SnowLeopard Intel Leaks/</h1>
+
+<table>
+        <tr class="alt">
+            <th>Filename</th>
+            <th>Size</th>
+            <th>Content type</th>
+            <th>Content encoding</th>
+        </tr>
+<tr class="directory ">
+    <td><a href="r47483%20%281%29/"><b>r47483 (1)/</b></a></td>
+    <td><b></b></td>
+    <td><b>[Directory]</b></td>
+    <td><b></b></td>
+</tr>
+<tr class="file alt">
+    <td><a href="r47484%20%282%29.zip">r47484 (2).zip</a></td>
+    <td>89K</td>
+    <td>[application/zip]</td>
+    <td></td>
+</tr>
+'''
+    _expected_files = [
+        {
+            "filename" : "r47483 (1)/",
+            "size" : "",
+            "type" : "[Directory]",
+            "encoding" : "",
+        },
+        {
+            "filename" : "r47484 (2).zip",
+            "size" : "89K",
+            "type" : "[application/zip]",
+            "encoding" : "",
+        },
+    ]
+
+    def test_parse_build_to_revision_map(self):
+        buildbot = BuildBot()
+        files = buildbot._parse_twisted_directory_listing(self._example_directory_listing)
+        self.assertEqual(self._expected_files, files)
+
+    _fake_builder_page = '''
+    <body>
+    <div class="content">
+    <h1>Some Builder</h1>
+    <p>(<a href="../waterfall?show=Some Builder">view in waterfall</a>)</p>
+    <div class="column">
+    <h2>Recent Builds:</h2>
+    <table class="info">
+      <tr>
+        <th>Time</th>
+        <th>Revision</th>
+        <th>Result</th>    <th>Build #</th>
+        <th>Info</th>
+      </tr>
+      <tr class="alt">
+        <td>Jan 10 15:49</td>
+        <td><span class="revision" title="Revision 104643"><a href="http://trac.webkit.org/changeset/104643">104643</a></span></td>
+        <td class="success">failure</td>    <td><a href=".../37604">#37604</a></td>
+        <td class="left">Build successful</td>
+      </tr>
+      <tr class="">
+        <td>Jan 10 15:32</td>
+        <td><span class="revision" title="Revision 104636"><a href="http://trac.webkit.org/changeset/104636">104636</a></span></td>
+        <td class="success">failure</td>    <td><a href=".../37603">#37603</a></td>
+        <td class="left">Build successful</td>
+      </tr>
+      <tr class="alt">
+        <td>Jan 10 15:18</td>
+        <td><span class="revision" title="Revision 104635"><a href="http://trac.webkit.org/changeset/104635">104635</a></span></td>
+        <td class="success">success</td>    <td><a href=".../37602">#37602</a></td>
+        <td class="left">Build successful</td>
+      </tr>
+      <tr class="">
+        <td>Jan 10 14:51</td>
+        <td><span class="revision" title="Revision 104633"><a href="http://trac.webkit.org/changeset/104633">104633</a></span></td>
+        <td class="failure">failure</td>    <td><a href=".../37601">#37601</a></td>
+        <td class="left">Failed compile-webkit</td>
+      </tr>
+    </table>
+    </body>'''
+    _fake_builder_page_without_success = '''
+    <body>
+    <table>
+      <tr class="alt">
+        <td>Jan 10 15:49</td>
+        <td><span class="revision" title="Revision 104643"><a href="http://trac.webkit.org/changeset/104643">104643</a></span></td>
+        <td class="success">failure</td>
+      </tr>
+      <tr class="">
+        <td>Jan 10 15:32</td>
+        <td><span class="revision" title="Revision 104636"><a href="http://trac.webkit.org/changeset/104636">104636</a></span></td>
+        <td class="success">failure</td>
+      </tr>
+      <tr class="alt">
+        <td>Jan 10 15:18</td>
+        <td><span class="revision" title="Revision 104635"><a href="http://trac.webkit.org/changeset/104635">104635</a></span></td>
+        <td class="success">failure</td>
+      </tr>
+      <tr class="">
+          <td>Jan 10 11:58</td>
+          <td><span class="revision" title="Revision ??"><a href="http://trac.webkit.org/changeset/%3F%3F">??</a></span></td>
+          <td class="retry">retry</td>
+        </tr>
+      <tr class="">
+        <td>Jan 10 14:51</td>
+        <td><span class="revision" title="Revision 104633"><a href="http://trac.webkit.org/changeset/104633">104633</a></span></td>
+        <td class="failure">failure</td>
+      </tr>
+    </table>
+    </body>'''
+
+    def test_revisions_for_builder(self):
+        buildbot = BuildBot()
+        buildbot._fetch_builder_page = lambda builder: builder.page
+        builder_with_success = Builder('Some builder', None)
+        builder_with_success.page = self._fake_builder_page
+        self.assertEqual(buildbot._revisions_for_builder(builder_with_success), [(104643, False), (104636, False), (104635, True), (104633, False)])
+
+        builder_without_success = Builder('Some builder', None)
+        builder_without_success.page = self._fake_builder_page_without_success
+        self.assertEqual(buildbot._revisions_for_builder(builder_without_success), [(104643, False), (104636, False), (104635, False), (104633, False)])
+
+    def test_find_green_revision(self):
+        buildbot = BuildBot()
+        self.assertEqual(buildbot._find_green_revision({
+            'Builder 1': [(1, True), (3, True)],
+            'Builder 2': [(1, True), (3, False)],
+            'Builder 3': [(1, True), (3, True)],
+        }), 1)
+        self.assertEqual(buildbot._find_green_revision({
+            'Builder 1': [(1, False), (3, True)],
+            'Builder 2': [(1, True), (3, True)],
+            'Builder 3': [(1, True), (3, True)],
+        }), 3)
+        self.assertEqual(buildbot._find_green_revision({
+            'Builder 1': [(1, True), (2, True)],
+            'Builder 2': [(1, False), (2, True), (3, True)],
+            'Builder 3': [(1, True), (3, True)],
+        }), None)
+        self.assertEqual(buildbot._find_green_revision({
+            'Builder 1': [(1, True), (2, True)],
+            'Builder 2': [(1, True), (2, True), (3, True)],
+            'Builder 3': [(1, True), (3, True)],
+        }), 2)
+        self.assertEqual(buildbot._find_green_revision({
+            'Builder 1': [(1, False), (2, True)],
+            'Builder 2': [(1, True), (3, True)],
+            'Builder 3': [(1, True), (3, True)],
+        }), None)
+        self.assertEqual(buildbot._find_green_revision({
+            'Builder 1': [(1, True), (3, True)],
+            'Builder 2': [(1, False), (2, True), (3, True), (4, True)],
+            'Builder 3': [(2, True), (4, True)],
+        }), 3)
+        self.assertEqual(buildbot._find_green_revision({
+            'Builder 1': [(1, True), (3, True)],
+            'Builder 2': [(1, False), (2, True), (3, True), (4, False)],
+            'Builder 3': [(2, True), (4, True)],
+        }), None)
+        self.assertEqual(buildbot._find_green_revision({
+            'Builder 1': [(1, True), (3, True)],
+            'Builder 2': [(1, False), (2, True), (3, True), (4, False)],
+            'Builder 3': [(2, True), (3, True), (4, True)],
+        }), 3)
+        self.assertEqual(buildbot._find_green_revision({
+            'Builder 1': [(1, True), (2, True)],
+            'Builder 2': [],
+            'Builder 3': [(1, True), (2, True)],
+        }), None)
+        self.assertEqual(buildbot._find_green_revision({
+            'Builder 1': [(1, True), (3, False), (5, True), (10, True), (12, False)],
+            'Builder 2': [(1, True), (3, False), (7, True), (9, True), (12, False)],
+            'Builder 3': [(1, True), (3, True), (7, True), (11, False), (12, True)],
+        }), 7)
+
+    def _fetch_build(self, build_number):
+        if build_number == 5:
+            return "correct build"
+        return "wrong build"
+
+    def _fetch_revision_to_build_map(self):
+        return {'r5': 5, 'r2': 2, 'r3': 3}
+
+    def test_latest_cached_build(self):
+        b = Builder('builder', BuildBot())
+        b._fetch_build = self._fetch_build
+        b._fetch_revision_to_build_map = self._fetch_revision_to_build_map
+        self.assertEquals("correct build", b.latest_cached_build())
+
+    def results_url(self):
+        return "some-url"
+
+    def test_results_zip_url(self):
+        b = Build(None, 123, 123, False)
+        b.results_url = self.results_url
+        self.assertEquals("some-url.zip", b.results_zip_url())
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/net/buildbot/chromiumbuildbot.py b/Tools/Scripts/webkitpy/common/net/buildbot/chromiumbuildbot.py
new file mode 100644
index 0000000..5030bba
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/buildbot/chromiumbuildbot.py
@@ -0,0 +1,47 @@
+# Copyright (c) 2011, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import webkitpy.common.config.urls as config_urls
+from webkitpy.common.net.buildbot.buildbot import Builder, BuildBot
+# FIXME: builders should probably be in webkitpy.common.config.
+from webkitpy.layout_tests.port.builders import builder_path_from_name
+
+
+class ChromiumBuilder(Builder):
+    # The build.chromium.org builders store their results in a different
+    # location than the build.webkit.org builders.
+    def results_url(self):
+        return "http://build.chromium.org/f/chromium/layout_test_results/%s" % builder_path_from_name(self._name)
+
+    def accumulated_results_url(self):
+        return self.results_url() + "/results/layout-test-results"
+
+
+class ChromiumBuildBot(BuildBot):
+    _builder_factory = ChromiumBuilder
+    _default_url = config_urls.chromium_buildbot_url
diff --git a/Tools/Scripts/webkitpy/common/net/credentials.py b/Tools/Scripts/webkitpy/common/net/credentials.py
new file mode 100644
index 0000000..21aeaea
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/credentials.py
@@ -0,0 +1,161 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Python module for reading stored web credentials from the OS.
+
+import os
+import platform
+import re
+
+from webkitpy.common.checkout.scm import Git
+from webkitpy.common.system.executive import Executive, ScriptError
+from webkitpy.common.system.user import User
+from webkitpy.common.system.deprecated_logging import log
+
+try:
+    # Use keyring, a cross platform keyring interface, as a fallback:
+    # http://pypi.python.org/pypi/keyring
+    import keyring
+except ImportError:
+    keyring = None
+
+
+class Credentials(object):
+    _environ_prefix = "webkit_bugzilla_"
+
+    def __init__(self, host, git_prefix=None, executive=None, cwd=os.getcwd(),
+                 keyring=keyring):
+        self.host = host
+        self.git_prefix = "%s." % git_prefix if git_prefix else ""
+        self.executive = executive or Executive()
+        self.cwd = cwd
+        self._keyring = keyring
+
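+    # With git_prefix="bugzilla", for example, the lookups below read the git
+    # config keys "bugzilla.username" and "bugzilla.password", roughly the
+    # equivalent of running (illustrative; the real lookup goes through
+    # Git.read_git_config):
+    #
+    #   git config bugzilla.username
+    #   git config bugzilla.password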
+    def _credentials_from_git(self):
+        try:
+            if not Git.in_working_directory(self.cwd):
+                return (None, None)
+            return (Git.read_git_config(self.git_prefix + "username"),
+                    Git.read_git_config(self.git_prefix + "password"))
+        except OSError, e:
+            # Catch and ignore OSError exceptions such as "no such file
+            # or directory" (OSError errno 2), which imply that the Git
+            # command cannot be found/is not installed.
+            pass
+        return (None, None)
+
+    def _keychain_value_with_label(self, label, source_text):
+        match = re.search("%s\"(?P<value>.+)\"" % label,
+                                                  source_text,
+                                                  re.MULTILINE)
+        if match:
+            return match.group('value')
+
+    def _is_mac_os_x(self):
+        return platform.mac_ver()[0]
+
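+    # The labels passed below match the two interesting lines of
+    # `security find-internet-password -g` output; credentials_unittest.py
+    # contains a full example, in which the relevant lines are:
+    #
+    #     "acct"<blob>="test@webkit.org"
+    #   password: "SECRETSAUCE"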
+    def _parse_security_tool_output(self, security_output):
+        username = self._keychain_value_with_label("^\s*\"acct\"<blob>=",
+                                                   security_output)
+        password = self._keychain_value_with_label("^password: ",
+                                                   security_output)
+        return [username, password]
+
+    def _run_security_tool(self, username=None):
+        security_command = [
+            "/usr/bin/security",
+            "find-internet-password",
+            "-g",
+            "-s",
+            self.host,
+        ]
+        if username:
+            security_command += ["-a", username]
+
+        log("Reading Keychain for %s account and password.  "
+            "Click \"Allow\" to continue..." % self.host)
+        try:
+            return self.executive.run_command(security_command)
+        except ScriptError:
+            # Failed to find a keychain entry, or some kind of OS-related
+            # error occurred (for instance, the /usr/bin/security command
+            # could not be found).
+            log("Could not find a keychain entry for %s." % self.host)
+            return None
+
+    def _credentials_from_keychain(self, username=None):
+        if not self._is_mac_os_x():
+            return [username, None]
+
+        security_output = self._run_security_tool(username)
+        if security_output:
+            return self._parse_security_tool_output(security_output)
+        else:
+            return [None, None]
+
+    def _read_environ(self, key):
+        environ_key = self._environ_prefix + key
+        return os.environ.get(environ_key.upper())
+
+    def _credentials_from_environment(self):
+        return (self._read_environ("username"), self._read_environ("password"))
+
+    def _offer_to_store_credentials_in_keyring(self, username, password):
+        if not self._keyring:
+            return
+        if not User().confirm("Store password in system keyring?", User.DEFAULT_NO):
+            return
+        try:
+            self._keyring.set_password(self.host, username, password)
+        except:
+            pass
+
+    def read_credentials(self, user=User):
+        username, password = self._credentials_from_environment()
+        # FIXME: We don't currently support pulling the username from one
+        # source and the password from a separate source.
+        if not username or not password:
+            username, password = self._credentials_from_git()
+        if not username or not password:
+            username, password = self._credentials_from_keychain(username)
+
+        if not username:
+            username = user.prompt("%s login: " % self.host)
+
+        if username and not password and self._keyring:
+            try:
+                password = self._keyring.get_password(self.host, username)
+            except:
+                pass
+
+        if not password:
+            password = user.prompt_password("%s password for %s: " % (self.host, username))
+            self._offer_to_store_credentials_in_keyring(username, password)
+
+        return (username, password)
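+
+
+# A minimal usage sketch (the host and git prefix are illustrative).  The
+# lookup order in read_credentials() is: environment variables, then git
+# config, then the OS X Keychain, then the optional "keyring" module, falling
+# back to prompting the user:
+#
+#   credentials = Credentials("bugs.webkit.org", git_prefix="bugzilla")
+#   username, password = credentials.read_credentials()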
diff --git a/Tools/Scripts/webkitpy/common/net/credentials_unittest.py b/Tools/Scripts/webkitpy/common/net/credentials_unittest.py
new file mode 100644
index 0000000..a797e3d
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/credentials_unittest.py
@@ -0,0 +1,212 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import tempfile
+import unittest
+from webkitpy.common.net.credentials import Credentials
+from webkitpy.common.system.executive import Executive
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.system.user_mock import MockUser
+from webkitpy.thirdparty.mock import Mock
+from webkitpy.tool.mocktool import MockOptions
+from webkitpy.common.system.executive_mock import MockExecutive
+
+
+# FIXME: Other unit tests probably want this class.
+class _TemporaryDirectory(object):
+    def __init__(self, **kwargs):
+        self._kwargs = kwargs
+        self._directory_path = None
+
+    def __enter__(self):
+        self._directory_path = tempfile.mkdtemp(**self._kwargs)
+        return self._directory_path
+
+    def __exit__(self, type, value, traceback):
+        os.rmdir(self._directory_path)
+
+
+# Note: All tests should use this class instead of Credentials directly to avoid using a real Executive.
+class MockedCredentials(Credentials):
+    def __init__(self, *args, **kwargs):
+        if 'executive' not in kwargs:
+            kwargs['executive'] = MockExecutive()
+        Credentials.__init__(self, *args, **kwargs)
+
+
+class CredentialsTest(unittest.TestCase):
+    example_security_output = """keychain: "/Users/test/Library/Keychains/login.keychain"
+class: "inet"
+attributes:
+    0x00000007 <blob>="bugs.webkit.org (test@webkit.org)"
+    0x00000008 <blob>=<NULL>
+    "acct"<blob>="test@webkit.org"
+    "atyp"<blob>="form"
+    "cdat"<timedate>=0x32303039303832353233353231365A00  "20090825235216Z\000"
+    "crtr"<uint32>=<NULL>
+    "cusi"<sint32>=<NULL>
+    "desc"<blob>="Web form password"
+    "icmt"<blob>="default"
+    "invi"<sint32>=<NULL>
+    "mdat"<timedate>=0x32303039303930393137323635315A00  "20090909172651Z\000"
+    "nega"<sint32>=<NULL>
+    "path"<blob>=<NULL>
+    "port"<uint32>=0x00000000 
+    "prot"<blob>=<NULL>
+    "ptcl"<uint32>="htps"
+    "scrp"<sint32>=<NULL>
+    "sdmn"<blob>=<NULL>
+    "srvr"<blob>="bugs.webkit.org"
+    "type"<uint32>=<NULL>
+password: "SECRETSAUCE"
+"""
+
+    def test_keychain_lookup_on_non_mac(self):
+        class FakeCredentials(MockedCredentials):
+            def _is_mac_os_x(self):
+                return False
+        credentials = FakeCredentials("bugs.webkit.org")
+        self.assertEqual(credentials._is_mac_os_x(), False)
+        self.assertEqual(credentials._credentials_from_keychain("foo"), ["foo", None])
+
+    def test_security_output_parse(self):
+        credentials = MockedCredentials("bugs.webkit.org")
+        self.assertEqual(credentials._parse_security_tool_output(self.example_security_output), ["test@webkit.org", "SECRETSAUCE"])
+
+    def test_security_output_parse_entry_not_found(self):
+        # FIXME: This test won't work if the user has a credential for foo.example.com!
+        credentials = Credentials("foo.example.com")
+        if not credentials._is_mac_os_x():
+            return # This test does not run on a non-Mac.
+
+        # Note, we ignore the captured output because it is already covered
+        # by the test case CredentialsTest._assert_security_call (below).
+        outputCapture = OutputCapture()
+        outputCapture.capture_output()
+        self.assertEqual(credentials._run_security_tool(), None)
+        outputCapture.restore_output()
+
+    def _assert_security_call(self, username=None):
+        executive_mock = Mock()
+        credentials = MockedCredentials("example.com", executive=executive_mock)
+
+        expected_stderr = "Reading Keychain for example.com account and password.  Click \"Allow\" to continue...\n"
+        OutputCapture().assert_outputs(self, credentials._run_security_tool, [username], expected_stderr=expected_stderr)
+
+        security_args = ["/usr/bin/security", "find-internet-password", "-g", "-s", "example.com"]
+        if username:
+            security_args += ["-a", username]
+        executive_mock.run_command.assert_called_with(security_args)
+
+    def test_security_calls(self):
+        self._assert_security_call()
+        self._assert_security_call(username="foo")
+
+    def test_credentials_from_environment(self):
+        credentials = MockedCredentials("example.com")
+
+        saved_environ = os.environ.copy()
+        os.environ['WEBKIT_BUGZILLA_USERNAME'] = "foo"
+        os.environ['WEBKIT_BUGZILLA_PASSWORD'] = "bar"
+        username, password = credentials._credentials_from_environment()
+        self.assertEquals(username, "foo")
+        self.assertEquals(password, "bar")
+        os.environ = saved_environ
+
+    def test_read_credentials_without_git_repo(self):
+        # FIXME: This should share more code with test_keyring_without_git_repo
+        class FakeCredentials(MockedCredentials):
+            def _is_mac_os_x(self):
+                return True
+
+            def _credentials_from_keychain(self, username):
+                return ("test@webkit.org", "SECRETSAUCE")
+
+            def _credentials_from_environment(self):
+                return (None, None)
+
+        with _TemporaryDirectory(suffix="not_a_git_repo") as temp_dir_path:
+            credentials = FakeCredentials("bugs.webkit.org", cwd=temp_dir_path)
+            # FIXME: Using read_credentials here seems too broad, as a higher-priority
+            # credential source could be affected by the user's environment.
+            self.assertEqual(credentials.read_credentials(), ("test@webkit.org", "SECRETSAUCE"))
+
+
+    def test_keyring_without_git_repo(self):
+        # FIXME: This should share more code with test_read_credentials_without_git_repo
+        class MockKeyring(object):
+            def get_password(self, host, username):
+                return "NOMNOMNOM"
+
+        class FakeCredentials(MockedCredentials):
+            def _is_mac_os_x(self):
+                return True
+
+            def _credentials_from_keychain(self, username):
+                return ("test@webkit.org", None)
+
+            def _credentials_from_environment(self):
+                return (None, None)
+
+        with _TemporaryDirectory(suffix="not_a_git_repo") as temp_dir_path:
+            credentials = FakeCredentials("fake.hostname", cwd=temp_dir_path, keyring=MockKeyring())
+            # FIXME: Using read_credentials here seems too broad, as a higher-priority
+            # credential source could be affected by the user's environment.
+            self.assertEqual(credentials.read_credentials(), ("test@webkit.org", "NOMNOMNOM"))
+
+    def test_keyring_without_git_repo_nor_keychain(self):
+        class MockKeyring(object):
+            def get_password(self, host, username):
+                return "NOMNOMNOM"
+
+        class FakeCredentials(MockedCredentials):
+            def _credentials_from_keychain(self, username):
+                return (None, None)
+
+            def _credentials_from_environment(self):
+                return (None, None)
+
+        class FakeUser(MockUser):
+            @classmethod
+            def prompt(cls, message, repeat=1, raw_input=raw_input):
+                return "test@webkit.org"
+
+            @classmethod
+            def prompt_password(cls, message, repeat=1, raw_input=raw_input):
+                raise AssertionError("should not prompt for password")
+
+        with _TemporaryDirectory(suffix="not_a_git_repo") as temp_dir_path:
+            credentials = FakeCredentials("fake.hostname", cwd=temp_dir_path, keyring=MockKeyring())
+            # FIXME: Using read_credentials here seems too broad, as a higher-priority
+            # credential source could be affected by the user's environment.
+            self.assertEqual(credentials.read_credentials(FakeUser), ("test@webkit.org", "NOMNOMNOM"))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/net/failuremap.py b/Tools/Scripts/webkitpy/common/net/failuremap.py
new file mode 100644
index 0000000..746242e
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/failuremap.py
@@ -0,0 +1,88 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+# FIXME: This probably belongs in the buildbot module.
+class FailureMap(object):
+    def __init__(self):
+        self._failures = []
+
+    def add_regression_window(self, builder, regression_window):
+        self._failures.append({
+            'builder': builder,
+            'regression_window': regression_window,
+        })
+
+    def is_empty(self):
+        return not self._failures
+
+    def failing_revisions(self):
+        failing_revisions = [failure_info['regression_window'].revisions()
+                             for failure_info in self._failures]
+        return sorted(set(sum(failing_revisions, [])))
+
+    def builders_failing_for(self, revision):
+        return self._builders_failing_because_of([revision])
+
+    def tests_failing_for(self, revision):
+        tests = [failure_info['regression_window'].failing_tests()
+                 for failure_info in self._failures
+                 if revision in failure_info['regression_window'].revisions()
+                    and failure_info['regression_window'].failing_tests()]
+        result = set()
+        for test in tests:
+            result = result.union(test)
+        return sorted(result)
+
+    def failing_tests(self):
+        return set(sum([self.tests_failing_for(revision) for revision in self.failing_revisions()], []))
+
+    def _old_failures(self, is_old_failure):
+        return filter(lambda revision: is_old_failure(revision),
+                      self.failing_revisions())
+
+    def _builders_failing_because_of(self, revisions):
+        revision_set = set(revisions)
+        return [failure_info['builder'] for failure_info in self._failures
+                if revision_set.intersection(
+                    failure_info['regression_window'].revisions())]
+
+    # FIXME: We should re-process old failures after some time delay.
+    # https://bugs.webkit.org/show_bug.cgi?id=36581
+    def filter_out_old_failures(self, is_old_failure):
+        old_failures = self._old_failures(is_old_failure)
+        old_failing_builder_names = set([builder.name()
+            for builder in self._builders_failing_because_of(old_failures)])
+
+        # We filter out all the failing builders that could have been caused
+        # by old_failures.  We could miss some new failures this way, but
+        # empirically, this reduces the amount of spam we generate.
+        failures = self._failures
+        self._failures = [failure_info for failure_info in failures
+            if failure_info['builder'].name() not in old_failing_builder_names]
+        self._cache = {}
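+
+
+# A minimal usage sketch (mirrors failuremap_unittest.py below; known_failures
+# is an illustrative container of already-triaged revisions): regression
+# windows are recorded per builder, and filter_out_old_failures() drops every
+# builder whose regression window could be explained by a known failure:
+#
+#   failure_map = FailureMap()
+#   failure_map.add_regression_window(builder, regression_window)
+#   failure_map.filter_out_old_failures(lambda revision: revision in known_failures)
+#   failure_map.failing_revisions()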
diff --git a/Tools/Scripts/webkitpy/common/net/failuremap_unittest.py b/Tools/Scripts/webkitpy/common/net/failuremap_unittest.py
new file mode 100644
index 0000000..9a66d9e
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/failuremap_unittest.py
@@ -0,0 +1,80 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.net.buildbot import Build
+from webkitpy.common.net.failuremap import *
+from webkitpy.common.net.regressionwindow import RegressionWindow
+from webkitpy.common.net.buildbot.buildbot_mock import MockBuilder
+
+
+class FailureMapTest(unittest.TestCase):
+    builder1 = MockBuilder("Builder1")
+    builder2 = MockBuilder("Builder2")
+
+    build1a = Build(builder1, build_number=22, revision=1233, is_green=True)
+    build1b = Build(builder1, build_number=23, revision=1234, is_green=False)
+    build2a = Build(builder2, build_number=89, revision=1233, is_green=True)
+    build2b = Build(builder2, build_number=90, revision=1235, is_green=False)
+
+    regression_window1 = RegressionWindow(build1a, build1b, failing_tests=[u'test1', u'test1'])
+    regression_window2 = RegressionWindow(build2a, build2b, failing_tests=[u'test1'])
+
+    def _make_failure_map(self):
+        failure_map = FailureMap()
+        failure_map.add_regression_window(self.builder1, self.regression_window1)
+        failure_map.add_regression_window(self.builder2, self.regression_window2)
+        return failure_map
+
+    def test_failing_revisions(self):
+        failure_map = self._make_failure_map()
+        self.assertEquals(failure_map.failing_revisions(), [1234, 1235])
+
+    def test_new_failures(self):
+        failure_map = self._make_failure_map()
+        failure_map.filter_out_old_failures(lambda revision: False)
+        self.assertEquals(failure_map.failing_revisions(), [1234, 1235])
+
+    def test_new_failures_with_old_revisions(self):
+        failure_map = self._make_failure_map()
+        failure_map.filter_out_old_failures(lambda revision: revision == 1234)
+        self.assertEquals(failure_map.failing_revisions(), [])
+
+    def test_new_failures_with_more_old_revisions(self):
+        failure_map = self._make_failure_map()
+        failure_map.filter_out_old_failures(lambda revision: revision == 1235)
+        self.assertEquals(failure_map.failing_revisions(), [1234])
+
+    def test_tests_failing_for(self):
+        failure_map = self._make_failure_map()
+        self.assertEquals(failure_map.tests_failing_for(1234), [u'test1'])
+
+    def test_failing_tests(self):
+        failure_map = self._make_failure_map()
+        self.assertEquals(failure_map.failing_tests(), set([u'test1']))
diff --git a/Tools/Scripts/webkitpy/common/net/file_uploader.py b/Tools/Scripts/webkitpy/common/net/file_uploader.py
new file mode 100644
index 0000000..9b220b0
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/file_uploader.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import mimetypes
+import time
+import urllib2
+
+from webkitpy.common.net.networktransaction import NetworkTransaction, NetworkTimeout
+
+
+def get_mime_type(filename):
+    return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
+
+
+# FIXME: Rather than taking tuples, this function should take more structured data.
+def _encode_multipart_form_data(fields, files):
+    """Encode form fields for multipart/form-data.
+
+    Args:
+      fields: A sequence of (name, value) elements for regular form fields.
+      files: A sequence of (name, filename, value) elements for data to be
+             uploaded as files.
+    Returns:
+      (content_type, body) ready for httplib.HTTP instance.
+
+    Source:
+      http://code.google.com/p/rietveld/source/browse/trunk/upload.py
+    """
+    BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
+    CRLF = '\r\n'
+    lines = []
+
+    for key, value in fields:
+        lines.append('--' + BOUNDARY)
+        lines.append('Content-Disposition: form-data; name="%s"' % key)
+        lines.append('')
+        if isinstance(value, unicode):
+            value = value.encode('utf-8')
+        lines.append(value)
+
+    for key, filename, value in files:
+        lines.append('--' + BOUNDARY)
+        lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
+        lines.append('Content-Type: %s' % get_mime_type(filename))
+        lines.append('')
+        if isinstance(value, unicode):
+            value = value.encode('utf-8')
+        lines.append(value)
+
+    lines.append('--' + BOUNDARY + '--')
+    lines.append('')
+    body = CRLF.join(lines)
+    content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
+    return content_type, body
+
+
+class FileUploader(object):
+    def __init__(self, url, timeout_seconds):
+        self._url = url
+        self._timeout_seconds = timeout_seconds
+
+    def upload_single_text_file(self, filesystem, content_type, filename):
+        return self._upload_data(content_type, filesystem.read_text_file(filename))
+
+    def upload_as_multipart_form_data(self, filesystem, files, attrs):
+        file_objs = []
+        for filename, path in files:
+            file_objs.append(('file', filename, filesystem.read_binary_file(path)))
+
+        # FIXME: We should use the same variable names for the formal and actual parameters.
+        content_type, data = _encode_multipart_form_data(attrs, file_objs)
+        return self._upload_data(content_type, data)
+
+    def _upload_data(self, content_type, data):
+        def callback():
+            # FIXME: Setting a timeout, either globally using socket.setdefaulttimeout()
+            # or in urlopen(), doesn't appear to work on Mac 10.5 with Python 2.7.
+            # For now we will ignore the timeout value and hope for the best.
+            request = urllib2.Request(self._url, data, {"Content-Type": content_type})
+            return urllib2.urlopen(request)
+
+        return NetworkTransaction(timeout_seconds=self._timeout_seconds).run(callback)
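+
+
+# A hypothetical usage sketch (the URL, file names and form fields are
+# illustrative, and "filesystem" stands for any object providing
+# read_binary_file(), as used above):
+#
+#   uploader = FileUploader("https://results.example.com/testfile/upload", 120)
+#   uploader.upload_as_multipart_form_data(filesystem,
+#       [("full_results.json", "/tmp/full_results.json")],
+#       [("builder", "Example Builder")])
+
+
+# A small self-contained demonstration of the form encoder above (the field
+# and file values are made up):
+if __name__ == '__main__':
+    demo_content_type, demo_body = _encode_multipart_form_data(
+        [('builder', 'Example Builder')],
+        [('file', 'results.json', '{}')])
+    print demo_content_type
+    print demo_body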
diff --git a/Tools/Scripts/webkitpy/common/net/htdigestparser.py b/Tools/Scripts/webkitpy/common/net/htdigestparser.py
new file mode 100644
index 0000000..ee7d540
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/htdigestparser.py
@@ -0,0 +1,54 @@
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""htdigestparser - a parser for htdigest files"""
+
+import hashlib
+import string
+
+
+class HTDigestParser(object):
+    def __init__(self, digest_file):
+        self._entries = self.parse_file(digest_file)
+
+    def authenticate(self, username, realm, password):
+        hashed_password = hashlib.md5(':'.join((username, realm, password))).hexdigest()
+        return [username, realm, hashed_password] in self.entries()
+
+    def entries(self):
+        return self._entries
+
+    def parse_file(self, digest_file):
+        entries = [line.rstrip().split(':') for line in digest_file]
+
+        # Perform some sanity-checking to ensure the file is valid.
+        valid_characters = set(string.hexdigits)
+        for entry in entries:
+            if len(entry) != 3:
+                return []
+            hashed_password = entry[-1]
+            if len(hashed_password) != 32:
+                return []
+            if not set(hashed_password).issubset(valid_characters):
+                return []
+
+        return entries
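+
+
+# A small self-contained demonstration (the user, realm and password are made
+# up): each htdigest line has the form "user:realm:md5(user:realm:password)".
+if __name__ == '__main__':
+    import StringIO
+    demo_hash = hashlib.md5('alice:realm 1:secret').hexdigest()
+    demo_file = StringIO.StringIO('alice:realm 1:%s\n' % demo_hash)
+    print HTDigestParser(demo_file).authenticate('alice', 'realm 1', 'secret')  # True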
diff --git a/Tools/Scripts/webkitpy/common/net/htdigestparser_unittest.py b/Tools/Scripts/webkitpy/common/net/htdigestparser_unittest.py
new file mode 100644
index 0000000..a2a4ac9
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/htdigestparser_unittest.py
@@ -0,0 +1,82 @@
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import StringIO
+import os
+import unittest
+
+from webkitpy.common.net.htdigestparser import HTDigestParser
+
+
+class HTDigestParserTest(unittest.TestCase):
+    def assertEntriesEqual(self, entries, additional_content=None):
+        digest_file = self.fake_htdigest_file()
+        if additional_content is not None:
+            digest_file.seek(pos=0, mode=os.SEEK_END)
+            digest_file.write(additional_content)
+            digest_file.seek(pos=0, mode=os.SEEK_SET)
+        self.assertEqual(entries, HTDigestParser(digest_file).entries())
+
+    def test_authenticate(self):
+        htdigest = HTDigestParser(self.fake_htdigest_file())
+        self.assertTrue(htdigest.authenticate('user1', 'realm 1', 'password1'))
+        self.assertTrue(htdigest.authenticate('user2', 'realm 2', 'password2'))
+        self.assertTrue(htdigest.authenticate('user3', 'realm 1', 'password3'))
+        self.assertTrue(htdigest.authenticate('user3', 'realm 3', 'password3'))
+
+        self.assertFalse(htdigest.authenticate('user1', 'realm', 'password1'))
+        self.assertFalse(htdigest.authenticate('user1', 'realm 2', 'password1'))
+        self.assertFalse(htdigest.authenticate('user2', 'realm 2', 'password1'))
+        self.assertFalse(htdigest.authenticate('user2', 'realm 1', 'password1'))
+        self.assertFalse(htdigest.authenticate('', '', ''))
+
+    def test_entries(self):
+        entries = [
+            ['user1', 'realm 1', '36b8aa27fa5e9051095d37b619f92762'],
+            ['user2', 'realm 2', '14f827686fa97778f02fe1314a3337c8'],
+            ['user3', 'realm 1', '1817fc8a24119cc57fbafc8a630ea5a5'],
+            ['user3', 'realm 3', 'a05f5a2335e9d87bbe75bbe5e53248f0'],
+        ]
+        self.assertEntriesEqual(entries)
+        self.assertEntriesEqual(entries, additional_content='')
+
+    def test_empty_file(self):
+        self.assertEqual([], HTDigestParser(StringIO.StringIO()).entries())
+
+    def test_too_few_colons(self):
+        self.assertEntriesEqual([], additional_content='user1:realm 1\n')
+
+    def test_too_many_colons(self):
+        self.assertEntriesEqual([], additional_content='user1:realm 1:36b8aa27fa5e9051095d37b619f92762:garbage\n')
+
+    def test_invalid_hash(self):
+        self.assertEntriesEqual([], additional_content='user1:realm 1:36b8aa27fa5e9051095d37b619f92762000000\n')
+        self.assertEntriesEqual([], additional_content='user1:realm 1:36b8aa27fa5e9051095d37b619f9276\n')
+        self.assertEntriesEqual([], additional_content='user1:realm 1:36b8aa27fa5e9051095d37b619f9276z\n')
+        self.assertEntriesEqual([], additional_content='user1:realm 1: 36b8aa27fa5e9051095d37b619f92762\n')
+
+    def fake_htdigest_file(self):
+        return StringIO.StringIO("""user1:realm 1:36b8aa27fa5e9051095d37b619f92762
+user2:realm 2:14f827686fa97778f02fe1314a3337c8
+user3:realm 1:1817fc8a24119cc57fbafc8a630ea5a5
+user3:realm 3:a05f5a2335e9d87bbe75bbe5e53248f0
+""")
diff --git a/Tools/Scripts/webkitpy/common/net/irc/__init__.py b/Tools/Scripts/webkitpy/common/net/irc/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/irc/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/Tools/Scripts/webkitpy/common/net/irc/irc_mock.py b/Tools/Scripts/webkitpy/common/net/irc/irc_mock.py
new file mode 100644
index 0000000..734be06
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/irc/irc_mock.py
@@ -0,0 +1,37 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.system.deprecated_logging import log
+
+
+class MockIRC(object):
+    def post(self, message):
+        log("MOCK: irc.post: %s" % message)
+
+    def disconnect(self):
+        log("MOCK: irc.disconnect")
diff --git a/Tools/Scripts/webkitpy/common/net/irc/ircbot.py b/Tools/Scripts/webkitpy/common/net/irc/ircbot.py
new file mode 100644
index 0000000..c8c1a38
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/irc/ircbot.py
@@ -0,0 +1,108 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.config import irc as config_irc
+
+from webkitpy.common.thread.messagepump import MessagePump, MessagePumpDelegate
+from webkitpy.thirdparty.autoinstalled.irc import ircbot
+from webkitpy.thirdparty.autoinstalled.irc import irclib
+
+
+class IRCBotDelegate(object):
+    def irc_message_received(self, nick, message):
+        raise NotImplementedError, "subclasses must implement"
+
+    def irc_nickname(self):
+        raise NotImplementedError, "subclasses must implement"
+
+    def irc_password(self):
+        raise NotImplementedError, "subclasses must implement"
+
+
+class IRCBot(ircbot.SingleServerIRCBot, MessagePumpDelegate):
+    # FIXME: We should get this information from a config file.
+    def __init__(self,
+                 message_queue,
+                 delegate):
+        self._message_queue = message_queue
+        self._delegate = delegate
+        ircbot.SingleServerIRCBot.__init__(
+            self,
+            [(
+                config_irc.server,
+                config_irc.port,
+                self._delegate.irc_password()
+            )],
+            self._delegate.irc_nickname(),
+            self._delegate.irc_nickname())
+        self._channel = config_irc.channel
+
+    # ircbot.SingleServerIRCBot methods
+
+    def on_nicknameinuse(self, connection, event):
+        connection.nick(connection.get_nickname() + "_")
+
+    def on_welcome(self, connection, event):
+        connection.join(self._channel)
+        self._message_pump = MessagePump(self, self._message_queue)
+
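+    # For example, with a bot nickname of "wkbot" (illustrative), a channel
+    # message of "wkbot: hi" or "wkbot, hi" is split on the first ':' or ','
+    # into ["wkbot", " hi"], and " hi" (leading space included) is handed to
+    # the delegate's irc_message_received(); messages not addressed to the
+    # bot are ignored.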
+    def on_pubmsg(self, connection, event):
+        nick = irclib.nm_to_n(event.source())
+        request = event.arguments()[0]
+
+        if not irclib.irc_lower(request).startswith(irclib.irc_lower(connection.get_nickname())):
+            return
+
+        if len(request) <= len(connection.get_nickname()):
+            return
+
+        # Some IRC clients, like xchat-gnome, default to using a comma
+        # when addressing someone.
+        vocative_separator = request[len(connection.get_nickname())]
+        if vocative_separator == ':':
+            request = request.split(':', 1)
+        elif vocative_separator == ',':
+            request = request.split(',', 1)
+        else:
+            return
+
+        if len(request) > 1:
+            response = self._delegate.irc_message_received(nick, request[1])
+            if response:
+                connection.privmsg(self._channel, response)
+
+    # MessagePumpDelegate methods
+
+    def schedule(self, interval, callback):
+        self.connection.execute_delayed(interval, callback)
+
+    def message_available(self, message):
+        self.connection.privmsg(self._channel, message)
+
+    def final_message_delivered(self):
+        self.die()
diff --git a/Tools/Scripts/webkitpy/common/net/irc/ircproxy.py b/Tools/Scripts/webkitpy/common/net/irc/ircproxy.py
new file mode 100644
index 0000000..13348b4
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/irc/ircproxy.py
@@ -0,0 +1,62 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import threading
+
+from webkitpy.common.net.irc.ircbot import IRCBot
+from webkitpy.common.thread.threadedmessagequeue import ThreadedMessageQueue
+from webkitpy.common.system.deprecated_logging import log
+
+
+class _IRCThread(threading.Thread):
+    def __init__(self, message_queue, irc_delegate, irc_bot):
+        threading.Thread.__init__(self)
+        self.setDaemon(True)
+        self._message_queue = message_queue
+        self._irc_delegate = irc_delegate
+        self._irc_bot = irc_bot
+
+    def run(self):
+        bot = self._irc_bot(self._message_queue, self._irc_delegate)
+        bot.start()
+
+
+class IRCProxy(object):
+    def __init__(self, irc_delegate, irc_bot=IRCBot):
+        log("Connecting to IRC")
+        self._message_queue = ThreadedMessageQueue()
+        self._child_thread = _IRCThread(self._message_queue, irc_delegate, irc_bot)
+        self._child_thread.start()
+
+    def post(self, message):
+        self._message_queue.post(message)
+
+    def disconnect(self):
+        log("Disconnecting from IRC...")
+        self._message_queue.stop()
+        self._child_thread.join()
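+
+
+# A minimal usage sketch (see ircproxy_unittest.py below); the delegate is
+# expected to implement the IRCBotDelegate interface from ircbot.py:
+#
+#   proxy = IRCProxy(my_delegate)
+#   proxy.post("hello")
+#   proxy.disconnect()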
diff --git a/Tools/Scripts/webkitpy/common/net/irc/ircproxy_unittest.py b/Tools/Scripts/webkitpy/common/net/irc/ircproxy_unittest.py
new file mode 100644
index 0000000..b44ce40
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/irc/ircproxy_unittest.py
@@ -0,0 +1,43 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.net.irc.ircproxy import IRCProxy
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.thirdparty.mock import Mock
+
+class IRCProxyTest(unittest.TestCase):
+    def test_trivial(self):
+        def fun():
+            proxy = IRCProxy(Mock(), Mock())
+            proxy.post("hello")
+            proxy.disconnect()
+
+        expected_stderr = "Connecting to IRC\nDisconnecting from IRC...\n"
+        OutputCapture().assert_outputs(self, fun, expected_stderr=expected_stderr)
diff --git a/Tools/Scripts/webkitpy/common/net/layouttestresults.py b/Tools/Scripts/webkitpy/common/net/layouttestresults.py
new file mode 100644
index 0000000..f0d807e
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/layouttestresults.py
@@ -0,0 +1,177 @@
+# Copyright (c) 2010, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# A module for parsing results.html files generated by old-run-webkit-tests.
+# This class is one big hack and only needs to exist until we transition to new-run-webkit-tests.
+
+from webkitpy.common.net.resultsjsonparser import ResultsJSONParser
+from webkitpy.common.system.deprecated_logging import log
+from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup, SoupStrainer
+from webkitpy.layout_tests.models import test_results
+from webkitpy.layout_tests.models import test_failures
+
+
+# FIXME: This should be unified with all the layout test results code in the layout_tests package.
+# This doesn't belong in common.net, but we don't have a better place for it yet.
+def path_for_layout_test(test_name):
+    return "LayoutTests/%s" % test_name
+
+
+class ORWTResultsHTMLParser(object):
+    """This class knows how to parse old-run-webkit-tests results.html files."""
+
+    stderr_key = u'Tests that had stderr output:'
+    fail_key = u'Tests where results did not match expected results:'
+    timeout_key = u'Tests that timed out:'
+    # FIXME: This may need to be made aware of WebKitTestRunner results for WebKit2.
+    crash_key = u'Tests that caused the DumpRenderTree tool to crash:'
+    missing_key = u'Tests that had no expected results (probably new):'
+    webprocess_crash_key = u'Tests that caused the Web process to crash:'
+
+    expected_keys = [
+        stderr_key,
+        fail_key,
+        crash_key,
+        webprocess_crash_key,
+        timeout_key,
+        missing_key,
+    ]
+
+    @classmethod
+    def _failures_from_fail_row(cls, row):
+        # Look at all anchors in this row, and guess what type
+        # of new-run-webkit-test failures they equate to.
+        failures = set()
+        test_name = None
+        for anchor in row.findAll("a"):
+            anchor_text = unicode(anchor.string)
+            if not test_name:
+                test_name = anchor_text
+                continue
+            if anchor_text in ["expected image", "image diffs"] or '%' in anchor_text:
+                failures.add(test_failures.FailureImageHashMismatch())
+            elif anchor_text in ["expected", "actual", "diff", "pretty diff"]:
+                failures.add(test_failures.FailureTextMismatch())
+            else:
+                log("Unhandled link text in results.html parsing: %s.  Please file a bug against webkitpy." % anchor_text)
+        # FIXME: It's possible the row contained no links due to ORWT brokenness.
+        # We should probably assume some type of failure anyway.
+        return failures
+
+    @classmethod
+    def _failures_from_row(cls, row, table_title):
+        if table_title == cls.fail_key:
+            return cls._failures_from_fail_row(row)
+        if table_title == cls.crash_key:
+            return [test_failures.FailureCrash()]
+        if table_title == cls.webprocess_crash_key:
+            return [test_failures.FailureCrash(process_name="WebProcess")]
+        if table_title == cls.timeout_key:
+            return [test_failures.FailureTimeout()]
+        if table_title == cls.missing_key:
+            return [test_failures.FailureMissingResult(), test_failures.FailureMissingImageHash(), test_failures.FailureMissingImage()]
+        return None
+
+    @classmethod
+    def _test_result_from_row(cls, row, table_title):
+        test_name = unicode(row.find("a").string)
+        failures = cls._failures_from_row(row, table_title)
+        # TestResult is a class designed to work with new-run-webkit-tests.
+        # old-run-webkit-tests does not save quite enough information in results.html for us to parse.
+        # FIXME: It's unclear if test_name should include LayoutTests or not.
+        return test_results.TestResult(test_name, failures)
+
+    @classmethod
+    def _parse_results_table(cls, table):
+        table_title = unicode(table.findPreviousSibling("p").string)
+        if table_title not in cls.expected_keys:
+            # This Exception should only ever be hit if run-webkit-tests changes its results.html format.
+            raise Exception("Unhandled title: %s" % table_title)
+        # Ignore stderr failures.  Everyone ignores them anyway.
+        if table_title == cls.stderr_key:
+            return []
+        # FIXME: We might end up with two TestResult objects for the same test if it appears in more than one row.
+        return [cls._test_result_from_row(row, table_title) for row in table.findAll("tr")]
+
+    @classmethod
+    def parse_results_html(cls, page):
+        tables = BeautifulSoup(page).findAll("table")
+        return sum([cls._parse_results_table(table) for table in tables], [])
+
+
+# FIXME: This should be unified with ResultsSummary or other NRWT layout tests code
+# in the layout_tests package.
+# This doesn't belong in common.net, but we don't have a better place for it yet.
+class LayoutTestResults(object):
+    @classmethod
+    def results_from_string(cls, string):
+        if not string:
+            return None
+        # For now we try to parse first as JSON, then as results.html;
+        # eventually we will remove the HTML fallback support.
+        test_results = ResultsJSONParser.parse_results_json(string)
+        if not test_results:
+            test_results = ORWTResultsHTMLParser.parse_results_html(string)
+        if not test_results:
+            return None
+        return cls(test_results)
+
+    def __init__(self, test_results):
+        self._test_results = test_results
+        self._failure_limit_count = None
+        self._unit_test_failures = []
+
+    # FIXME: run-webkit-tests should store the --exit-after-N-failures value
+    # (or some indication of early exit) somewhere in the results.html/results.json
+    # file.  Until it does, callers should set the limit to the
+    # --exit-after-N-failures value used in that run.  Consumers of LayoutTestResults
+    # may use that value to know if absence from the failure list means PASS.
+    # https://bugs.webkit.org/show_bug.cgi?id=58481
+    def set_failure_limit_count(self, limit):
+        self._failure_limit_count = limit
+
+    def failure_limit_count(self):
+        return self._failure_limit_count
+
+    def test_results(self):
+        return self._test_results
+
+    def results_matching_failure_types(self, failure_types):
+        return [result for result in self._test_results if result.has_failure_matching_types(*failure_types)]
+
+    def tests_matching_failure_types(self, failure_types):
+        return [result.test_name for result in self.results_matching_failure_types(failure_types)]
+
+    def failing_test_results(self):
+        return self.results_matching_failure_types(test_failures.ALL_FAILURE_CLASSES)
+
+    def failing_tests(self):
+        return [result.test_name for result in self.failing_test_results()] + self._unit_test_failures
+
+    def add_unit_test_failures(self, unit_test_results):
+        self._unit_test_failures = unit_test_results
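+
+# A minimal usage sketch, assuming "contents" holds the body of a results.json
+# (or legacy results.html) file fetched from a builder:
+#
+#     results = LayoutTestResults.results_from_string(contents)
+#     if results:
+#         print results.failing_tests()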
diff --git a/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py b/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py
new file mode 100644
index 0000000..939a56a
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py
@@ -0,0 +1,146 @@
+# Copyright (c) 2010, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.net.layouttestresults import LayoutTestResults, ORWTResultsHTMLParser
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.layout_tests.models import test_results
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
+
+
+class ORWTResultsHTMLParserTest(unittest.TestCase):
+    _example_results_html = """
+<html>
+<head>
+<title>Layout Test Results</title>
+</head>
+<body>
+<p>Tests that had stderr output:</p>
+<table>
+<tr>
+<td><a href="/var/lib/buildbot/build/gtk-linux-64-release/build/LayoutTests/accessibility/aria-activedescendant-crash.html">accessibility/aria-activedescendant-crash.html</a></td>
+<td><a href="accessibility/aria-activedescendant-crash-stderr.txt">stderr</a></td>
+</tr>
+<td><a href="/var/lib/buildbot/build/gtk-linux-64-release/build/LayoutTests/http/tests/security/canvas-remote-read-svg-image.html">http/tests/security/canvas-remote-read-svg-image.html</a></td>
+<td><a href="http/tests/security/canvas-remote-read-svg-image-stderr.txt">stderr</a></td>
+</tr>
+</table><p>Tests that had no expected results (probably new):</p>
+<table>
+<tr>
+<td><a href="/var/lib/buildbot/build/gtk-linux-64-release/build/LayoutTests/fast/repaint/no-caret-repaint-in-non-content-editable-element.html">fast/repaint/no-caret-repaint-in-non-content-editable-element.html</a></td>
+<td><a href="fast/repaint/no-caret-repaint-in-non-content-editable-element-actual.txt">result</a></td>
+</tr>
+</table></body>
+</html>
+"""
+
+    _example_results_html_with_failing_tests = """
+<html>
+<head>
+<title>Layout Test Results</title>
+</head>
+<body>
+<p>Tests where results did not match expected results:</p>
+<table>
+<tr>
+<td><a href="http://trac.webkit.org/export/91245/trunk/LayoutTests/compositing/plugins/composited-plugin.html">compositing/plugins/composited-plugin.html</a></td>
+<td>
+<a href="compositing/plugins/composited-plugin-expected.txt">expected</a>
+</td>
+<td>
+<a href="compositing/plugins/composited-plugin-actual.txt">actual</a>
+</td>
+<td>
+<a href="compositing/plugins/composited-plugin-diffs.txt">diff</a>
+</td>
+<td>
+<a href="compositing/plugins/composited-plugin-pretty-diff.html">pretty diff</a>
+</td>
+</tr>
+</table>
+<p>Tests that had stderr output:</p>
+<table>
+<tr>
+<td><a href="/var/lib/buildbot/build/gtk-linux-64-release/build/LayoutTests/accessibility/aria-activedescendant-crash.html">accessibility/aria-activedescendant-crash.html</a></td>
+<td><a href="accessibility/aria-activedescendant-crash-stderr.txt">stderr</a></td>
+</tr>
+<td><a href="/var/lib/buildbot/build/gtk-linux-64-release/build/LayoutTests/http/tests/security/canvas-remote-read-svg-image.html">http/tests/security/canvas-remote-read-svg-image.html</a></td>
+<td><a href="http/tests/security/canvas-remote-read-svg-image-stderr.txt">stderr</a></td>
+</tr>
+</table><p>Tests that had no expected results (probably new):</p>
+<table>
+<tr>
+<td><a href="/var/lib/buildbot/build/gtk-linux-64-release/build/LayoutTests/fast/repaint/no-caret-repaint-in-non-content-editable-element.html">fast/repaint/no-caret-repaint-in-non-content-editable-element.html</a></td>
+<td><a href="fast/repaint/no-caret-repaint-in-non-content-editable-element-actual.txt">result</a></td>
+</tr>
+</table></body>
+</html>
+"""
+
+    def test_parse_layout_test_results(self):
+        failures = [test_failures.FailureMissingResult(), test_failures.FailureMissingImageHash(), test_failures.FailureMissingImage()]
+        testname = 'fast/repaint/no-caret-repaint-in-non-content-editable-element.html'
+        expected_results = [test_results.TestResult(testname, failures)]
+
+        results = ORWTResultsHTMLParser.parse_results_html(self._example_results_html)
+        self.assertEqual(expected_results, results)
+
+
+    def test_failures_from_fail_row(self):
+        row = BeautifulSoup("<tr><td><a>test.hml</a></td><td><a>expected image</a></td><td><a>25%</a></td></tr>")
+        test_name = unicode(row.find("a").string)
+        # Even if the caller has already found the test name, findAll inside _failures_from_fail_row will see it again.
+        failures = OutputCapture().assert_outputs(self, ORWTResultsHTMLParser._failures_from_fail_row, [row])
+        self.assertEqual(len(failures), 1)
+        self.assertEqual(type(sorted(failures)[0]), test_failures.FailureImageHashMismatch)
+
+        row = BeautifulSoup("<tr><td><a>test.hml</a><a>foo</a></td></tr>")
+        expected_stderr = "Unhandled link text in results.html parsing: foo.  Please file a bug against webkitpy.\n"
+        OutputCapture().assert_outputs(self, ORWTResultsHTMLParser._failures_from_fail_row, [row], expected_stderr=expected_stderr)
+
+
+class LayoutTestResultsTest(unittest.TestCase):
+
+    def test_set_failure_limit_count(self):
+        results = LayoutTestResults([])
+        self.assertEqual(results.failure_limit_count(), None)
+        results.set_failure_limit_count(10)
+        self.assertEqual(results.failure_limit_count(), 10)
+
+    def test_results_from_string(self):
+        self.assertEqual(LayoutTestResults.results_from_string(None), None)
+        self.assertEqual(LayoutTestResults.results_from_string(""), None)
+        results = LayoutTestResults.results_from_string(ORWTResultsHTMLParserTest._example_results_html)
+        self.assertEqual(len(results.failing_tests()), 1)
+
+    def test_tests_matching_failure_types(self):
+        results = LayoutTestResults.results_from_string(ORWTResultsHTMLParserTest._example_results_html_with_failing_tests)
+        failing_tests = results.tests_matching_failure_types([test_failures.FailureTextMismatch])
+        self.assertEqual(len(failing_tests), 1)
+        self.assertEqual(len(results.failing_tests()), 2)
diff --git a/Tools/Scripts/webkitpy/common/net/networktransaction.py b/Tools/Scripts/webkitpy/common/net/networktransaction.py
new file mode 100644
index 0000000..03b1432
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/networktransaction.py
@@ -0,0 +1,71 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import time
+import urllib2
+
+from webkitpy.common.system.deprecated_logging import log
+
+
+_log = logging.getLogger(__name__)
+
+
+class NetworkTimeout(Exception):
+    def __str__(self):
+        return 'NetworkTimeout'
+
+
+class NetworkTransaction(object):
+    def __init__(self, initial_backoff_seconds=10, grown_factor=1.5, timeout_seconds=(10 * 60), convert_404_to_None=False):
+        self._initial_backoff_seconds = initial_backoff_seconds
+        self._grown_factor = grown_factor
+        self._timeout_seconds = timeout_seconds
+        self._convert_404_to_None = convert_404_to_None
+
+    def run(self, request):
+        self._total_sleep = 0
+        self._backoff_seconds = self._initial_backoff_seconds
+        while True:
+            try:
+                return request()
+            except urllib2.HTTPError, e:
+                if self._convert_404_to_None and e.code == 404:
+                    return None
+                self._check_for_timeout()
+                _log.warn("Received HTTP status %s loading \"%s\".  Retrying in %s seconds..." % (e.code, e.filename, self._backoff_seconds))
+                self._sleep()
+
+    def _check_for_timeout(self):
+        if self._total_sleep + self._backoff_seconds > self._timeout_seconds:
+            raise NetworkTimeout()
+
+    def _sleep(self):
+        time.sleep(self._backoff_seconds)
+        self._total_sleep += self._backoff_seconds
+        self._backoff_seconds *= self._grown_factor
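+
+# A minimal usage sketch; run() retries the callable with exponential backoff
+# (10s, 15s, 22.5s, ... by default) whenever it raises urllib2.HTTPError
+# (a 404 is returned as None when convert_404_to_None is set), and raises
+# NetworkTimeout once the accumulated sleep would exceed timeout_seconds.
+# Other exception types propagate to the caller unchanged.
+#
+#     def fetch():
+#         return urllib2.urlopen("http://example.com/status").read()
+#
+#     data = NetworkTransaction(convert_404_to_None=True).run(fetch)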
diff --git a/Tools/Scripts/webkitpy/common/net/networktransaction_unittest.py b/Tools/Scripts/webkitpy/common/net/networktransaction_unittest.py
new file mode 100644
index 0000000..3302dec
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/networktransaction_unittest.py
@@ -0,0 +1,94 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.net.networktransaction import NetworkTransaction, NetworkTimeout
+from webkitpy.common.system.logtesting import LoggingTestCase
+
+
+class NetworkTransactionTest(LoggingTestCase):
+    exception = Exception("Test exception")
+
+    def test_success(self):
+        transaction = NetworkTransaction()
+        self.assertEqual(transaction.run(lambda: 42), 42)
+
+    def _raise_exception(self):
+        raise self.exception
+
+    def test_exception(self):
+        transaction = NetworkTransaction()
+        did_process_exception = False
+        did_throw_exception = True
+        try:
+            transaction.run(lambda: self._raise_exception())
+            did_throw_exception = False
+        except Exception, e:
+            did_process_exception = True
+            self.assertEqual(e, self.exception)
+        self.assertTrue(did_throw_exception)
+        self.assertTrue(did_process_exception)
+
+    def _raise_500_error(self):
+        self._run_count += 1
+        if self._run_count < 3:
+            from webkitpy.thirdparty.autoinstalled.mechanize import HTTPError
+            raise HTTPError("http://example.com/", 500, "internal server error", None, None)
+        return 42
+
+    def _raise_404_error(self):
+        from webkitpy.thirdparty.autoinstalled.mechanize import HTTPError
+        raise HTTPError("http://foo.com/", 404, "not found", None, None)
+
+    def test_retry(self):
+        self._run_count = 0
+        transaction = NetworkTransaction(initial_backoff_seconds=0)
+        self.assertEqual(transaction.run(lambda: self._raise_500_error()), 42)
+        self.assertEqual(self._run_count, 3)
+        self.assertLog(['WARNING: Received HTTP status 500 loading "http://example.com/".  '
+                        'Retrying in 0 seconds...\n',
+                        'WARNING: Received HTTP status 500 loading "http://example.com/".  '
+                        'Retrying in 0.0 seconds...\n'])
+
+    def test_convert_404_to_None(self):
+        transaction = NetworkTransaction(convert_404_to_None=True)
+        self.assertEqual(transaction.run(lambda: self._raise_404_error()), None)
+
+    def test_timeout(self):
+        self._run_count = 0
+        transaction = NetworkTransaction(initial_backoff_seconds=60*60, timeout_seconds=60)
+        did_process_exception = False
+        did_throw_exception = True
+        try:
+            transaction.run(lambda: self._raise_500_error())
+            did_throw_exception = False
+        except NetworkTimeout, e:
+            did_process_exception = True
+        self.assertTrue(did_throw_exception)
+        self.assertTrue(did_process_exception)
diff --git a/Tools/Scripts/webkitpy/common/net/omahaproxy.py b/Tools/Scripts/webkitpy/common/net/omahaproxy.py
new file mode 100644
index 0000000..b7b481f
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/omahaproxy.py
@@ -0,0 +1,81 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# This is the client used to query http://omahaproxy.appspot.com/ to retrieve
+# the Chrome versions associated with WebKit commits.
+
+from webkitpy.common.net.networktransaction import NetworkTransaction
+from webkitpy.common.config import urls
+
+import json
+import urllib2
+
+
+class OmahaProxy(object):
+    default_url = urls.omahaproxy_url
+
+    chrome_platforms = {"linux": "Linux",
+                        "win": "Windows",
+                        "mac": "Mac",
+                        "cros": "Chrome OS",
+                        "cf": "Chrome Frame",
+                        "ios": "iOS"}
+    chrome_channels = ["canary", "dev", "beta", "stable"]
+
+    def __init__(self, url=default_url, browser=None):
+        self._chrome_channels = set(self.chrome_channels)
+        self.set_url(url)
+        from webkitpy.thirdparty.autoinstalled.mechanize import Browser
+        self._browser = browser or Browser()
+
+    def set_url(self, url):
+        self.url = url
+
+    def _json_url(self):
+        return "%s/all.json" % self.url
+
+    def _get_json(self):
+        return NetworkTransaction().run(lambda: urllib2.urlopen(self._json_url()).read())
+
+    def get_revisions(self):
+        revisions_json = json.loads(self._get_json())
+        revisions = []
+        for platform in revisions_json:
+            for version in platform["versions"]:
+                try:
+                    row = {
+                        "commit": int(version["base_webkit_revision"]),
+                        "channel": version["channel"],
+                        "platform": self.chrome_platforms.get(platform["os"], platform["os"]),
+                        "date": version["date"],
+                    }
+                    assert(version["channel"] in self._chrome_channels)
+                    revisions.append(row)
+                except ValueError:
+                    continue
+        return revisions
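+
+# A minimal usage sketch; each returned row is a dict of the form
+# {"commit": 115688, "channel": "dev", "platform": "Linux", "date": "05/04/12"},
+# and entries whose base_webkit_revision is not an integer are skipped:
+#
+#     for row in OmahaProxy().get_revisions():
+#         print row["platform"], row["channel"], row["commit"]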
diff --git a/Tools/Scripts/webkitpy/common/net/omahaproxy_unittest.py b/Tools/Scripts/webkitpy/common/net/omahaproxy_unittest.py
new file mode 100644
index 0000000..f3e5be3
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/omahaproxy_unittest.py
@@ -0,0 +1,139 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+# Unit test for omahaproxy.py
+
+import unittest
+
+from webkitpy.common.net.omahaproxy import OmahaProxy
+
+
+class MockOmahaProxy(OmahaProxy):
+    def __init__(self, json):
+        self._get_json = lambda: json
+        OmahaProxy.__init__(self)
+
+
+class OmahaProxyTest(unittest.TestCase):
+    example_omahaproxy_json = """[
+        {"os": "win",
+         "versions": [
+                {"base_webkit_revision": "116185",
+                 "v8_ver": "3.10.8.1",
+                 "wk_ver": "536.11",
+                 "base_trunk_revision": 135598,
+                 "prev_version": "20.0.1128.0",
+                 "version": "20.0.1129.0",
+                 "date": "05\/07\/12",
+                 "prev_date": "05\/06\/12",
+                 "true_branch": "trunk",
+                 "channel": "canary",
+                 "branch_revision": "NA"},
+                {"base_webkit_revision": "115687",
+                 "v8_ver": "3.10.6.0",
+                 "wk_ver": "536.10",
+                 "base_trunk_revision": 134666,
+                 "prev_version": "20.0.1123.1",
+                 "version": "20.0.1123.4",
+                 "date": "05\/04\/12",
+                 "prev_date": "05\/02\/12",
+                 "true_branch": "1123",
+                 "channel": "dev",
+                 "branch_revision": 135092}]},
+        {"os": "linux",
+         "versions": [
+                {"base_webkit_revision": "115688",
+                 "v8_ver": "3.10.6.0",
+                 "wk_ver": "536.10",
+                 "base_trunk_revision": 134666,
+                 "prev_version": "20.0.1123.2",
+                 "version": "20.0.1123.4",
+                 "date": "05\/04\/12",
+                 "prev_date": "05\/02\/12",
+                 "true_branch": "1123",
+                 "channel": "dev",
+                 "branch_revision": 135092},
+                {"base_webkit_revision": "112327",
+                 "v8_ver": "3.9.24.17",
+                 "wk_ver": "536.5",
+                 "base_trunk_revision": 129376,
+                 "prev_version": "19.0.1084.36",
+                 "version": "19.0.1084.41",
+                 "date": "05\/03\/12",
+                 "prev_date": "04\/25\/12",
+                 "true_branch": "1084",
+                 "channel": "beta",
+                 "branch_revision": 134854},
+                {"base_webkit_revision": "*",
+                 "v8_ver": "3.9.24.17",
+                 "wk_ver": "536.5",
+                 "base_trunk_revision": 129376,
+                 "prev_version": "19.0.1084.36",
+                 "version": "19.0.1084.41",
+                 "date": "05\/03\/12",
+                 "prev_date": "04\/25\/12",
+                 "true_branch": "1084",
+                 "channel": "release",
+                 "branch_revision": 134854}]},
+        {"os": "weird-platform",
+         "versions": [
+                {"base_webkit_revision": "115688",
+                 "v8_ver": "3.10.6.0",
+                 "wk_ver": "536.10",
+                 "base_trunk_revision": 134666,
+                 "prev_version": "20.0.1123.2",
+                 "version": "20.0.1123.4",
+                 "date": "05\/04\/12",
+                 "prev_date": "05\/02\/12",
+                 "true_branch": "1123",
+                 "channel": "dev",
+                 "branch_revision": 135092}]}]"""
+
+    expected_revisions = [
+        {"commit": 116185, "channel": "canary", "platform": "Windows", "date": "05/07/12"},
+        {"commit": 115687, "channel": "dev", "platform": "Windows", "date": "05/04/12"},
+        {"commit": 115688, "channel": "dev", "platform": "Linux", "date": "05/04/12"},
+        {"commit": 112327, "channel": "beta", "platform": "Linux", "date": "05/03/12"},
+        {"commit": 115688, "channel": "dev", "platform": "weird-platform", "date": "05/04/12"},
+    ]
+
+    def test_get_revisions(self):
+        omahaproxy = MockOmahaProxy(self.example_omahaproxy_json)
+        revisions = omahaproxy.get_revisions()
+        self.assertEqual(len(revisions), 5)
+        for revision in revisions:
+            self.assertTrue("commit" in revision)
+            self.assertTrue("channel" in revision)
+            self.assertTrue("platform" in revision)
+            self.assertTrue("date" in revision)
+            self.assertEqual(len(revision.keys()), 4)
+        self.assertEqual(revisions, self.expected_revisions)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/net/regressionwindow.py b/Tools/Scripts/webkitpy/common/net/regressionwindow.py
new file mode 100644
index 0000000..3960ba2
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/regressionwindow.py
@@ -0,0 +1,52 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+# FIXME: This probably belongs in the buildbot module.
+class RegressionWindow(object):
+    def __init__(self, build_before_failure, failing_build, failing_tests=None):
+        self._build_before_failure = build_before_failure
+        self._failing_build = failing_build
+        self._failing_tests = failing_tests
+        self._revisions = None
+
+    def build_before_failure(self):
+        return self._build_before_failure
+
+    def failing_build(self):
+        return self._failing_build
+
+    def failing_tests(self):
+        return self._failing_tests
+
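+    # For illustration: with build_before_failure at r100 and failing_build at
+    # r103, revisions() returns [101, 102, 103] -- every revision that could
+    # have introduced the failure, in ascending order.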
+    def revisions(self):
+        # Cache revisions to avoid excessive allocations.
+        if not self._revisions:
+            self._revisions = range(self._failing_build.revision(), self._build_before_failure.revision(), -1)
+            self._revisions.reverse()
+        return self._revisions
diff --git a/Tools/Scripts/webkitpy/common/net/resultsjsonparser.py b/Tools/Scripts/webkitpy/common/net/resultsjsonparser.py
new file mode 100644
index 0000000..42ce56a
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/resultsjsonparser.py
@@ -0,0 +1,154 @@
+# Copyright (c) 2010, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+import json
+
+from webkitpy.common.memoized import memoized
+from webkitpy.common.system.deprecated_logging import log
+# FIXME: common should never import from new-run-webkit-tests; one of these files needs to move.
+from webkitpy.layout_tests.layout_package import json_results_generator
+from webkitpy.layout_tests.models import test_expectations, test_results, test_failures
+from webkitpy.layout_tests.models.test_expectations import TestExpectations
+
+
+# These are helper functions for navigating the results json structure.
+def for_each_test(tree, handler, prefix=''):
+    for key in tree:
+        new_prefix = (prefix + '/' + key) if prefix else key
+        if 'actual' not in tree[key]:
+            for_each_test(tree[key], handler, new_prefix)
+        else:
+            handler(new_prefix, tree[key])
+
+
+def result_for_test(tree, test):
+    parts = test.split('/')
+    for part in parts:
+        tree = tree[part]
+    return tree
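+
+# For illustration, given a tree such as
+#     {"fast": {"dom": {"prototype-taco.html": {"expected": "PASS", "actual": "PASS FAIL"}}}}
+# for_each_test() invokes handler("fast/dom/prototype-taco.html", leaf_dict) for
+# the single leaf, and result_for_test(tree, "fast/dom/prototype-taco.html")
+# returns that same leaf dictionary.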
+
+
+# Wrapper around the dictionaries returned from the json.
+# Eventually the .json should just serialize the TestFailure objects
+# directly and we won't need this.
+class JSONTestResult(object):
+    def __init__(self, test_name, result_dict):
+        self._test_name = test_name
+        self._result_dict = result_dict
+
+    def did_pass_or_run_as_expected(self):
+        return self.did_pass() or self.did_run_as_expected()
+
+    def did_pass(self):
+        return test_expectations.PASS in self._actual_as_tokens()
+
+    def did_run_as_expected(self):
+        actual_results = self._actual_as_tokens()
+        expected_results = self._expected_as_tokens()
+        # FIXME: We should only call remove_pixel_failures when this JSONResult
+        # came from a test run without pixel tests!
+        if not TestExpectations.has_pixel_failures(actual_results):
+            expected_results = TestExpectations.remove_pixel_failures(expected_results)
+        for actual_result in actual_results:
+            if not TestExpectations.result_was_expected(actual_result, expected_results, False, False):
+                return False
+        return True
+
+    def _tokenize(self, results_string):
+        tokens = map(TestExpectations.expectation_from_string, results_string.split(' '))
+        if None in tokens:
+            log("Unrecognized result in %s" % results_string)
+        return set(tokens)
+
+    @memoized
+    def _actual_as_tokens(self):
+        actual_results = self._result_dict['actual']
+        return self._tokenize(actual_results)
+
+    @memoized
+    def _expected_as_tokens(self):
+        expected_results = self._result_dict['expected']
+        return self._tokenize(expected_results)
+
+    def _failure_types_from_actual_result(self, actual):
+        # FIXME: There doesn't seem to be a full list of all possible values of
+        # 'actual' anywhere.  However, JSONLayoutResultsGenerator.FAILURE_TO_CHAR
+        # is a useful reference, as that's for "old"-style results.json files.
+        #
+        # FIXME: TEXT, IMAGE_PLUS_TEXT, and AUDIO are obsolete but we keep them for
+        # now so that we can parse old results.json files.
+        if actual == test_expectations.PASS:
+            return []
+        elif actual == test_expectations.FAIL:
+            return [test_failures.FailureTextMismatch(), test_failures.FailureImageHashMismatch(), test_failures.FailureAudioMismatch()]
+        elif actual == test_expectations.TEXT:
+            return [test_failures.FailureTextMismatch()]
+        elif actual == test_expectations.IMAGE:
+            return [test_failures.FailureImageHashMismatch()]
+        elif actual == test_expectations.IMAGE_PLUS_TEXT:
+            return [test_failures.FailureImageHashMismatch(), test_failures.FailureTextMismatch()]
+        elif actual == test_expectations.AUDIO:
+            return [test_failures.FailureAudioMismatch()]
+        elif actual == test_expectations.TIMEOUT:
+            return [test_failures.FailureTimeout()]
+        elif actual == test_expectations.CRASH:
+            # NOTE: We don't know what process crashed from the json, just that a process crashed.
+            return [test_failures.FailureCrash()]
+        elif actual == test_expectations.MISSING:
+            return [test_failures.FailureMissingResult(), test_failures.FailureMissingImageHash(), test_failures.FailureMissingImage()]
+        else:
+            log("Failed to handle: %s" % self._result_dict['actual'])
+            return []
+
+    def _failures(self):
+        if self.did_pass():
+            return []
+        return sum(map(self._failure_types_from_actual_result, self._actual_as_tokens()), [])
+
+    def test_result(self):
+        # FIXME: Optionally pull in the test runtime from times_ms.json.
+        return test_results.TestResult(self._test_name, self._failures())
+
+
+class ResultsJSONParser(object):
+    @classmethod
+    def parse_results_json(cls, json_string):
+        if not json_results_generator.has_json_wrapper(json_string):
+            return None
+
+        content_string = json_results_generator.strip_json_wrapper(json_string)
+        json_dict = json.loads(content_string)
+
+        json_results = []
+        for_each_test(json_dict['tests'], lambda test, result: json_results.append(JSONTestResult(test, result)))
+
+        # FIXME: What's the short sexy python way to filter None?
+        # I would use [foo.bar() for foo in foos if foo.bar()] but bar() is expensive.
+        unexpected_failures = [result.test_result() for result in json_results if not result.did_pass_or_run_as_expected()]
+        return filter(lambda a: a, unexpected_failures)
diff --git a/Tools/Scripts/webkitpy/common/net/resultsjsonparser_unittest.py b/Tools/Scripts/webkitpy/common/net/resultsjsonparser_unittest.py
new file mode 100644
index 0000000..867379f
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/resultsjsonparser_unittest.py
@@ -0,0 +1,93 @@
+# Copyright (c) 2010, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.net.resultsjsonparser import ResultsJSONParser
+from webkitpy.layout_tests.models import test_results
+from webkitpy.layout_tests.models import test_failures
+
+
+class ResultsJSONParserTest(unittest.TestCase):
+    # The real files have no whitespace, but newlines make this much more readable.
+
+    _example_full_results_json = """ADD_RESULTS({
+    "tests": {
+        "fast": {
+            "dom": {
+                "prototype-inheritance.html": {
+                    "expected": "PASS",
+                    "actual": "FAIL"
+                },
+                "prototype-banana.html": {
+                    "expected": "FAIL",
+                    "actual": "PASS"
+                },
+                "prototype-taco.html": {
+                    "expected": "PASS",
+                    "actual": "PASS FAIL"
+                },
+                "prototype-chocolate.html": {
+                    "expected": "FAIL",
+                    "actual": "FAIL"
+                },
+                "prototype-strawberry.html": {
+                    "expected": "PASS",
+                    "actual": "FAIL PASS"
+                }
+            }
+        },
+        "svg": {
+            "dynamic-updates": {
+                "SVGFEDropShadowElement-dom-stdDeviation-attr.html": {
+                    "expected": "PASS",
+                    "actual": "IMAGE",
+                    "has_stderr": true
+                }
+            }
+        }
+    },
+    "skipped": 450,
+    "num_regressions": 15,
+    "layout_tests_dir": "\/b\/build\/slave\/Webkit_Mac10_5\/build\/src\/third_party\/WebKit\/LayoutTests",
+    "version": 3,
+    "num_passes": 77,
+    "has_pretty_patch": false,
+    "fixable": 1220,
+    "num_flaky": 0,
+    "uses_expectations_file": true,
+    "has_wdiff": false
+});"""
+
+    def test_basic(self):
+        expected_results = [
+            test_results.TestResult("svg/dynamic-updates/SVGFEDropShadowElement-dom-stdDeviation-attr.html", [test_failures.FailureImageHashMismatch()], 0),
+            test_results.TestResult("fast/dom/prototype-inheritance.html", [test_failures.FailureTextMismatch(), test_failures.FailureImageHashMismatch(), test_failures.FailureAudioMismatch()], 0),
+        ]
+        results = ResultsJSONParser.parse_results_json(self._example_full_results_json)
+        self.assertEqual(expected_results, results)
diff --git a/Tools/Scripts/webkitpy/common/net/statusserver.py b/Tools/Scripts/webkitpy/common/net/statusserver.py
new file mode 100644
index 0000000..2bda1ce
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/statusserver.py
@@ -0,0 +1,170 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# This is the client designed to talk to Tools/QueueStatusServer.
+
+from webkitpy.common.net.networktransaction import NetworkTransaction
+from webkitpy.common.system.deprecated_logging import log
+from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
+
+import logging
+import urllib2
+
+
+_log = logging.getLogger(__name__)
+
+
+class StatusServer:
+    # FIXME: This should probably move to common.config.urls.
+    default_host = "queues.webkit.org"
+
+    def __init__(self, host=default_host, browser=None, bot_id=None):
+        self.set_host(host)
+        from webkitpy.thirdparty.autoinstalled.mechanize import Browser
+        self._browser = browser or Browser()
+        self.set_bot_id(bot_id)
+
+    def set_host(self, host):
+        self.host = host
+        self.url = "http://%s" % self.host
+
+    def set_bot_id(self, bot_id):
+        self.bot_id = bot_id
+
+    def results_url_for_status(self, status_id):
+        return "%s/results/%s" % (self.url, status_id)
+
+    def _add_patch(self, patch):
+        if not patch:
+            return
+        if patch.bug_id():
+            self._browser["bug_id"] = unicode(patch.bug_id())
+        if patch.id():
+            self._browser["patch_id"] = unicode(patch.id())
+
+    def _add_results_file(self, results_file):
+        if not results_file:
+            return
+        self._browser.add_file(results_file, "text/plain", "results.txt", 'results_file')
+
+    # 500 is the AppEngine limit for TEXT fields (which most of our fields are).
+    # Exceeding the limit will result in a 500 error from the server.
+    def _set_field(self, field_name, value, limit=500):
+        if len(value) > limit:
+            _log.warn("Attempted to set %s to value exceeding %s characters, truncating." % (field_name, limit))
+        self._browser[field_name] = value[:limit]
+
+    def _post_status_to_server(self, queue_name, status, patch, results_file):
+        if results_file:
+            # We might need to rewind the file if we've already tried to post it.
+            results_file.seek(0)
+
+        update_status_url = "%s/update-status" % self.url
+        self._browser.open(update_status_url)
+        self._browser.select_form(name="update_status")
+        self._browser["queue_name"] = queue_name
+        if self.bot_id:
+            self._browser["bot_id"] = self.bot_id
+        self._add_patch(patch)
+        self._set_field("status", status, limit=500)
+        self._add_results_file(results_file)
+        return self._browser.submit().read()  # This is the id of the newly created status object.
+
+    def _post_svn_revision_to_server(self, svn_revision_number, broken_bot):
+        update_svn_revision_url = "%s/update-svn-revision" % self.url
+        self._browser.open(update_svn_revision_url)
+        self._browser.select_form(name="update_svn_revision")
+        self._browser["number"] = unicode(svn_revision_number)
+        self._browser["broken_bot"] = broken_bot
+        return self._browser.submit().read()
+
+    def _post_work_items_to_server(self, queue_name, work_items):
+        update_work_items_url = "%s/update-work-items" % self.url
+        self._browser.open(update_work_items_url)
+        self._browser.select_form(name="update_work_items")
+        self._browser["queue_name"] = queue_name
+        work_items = map(unicode, work_items)  # .join expects strings
+        self._browser["work_items"] = " ".join(work_items)
+        return self._browser.submit().read()
+
+    def _post_work_item_to_ews(self, attachment_id):
+        submit_to_ews_url = "%s/submit-to-ews" % self.url
+        self._browser.open(submit_to_ews_url)
+        self._browser.select_form(name="submit_to_ews")
+        self._browser["attachment_id"] = unicode(attachment_id)
+        self._browser.submit()
+
+    def submit_to_ews(self, attachment_id):
+        _log.info("Submitting attachment %s to EWS queues" % attachment_id)
+        return NetworkTransaction().run(lambda: self._post_work_item_to_ews(attachment_id))
+
+    def next_work_item(self, queue_name):
+        _log.debug("Fetching next work item for %s" % queue_name)
+        next_patch_url = "%s/next-patch/%s" % (self.url, queue_name)
+        return self._fetch_url(next_patch_url)
+
+    def _post_release_work_item(self, queue_name, patch):
+        release_patch_url = "%s/release-patch" % (self.url)
+        self._browser.open(release_patch_url)
+        self._browser.select_form(name="release_patch")
+        self._browser["queue_name"] = queue_name
+        self._browser["attachment_id"] = unicode(patch.id())
+        self._browser.submit()
+
+    def release_work_item(self, queue_name, patch):
+        _log.info("Releasing work item %s from %s" % (patch.id(), queue_name))
+        return NetworkTransaction(convert_404_to_None=True).run(lambda: self._post_release_work_item(queue_name, patch))
+
+    def update_work_items(self, queue_name, work_items):
+        _log.debug("Recording work items: %s for %s" % (work_items, queue_name))
+        return NetworkTransaction().run(lambda: self._post_work_items_to_server(queue_name, work_items))
+
+    def update_status(self, queue_name, status, patch=None, results_file=None):
+        log(status)
+        return NetworkTransaction().run(lambda: self._post_status_to_server(queue_name, status, patch, results_file))
+
+    def update_svn_revision(self, svn_revision_number, broken_bot):
+        log("SVN revision: %s broke %s" % (svn_revision_number, broken_bot))
+        return NetworkTransaction().run(lambda: self._post_svn_revision_to_server(svn_revision_number, broken_bot))
+
+    def _fetch_url(self, url):
+        # FIXME: This should use NetworkTransaction's 404 handling instead.
+        try:
+            return urllib2.urlopen(url).read()
+        except urllib2.HTTPError, e:
+            if e.code == 404:
+                return None
+            raise e
+
+    def patch_status(self, queue_name, patch_id):
+        patch_status_url = "%s/patch-status/%s/%s" % (self.url, queue_name, patch_id)
+        return self._fetch_url(patch_status_url)
+
+    def svn_revision(self, svn_revision_number):
+        svn_revision_url = "%s/svn-revision/%s" % (self.url, svn_revision_number)
+        return self._fetch_url(svn_revision_url)
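+
+# A minimal usage sketch, assuming the default queues.webkit.org instance and a
+# queue named "commit-queue"; "some_patch" stands in for an attachment-style
+# object providing id() and bug_id():
+#
+#     server = StatusServer(bot_id="example-bot")
+#     status_id = server.update_status("commit-queue", "Processing patch", patch=some_patch)
+#     print server.results_url_for_status(status_id)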
diff --git a/Tools/Scripts/webkitpy/common/net/statusserver_mock.py b/Tools/Scripts/webkitpy/common/net/statusserver_mock.py
new file mode 100644
index 0000000..69d1ae8
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/statusserver_mock.py
@@ -0,0 +1,68 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.system.deprecated_logging import log
+
+
+class MockStatusServer(object):
+
+    def __init__(self, bot_id=None, work_items=None):
+        self.host = "example.com"
+        self.bot_id = bot_id
+        self._work_items = work_items or []
+
+    def patch_status(self, queue_name, patch_id):
+        return None
+
+    def svn_revision(self, svn_revision):
+        return None
+
+    def next_work_item(self, queue_name):
+        if not self._work_items:
+            return None
+        return self._work_items.pop(0)
+
+    def release_work_item(self, queue_name, patch):
+        log("MOCK: release_work_item: %s %s" % (queue_name, patch.id()))
+
+    def update_work_items(self, queue_name, work_items):
+        self._work_items = work_items
+        log("MOCK: update_work_items: %s %s" % (queue_name, work_items))
+
+    def submit_to_ews(self, patch_id):
+        log("MOCK: submit_to_ews: %s" % (patch_id))
+
+    def update_status(self, queue_name, status, patch=None, results_file=None):
+        log("MOCK: update_status: %s %s" % (queue_name, status))
+        return 187
+
+    def update_svn_revision(self, svn_revision, broken_bot):
+        return 191
+
+    def results_url_for_status(self, status_id):
+        return "http://dummy_url"
diff --git a/Tools/Scripts/webkitpy/common/net/statusserver_unittest.py b/Tools/Scripts/webkitpy/common/net/statusserver_unittest.py
new file mode 100644
index 0000000..1f0afd0
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/statusserver_unittest.py
@@ -0,0 +1,43 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.net.statusserver import StatusServer
+from webkitpy.common.system.outputcapture import OutputCaptureTestCaseBase
+from webkitpy.common.net.web_mock import MockBrowser
+
+
+class StatusServerTest(OutputCaptureTestCaseBase):
+    def test_update_status(self):
+        mock_browser = MockBrowser()
+        status_server = StatusServer(browser=mock_browser, bot_id='123')
+        status_server.update_status('queue name', 'the status')
+        self.assertEqual('queue name', mock_browser.params['queue_name'])
+        self.assertEqual('the status', mock_browser.params['status'])
+        self.assertEqual('123', mock_browser.params['bot_id'])
diff --git a/Tools/Scripts/webkitpy/common/net/unittestresults.py b/Tools/Scripts/webkitpy/common/net/unittestresults.py
new file mode 100644
index 0000000..bb82b05
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/unittestresults.py
@@ -0,0 +1,50 @@
+# Copyright (c) 2012, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import xml.dom.minidom
+import xml.parsers.expat
+
+from webkitpy.common.system.deprecated_logging import log
+
+
+class UnitTestResults(object):
+    @classmethod
+    def results_from_string(cls, string):
+        if not string:
+            return None
+        try:
+            dom = xml.dom.minidom.parseString(string)
+            failures = []
+            for testcase in dom.getElementsByTagName('testcase'):
+                if testcase.getElementsByTagName('failure').length != 0:
+                    testname = testcase.getAttribute('name')
+                    classname = testcase.getAttribute('classname')
+                    failures.append("%s.%s" % (classname, testname))
+            return failures
+        except xml.parsers.expat.ExpatError, e:
+            log("XML error %s parsing unit test output" % str(e))
+            return None
diff --git a/Tools/Scripts/webkitpy/common/net/unittestresults_unittest.py b/Tools/Scripts/webkitpy/common/net/unittestresults_unittest.py
new file mode 100644
index 0000000..f885206
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/unittestresults_unittest.py
@@ -0,0 +1,98 @@
+# Copyright (c) 2012, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from unittestresults import UnitTestResults
+
+
+class UnitTestResultsTest(unittest.TestCase):
+
+    def test_nostring(self):
+        self.assertEquals(None, UnitTestResults.results_from_string(None))
+
+    def test_emptystring(self):
+        self.assertEquals(None, UnitTestResults.results_from_string(""))
+
+    def test_nofailures(self):
+        no_failures_xml = """<?xml version="1.0" encoding="UTF-8"?>
+<testsuites tests="3" failures="0" disabled="0" errors="0" time="11.35" name="AllTests">
+  <testsuite name="RenderTableCellDeathTest" tests="3" failures="0" disabled="0" errors="0" time="0.677">
+    <testcase name="CanSetColumn" status="run" time="0.168" classname="RenderTableCellDeathTest" />
+    <testcase name="CrashIfSettingUnsetColumnIndex" status="run" time="0.129" classname="RenderTableCellDeathTest" />
+    <testcase name="CrashIfSettingUnsetRowIndex" status="run" time="0.123" classname="RenderTableCellDeathTest" />
+  </testsuite>
+</testsuites>"""
+        self.assertEquals([], UnitTestResults.results_from_string(no_failures_xml))
+
+    def test_onefailure(self):
+        one_failure_xml = """<?xml version="1.0" encoding="UTF-8"?>
+<testsuites tests="4" failures="1" disabled="0" errors="0" time="11.35" name="AllTests">
+  <testsuite name="RenderTableCellDeathTest" tests="4" failures="1" disabled="0" errors="0" time="0.677">
+    <testcase name="CanSetColumn" status="run" time="0.168" classname="RenderTableCellDeathTest" />
+    <testcase name="CrashIfSettingUnsetColumnIndex" status="run" time="0.129" classname="RenderTableCellDeathTest" />
+    <testcase name="CrashIfSettingUnsetRowIndex" status="run" time="0.123" classname="RenderTableCellDeathTest" />
+    <testcase name="FAILS_DivAutoZoomParamsTest" status="run" time="0.02" classname="WebFrameTest">
+      <failure message="Value of: scale&#x0A;  Actual: 4&#x0A;Expected: 1" type=""><![CDATA[../../Source/WebKit/chromium/tests/WebFrameTest.cpp:191
+Value of: scale
+  Actual: 4
+Expected: 1]]></failure>
+    </testcase>
+  </testsuite>
+</testsuites>"""
+        expected = ["WebFrameTest.FAILS_DivAutoZoomParamsTest"]
+        self.assertEquals(expected, UnitTestResults.results_from_string(one_failure_xml))
+
+    def test_multiple_failures_per_test(self):
+        multiple_failures_per_test_xml = """<?xml version="1.0" encoding="UTF-8"?>
+<testsuites tests="4" failures="2" disabled="0" errors="0" time="11.35" name="AllTests">
+  <testsuite name="UnitTests" tests="4" failures="2" disable="0" errors="0" time="10.0">
+    <testcase name="TestOne" status="run" time="0.5" classname="ClassOne">
+      <failure message="Value of: pi&#x0A;  Actual: 3&#x0A;Expected: 3.14" type=""><![CDATA[../../Source/WebKit/chromium/tests/ClassOneTest.cpp:42
+Value of: pi
+  Actual: 3
+Expected: 3.14]]></failure>
+    </testcase>
+    <testcase name="TestTwo" status="run" time="0.5" classname="ClassTwo">
+      <failure message="Value of: e&#x0A;  Actual: 2&#x0A;Expected: 2.71" type=""><![CDATA[../../Source/WebKit/chromium/tests/ClassTwoTest.cpp:30
+Value of: e
+  Actual: 2
+Expected: 2.71]]></failure>
+      <failure message="Value of: tau&#x0A;  Actual: 6&#x0A;Expected: 6.28" type=""><![CDATA[../../Source/WebKit/chromium/tests/ClassTwoTest.cpp:55
+Value of: tau
+  Actual: 6
+Expected: 6.28]]></failure>
+    </testcase>
+  </testsuite>
+</testsuites>"""
+        expected = ["ClassOne.TestOne", "ClassTwo.TestTwo"]
+        self.assertEquals(expected, UnitTestResults.results_from_string(multiple_failures_per_test_xml))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/net/web.py b/Tools/Scripts/webkitpy/common/net/web.py
new file mode 100644
index 0000000..b8a06e5
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/web.py
@@ -0,0 +1,36 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import urllib2
+
+from webkitpy.common.net.networktransaction import NetworkTransaction
+
+
+class Web(object):
+    def get_binary(self, url, convert_404_to_None=False):
+        return NetworkTransaction(convert_404_to_None=convert_404_to_None).run(lambda: urllib2.urlopen(url).read())
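A minimal usage sketch (the URL is hypothetical; with convert_404_to_None=True a missing resource yields None instead of raising, and NetworkTransaction supplies the retry behaviour):

    from webkitpy.common.net.web import Web

    web = Web()
    results = web.get_binary("http://example.com/layout-test-results.zip", convert_404_to_None=True)
    if results is None:
        print "No results archive available."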
diff --git a/Tools/Scripts/webkitpy/common/net/web_mock.py b/Tools/Scripts/webkitpy/common/net/web_mock.py
new file mode 100644
index 0000000..423573c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/web_mock.py
@@ -0,0 +1,55 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import StringIO
+
+
+class MockWeb(object):
+    def __init__(self):
+        self.urls_fetched = []
+
+    def get_binary(self, url, convert_404_to_None=False):
+        self.urls_fetched.append(url)
+        return "MOCK Web result, convert 404 to None=%s" % convert_404_to_None
+
+
+# FIXME: Classes which are using Browser probably want to use Web instead.
+class MockBrowser(object):
+    params = {}
+
+    def open(self, url):
+        pass
+
+    def select_form(self, name):
+        pass
+
+    def __setitem__(self, key, value):
+        self.params[key] = value
+
+    def submit(self):
+        return StringIO.StringIO()
diff --git a/Tools/Scripts/webkitpy/common/newstringio.py b/Tools/Scripts/webkitpy/common/newstringio.py
new file mode 100644
index 0000000..f6d08ec
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/newstringio.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""'with'-compliant StringIO implementation."""
+
+import StringIO
+
+
+class StringIO(StringIO.StringIO):
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, traceback):
+        pass
diff --git a/Tools/Scripts/webkitpy/common/newstringio_unittest.py b/Tools/Scripts/webkitpy/common/newstringio_unittest.py
new file mode 100644
index 0000000..1ee2fb9
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/newstringio_unittest.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for newstringio module."""
+
+import unittest
+
+import newstringio
+
+
+class NewStringIOTest(unittest.TestCase):
+    def test_with(self):
+        with newstringio.StringIO("foo") as f:
+            contents = f.read()
+        self.assertEqual(contents, "foo")
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/prettypatch.py b/Tools/Scripts/webkitpy/common/prettypatch.py
new file mode 100644
index 0000000..e8a913a
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/prettypatch.py
@@ -0,0 +1,67 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import tempfile
+
+
+class PrettyPatch(object):
+    # FIXME: PrettyPatch should not require checkout_root.
+    def __init__(self, executive, checkout_root):
+        self._executive = executive
+        self._checkout_root = checkout_root
+
+    def pretty_diff_file(self, diff):
+        # Diffs can contain multiple text files of different encodings
+        # so we always deal with them as byte arrays, not unicode strings.
+        assert(isinstance(diff, str))
+        pretty_diff = self.pretty_diff(diff)
+        diff_file = tempfile.NamedTemporaryFile(suffix=".html")
+        diff_file.write(pretty_diff)
+        diff_file.flush()
+        return diff_file
+
+    def pretty_diff(self, diff):
+        # prettify.rb will hang forever if given no input.
+        # Avoid the hang by returning an empty string.
+        if not diff:
+            return ""
+
+        pretty_patch_path = os.path.join(self._checkout_root,
+                                         "Websites", "bugs.webkit.org",
+                                         "PrettyPatch")
+        prettify_path = os.path.join(pretty_patch_path, "prettify.rb")
+        args = [
+            "ruby",
+            "-I",
+            pretty_patch_path,
+            prettify_path,
+        ]
+        # PrettyPatch does not modify the encoding of the diff output
+        # so we can't expect it to be utf-8.
+        return self._executive.run_command(args, input=diff, decode_output=False)
diff --git a/Tools/Scripts/webkitpy/common/prettypatch_unittest.py b/Tools/Scripts/webkitpy/common/prettypatch_unittest.py
new file mode 100644
index 0000000..37fa844
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/prettypatch_unittest.py
@@ -0,0 +1,88 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os.path
+import sys
+import unittest
+
+from webkitpy.common.system.executive import Executive
+from webkitpy.common.prettypatch import PrettyPatch
+
+
+class PrettyPatchTest(unittest.TestCase):
+    def check_ruby(self):
+        executive = Executive()
+        try:
+            executive.run_command(['ruby', '--version'])
+        except OSError:
+            return False
+        return True
+
+    _diff_with_multiple_encodings = """
+Index: utf8_test
+===================================================================
+--- utf8_test\t(revision 0)
++++ utf8_test\t(revision 0)
+@@ -0,0 +1 @@
++utf-8 test: \xc2\xa0
+Index: latin1_test
+===================================================================
+--- latin1_test\t(revision 0)
++++ latin1_test\t(revision 0)
+@@ -0,0 +1 @@
++latin1 test: \xa0
+"""
+
+    def _webkit_root(self):
+        webkitpy_common = os.path.dirname(__file__)
+        webkitpy = os.path.dirname(webkitpy_common)
+        scripts = os.path.dirname(webkitpy)
+        webkit_tools = os.path.dirname(scripts)
+        webkit_root = os.path.dirname(webkit_tools)
+        return webkit_root
+
+    def test_pretty_diff_encodings(self):
+        if not self.check_ruby():
+            return
+
+        if sys.platform == 'win32':
+            # FIXME: disabled due to https://bugs.webkit.org/show_bug.cgi?id=93192
+            return
+
+        pretty_patch = PrettyPatch(Executive(), self._webkit_root())
+        pretty = pretty_patch.pretty_diff(self._diff_with_multiple_encodings)
+        self.assertTrue(pretty)  # We got some output
+        self.assertTrue(isinstance(pretty, str))  # It's a byte array, not unicode
+
+    def test_pretty_print_empty_string(self):
+        if not self.check_ruby():
+            return
+
+        # Make sure that an empty diff does not hang the process.
+        pretty_patch = PrettyPatch(Executive(), self._webkit_root())
+        self.assertEqual(pretty_patch.pretty_diff(""), "")
diff --git a/Tools/Scripts/webkitpy/common/read_checksum_from_png.py b/Tools/Scripts/webkitpy/common/read_checksum_from_png.py
new file mode 100644
index 0000000..70a0502
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/read_checksum_from_png.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+def read_checksum(filehandle):
+    # We expect the comment to be at the beginning of the file.
+    data = filehandle.read(2048)
+    comment_key = 'tEXtchecksum\x00'
+    comment_pos = data.find(comment_key)
+    if comment_pos == -1:
+        return
+
+    checksum_pos = comment_pos + len(comment_key)
+    return data[checksum_pos:checksum_pos + 32]
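A minimal usage sketch (the path is hypothetical; the function returns None when no tEXtchecksum comment is found within the first 2048 bytes):

    from webkitpy.common import read_checksum_from_png

    with open('/tmp/expected.png', 'rb') as filehandle:
        checksum = read_checksum_from_png.read_checksum(filehandle)
    print checksum  # a 32-character hex digest, or None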
diff --git a/Tools/Scripts/webkitpy/common/read_checksum_from_png_unittest.py b/Tools/Scripts/webkitpy/common/read_checksum_from_png_unittest.py
new file mode 100644
index 0000000..defbbf8
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/read_checksum_from_png_unittest.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import StringIO
+import unittest
+from webkitpy.common import read_checksum_from_png
+
+
+class ReadChecksumFromPngTest(unittest.TestCase):
+    def test_read_checksum(self):
+        # Test a file with the comment.
+        filehandle = StringIO.StringIO('''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x03 \x00\x00\x02X\x08\x02\x00\x00\x00\x15\x14\x15'\x00\x00\x00)tEXtchecksum\x003c4134fe2739880353f91c5b84cadbaaC\xb8?\xec\x00\x00\x16\xfeIDATx\x9c\xed\xdd[\x8cU\xe5\xc1\xff\xf15T\x18\x0ea,)\xa6\x80XZ<\x10\n\xd6H\xc4V\x88}\xb5\xa9\xd6r\xd5\x0bki0\xa6\xb5ih\xd2\xde\x98PHz\xd1\x02=\\q#\x01\x8b\xa5rJ\x8b\x88i\xacM\xc5h\x8cbMk(\x1ez@!\x0c\xd5\xd2\xc2\xb44\x1c\x848\x1dF(\xeb\x7f\xb1\xff\xd9\xef~g\xd6\xde3\xe0o\x10\xec\xe7sa6{\xd6z\xd6\xb3\xd7\xf3\xa8_7\xdbM[Y\x96\x05\x00\x009\xc3\xde\xeb\t\x00\x00\xbc\xdf\x08,\x00\x800\x81\x05\x00\x10&\xb0\x00\x00\xc2\x04\x16\x00@\x98\xc0\x02\x00\x08\x13X\x00\x00a\x02\x0b\x00 Lx01\x00\x84\t,\x00\x800\x81\x05\x00\x10\xd64\xb0\xda\x9a\xdb\xb6m\xdb\xb4i\xd3\xfa\x9fr\xf3\xcd7\x0f\xe5T\x07\xe5\xd4\xa9''')
+        checksum = read_checksum_from_png.read_checksum(filehandle)
+        self.assertEquals('3c4134fe2739880353f91c5b84cadbaa', checksum)
+
+        # Test a file without the comment.
+        filehandle = StringIO.StringIO('''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x03 \x00\x00\x02X\x08\x02\x00\x00\x00\x15\x14\x15'\x00\x00\x16\xfeIDATx\x9c\xed\xdd[\x8cU\xe5\xc1\xff\xf15T\x18\x0ea,)\xa6\x80XZ<\x10\n\xd6H\xc4V\x88}\xb5\xa9\xd6r\xd5\x0bki0\xa6\xb5ih\xd2\xde\x98PHz\xd1\x02=\\q#\x01\x8b\xa5rJ\x8b\x88i\xacM\xc5h\x8cbMk(\x1ez@!\x0c\xd5\xd2\xc2\xb44\x1c\x848\x1dF(\xeb\x7f\xb1\xff\xd9\xef~g\xd6\xde3\xe0o\x10\xec\xe7sa6{\xd6z\xd6\xb3\xd7\xf3\xa8_7\xdbM[Y\x96\x05\x00\x009\xc3\xde\xeb\t\x00\x00\xbc\xdf\x08,\x00\x800\x81\x05\x00\x10&\xb0\x00\x00\xc2\x04\x16\x00@\x98\xc0\x02\x00\x08\x13X\x00\x00a\x02\x0b\x00 Lx01\x00\x84\t,\x00\x800\x81\x05\x00\x10\xd64\xb0\xda\x9a\xdb\xb6m\xdb\xb4i\xd3\xfa\x9fr\xf3\xcd7\x0f\xe5T\x07\xe5\xd4\xa9S\x8b\x17/\x1e?~\xfc\xf8\xf1\xe3\xef\xbf\xff\xfe\xf7z:M5\xbb\x87\x17\xcbUZ\x8f|V\xd7\xbd\x10\xb6\xcd{b\x88\xf6j\xb3\x9b?\x14\x9b\xa1>\xe6\xf9\xd9\xcf\x00\x17\x93''')
+        checksum = read_checksum_from_png.read_checksum(filehandle)
+        self.assertEquals(None, checksum)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/system/__init__.py b/Tools/Scripts/webkitpy/common/system/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/Tools/Scripts/webkitpy/common/system/autoinstall.py b/Tools/Scripts/webkitpy/common/system/autoinstall.py
new file mode 100755
index 0000000..f3045f8
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/autoinstall.py
@@ -0,0 +1,414 @@
+# Copyright (c) 2009, Daniel Krech All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#  * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+#  * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+#  * Neither the name of the Daniel Krech nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Support for automatically downloading Python packages from an URL."""
+
+
+import codecs
+import logging
+import os
+import shutil
+import sys
+import tarfile
+import tempfile
+import urllib
+import urlparse
+import zipfile
+
+_log = logging.getLogger(__name__)
+
+
+class AutoInstaller(object):
+
+    """Supports automatically installing Python packages from an URL.
+
+    Supports uncompressed files, .tar.gz, and .zip formats.
+
+    Basic usage:
+
+    installer = AutoInstaller()
+
+    installer.install(url="http://pypi.python.org/packages/source/p/pep8/pep8-0.5.0.tar.gz#md5=512a818af9979290cd619cce8e9c2e2b",
+                      url_subpath="pep8-0.5.0/pep8.py")
+    installer.install(url="http://pypi.python.org/packages/source/m/mechanize/mechanize-0.2.4.zip",
+                      url_subpath="mechanize")
+
+    """
+
+    def __init__(self, append_to_search_path=False, make_package=True,
+                 target_dir=None, temp_dir=None):
+        """Create an AutoInstaller instance, and set up the target directory.
+
+        Args:
+          append_to_search_path: A boolean value of whether to append the
+                                 target directory to the sys.path search path.
+          make_package: A boolean value of whether to make the target
+                        directory a package.  This adds an __init__.py file
+                        to the target directory -- allowing packages and
+                        modules within the target directory to be imported
+                        explicitly using dotted module names.
+          target_dir: The directory path to which packages should be installed.
+                      Defaults to a subdirectory of the folder containing
+                      this module called "autoinstalled".
+          temp_dir: The directory path to use for any temporary files
+                    generated while downloading, unzipping, and extracting
+                    packages to install.  Defaults to a standard temporary
+                    location generated by the tempfile module.  This
+                    parameter should normally be used only for development
+                    testing.
+
+        """
+        if target_dir is None:
+            this_dir = os.path.dirname(__file__)
+            target_dir = os.path.join(this_dir, "autoinstalled")
+
+        # Ensure that the target directory exists.
+        self._set_up_target_dir(target_dir, append_to_search_path, make_package)
+
+        self._target_dir = target_dir
+        self._temp_dir = temp_dir
+
+    def _write_file(self, path, text, encoding):
+        with codecs.open(path, "w", encoding) as filehandle:
+            filehandle.write(text)
+
+    def _set_up_target_dir(self, target_dir, append_to_search_path,
+                           make_package):
+        """Set up a target directory.
+
+        Args:
+          target_dir: The path to the target directory to set up.
+          append_to_search_path: A boolean value of whether to append the
+                                 target directory to the sys.path search path.
+          make_package: A boolean value of whether to make the target
+                        directory a package.  This adds an __init__.py file
+                        to the target directory -- allowing packages and
+                        modules within the target directory to be imported
+                        explicitly using dotted module names.
+
+        """
+        if not os.path.exists(target_dir):
+            os.makedirs(target_dir)
+
+        if append_to_search_path:
+            sys.path.append(target_dir)
+
+        if make_package:
+            self._make_package(target_dir)
+
+    def _make_package(self, target_dir):
+        init_path = os.path.join(target_dir, "__init__.py")
+        if not os.path.exists(init_path):
+            text = ("# This file is required for Python to search this "
+                    "directory for modules.\n")
+            self._write_file(init_path, text, "ascii")
+
+    def _create_scratch_directory_inner(self, prefix):
+        """Create a scratch directory without exception handling.
+
+        Creates a scratch directory inside the AutoInstaller temp
+        directory self._temp_dir, or inside a platform-dependent temp
+        directory if self._temp_dir is None.  Returns the path to the
+        created scratch directory.
+
+        Raises:
+          OSError: [Errno 2] if the containing temp directory self._temp_dir
+                             is not None and does not exist.
+
+        """
+        # The tempfile.mkdtemp() method function requires that the
+        # directory corresponding to the "dir" parameter already exist
+        # if it is not None.
+        scratch_dir = tempfile.mkdtemp(prefix=prefix, dir=self._temp_dir)
+        return scratch_dir
+
+    def _create_scratch_directory(self, target_name):
+        """Create a temporary scratch directory, and return its path.
+
+        The scratch directory is generated inside the temp directory
+        of this AutoInstaller instance.  This method also creates the
+        temp directory if it does not already exist.
+
+        """
+        prefix = target_name.replace(os.sep, "_") + "_"
+        try:
+            scratch_dir = self._create_scratch_directory_inner(prefix)
+        except OSError:
+            # Handle case of containing temp directory not existing--
+            # OSError: [Errno 2] No such file or directory:...
+            temp_dir = self._temp_dir
+            if temp_dir is None or os.path.exists(temp_dir):
+                raise
+            # Else try again after creating the temp directory.
+            os.makedirs(temp_dir)
+            scratch_dir = self._create_scratch_directory_inner(prefix)
+
+        return scratch_dir
+
+    def _url_downloaded_path(self, target_name):
+        return os.path.join(self._target_dir, ".%s.url" % target_name)
+
+    def _is_downloaded(self, target_name, url):
+        version_path = self._url_downloaded_path(target_name)
+
+        if not os.path.exists(version_path):
+            return False
+
+        with codecs.open(version_path, "r", "utf-8") as filehandle:
+            return filehandle.read().strip() == url.strip()
+
+    def _record_url_downloaded(self, target_name, url):
+        version_path = self._url_downloaded_path(target_name)
+        self._write_file(version_path, url, "utf-8")
+
+    def _extract_targz(self, path, scratch_dir):
+        # Extract into a directory named after the archive, minus the trailing ".tar.gz".
+        target_basename = os.path.basename(path[:-len(".tar.gz")])
+        target_path = os.path.join(scratch_dir, target_basename)
+
+        try:
+            tar_file = tarfile.open(path)
+        except tarfile.ReadError, err:
+            # Append existing Error message to new Error.
+            message = ("Could not open tar file: %s\n"
+                       " The file probably does not have the correct format.\n"
+                       " --> Inner message: %s"
+                       % (path, err))
+            raise Exception(message)
+
+        try:
+            tar_file.extractall(target_path)
+        finally:
+            tar_file.close()
+
+        return target_path
+
+    # This is a replacement for ZipFile.extractall(), which is
+    # available in Python 2.6 but not in earlier versions.
+    # NOTE: The version in 2.6.1 (which shipped on Snow Leopard) is broken!
+    def _extract_all(self, zip_file, target_dir):
+        for name in zip_file.namelist():
+            path = os.path.join(target_dir, name)
+            if not os.path.basename(path):
+                # Then the path ends in a slash, so it is a directory.
+                os.makedirs(path)
+                continue
+
+            try:
+                # We open this file w/o encoding, as we're reading/writing
+                # the raw byte-stream from the zip file.
+                outfile = open(path, 'wb')
+            except IOError:
+                # Not all zip files seem to list the directories explicitly,
+                # so try again after creating the containing directory.
+                _log.debug("Got IOError: retrying after creating directory...")
+                dirname = os.path.dirname(path)
+                os.makedirs(dirname)
+                outfile = open(path, 'wb')
+
+            try:
+                outfile.write(zip_file.read(name))
+            finally:
+                outfile.close()
+
+    def _unzip(self, path, scratch_dir):
+        # The archive is expected to expand to a directory named after it, minus the trailing ".zip".
+        target_basename = os.path.basename(path[:-len(".zip")])
+        target_path = os.path.join(scratch_dir, target_basename)
+
+        try:
+            zip_file = zipfile.ZipFile(path, "r")
+        except zipfile.BadZipfile, err:
+            message = ("Could not open zip file: %s\n"
+                       " --> Inner message: %s"
+                       % (path, err))
+            raise Exception(message)
+
+        try:
+            self._extract_all(zip_file, scratch_dir)
+        finally:
+            zip_file.close()
+
+        return target_path
+
+    def _prepare_package(self, path, scratch_dir):
+        """Prepare a package for use, if necessary, and return the new path.
+
+        For example, this method unzips zipped files and extracts
+        tar files.
+
+        Args:
+          path: The path to the downloaded URL contents.
+          scratch_dir: The scratch directory.  Note that the scratch
+                       directory contains the file designated by the
+                       path parameter.
+
+        """
+        # FIXME: Add other natural extensions.
+        if path.endswith(".zip"):
+            new_path = self._unzip(path, scratch_dir)
+        elif path.endswith(".tar.gz"):
+            new_path = self._extract_targz(path, scratch_dir)
+        else:
+            # No preparation is needed.
+            new_path = path
+
+        return new_path
+
+    def _download_to_stream(self, url, stream):
+        try:
+            netstream = urllib.urlopen(url)
+        except IOError, err:
+            # Append existing Error message to new Error.
+            message = ('Could not download Python modules from URL "%s".\n'
+                       " Make sure you are connected to the internet.\n"
+                       " You must be connected to the internet when "
+                       "downloading needed modules for the first time.\n"
+                       " --> Inner message: %s"
+                       % (url, err))
+            raise IOError(message)
+        code = 200
+        if hasattr(netstream, "getcode"):
+            code = netstream.getcode()
+        if not 200 <= code < 300:
+            raise ValueError("HTTP Error code %s" % code)
+
+        BUFSIZE = 2**13  # 8KB
+        while True:
+            data = netstream.read(BUFSIZE)
+            if not data:
+                break
+            stream.write(data)
+        netstream.close()
+
+    def _download(self, url, scratch_dir):
+        url_path = urlparse.urlsplit(url)[2]
+        url_path = os.path.normpath(url_path)  # Removes trailing slash.
+        target_filename = os.path.basename(url_path)
+        target_path = os.path.join(scratch_dir, target_filename)
+
+        with open(target_path, "wb") as stream:
+            self._download_to_stream(url, stream)
+
+        return target_path
+
+    def _install(self, scratch_dir, package_name, target_path, url,
+                 url_subpath):
+        """Install a python package from an URL.
+
+        This internal method overwrites the target path if the target
+        path already exists.
+
+        """
+        path = self._download(url=url, scratch_dir=scratch_dir)
+        path = self._prepare_package(path, scratch_dir)
+
+        if url_subpath is None:
+            source_path = path
+        else:
+            source_path = os.path.join(path, url_subpath)
+
+        if os.path.exists(target_path):
+            if os.path.isdir(target_path):
+                shutil.rmtree(target_path)
+            else:
+                os.remove(target_path)
+
+        # shutil.move() command creates intermediate directories if they do not exist.
+        shutil.move(source_path, target_path)
+
+        # ensure all the new directories are importable.
+        intermediate_dirs = os.path.dirname(os.path.relpath(target_path, self._target_dir))
+        parent_dirname = self._target_dir
+        for dirname in intermediate_dirs.split(os.sep):
+            parent_dirname = os.path.join(parent_dirname, dirname)
+            self._make_package(parent_dirname)
+
+        self._record_url_downloaded(package_name, url)
+
+    def install(self, url, should_refresh=False, target_name=None,
+                url_subpath=None):
+        """Install a python package from an URL.
+
+        Args:
+          url: The URL from which to download the package.
+
+        Optional Args:
+          should_refresh: A boolean value of whether the package should be
+                          downloaded again if the package is already present.
+          target_name: The name of the folder or file in the autoinstaller
+                       target directory at which the package should be
+                       installed.  Defaults to the base name of the
+                       URL sub-path.  This parameter must be provided if
+                       the URL sub-path is not specified.
+          url_subpath: The relative path of the URL directory that should
+                       be installed.  Defaults to the full directory, or
+                       the entire URL contents.
+
+        """
+        if target_name is None:
+            if not url_subpath:
+                raise ValueError('The "target_name" parameter must be '
+                                 'provided if the "url_subpath" parameter '
+                                 "is not provided.")
+            # Remove any trailing slashes.
+            url_subpath = os.path.normpath(url_subpath)
+            target_name = os.path.basename(url_subpath)
+
+        target_path = os.path.join(self._target_dir, target_name)
+        if not should_refresh and self._is_downloaded(target_name, url):
+            return False
+
+        package_name = target_name.replace(os.sep, '.')
+        _log.info("Auto-installing package: %s" % package_name)
+
+        # The scratch directory is where we will download and prepare
+        # files specific to this install until they are ready to move
+        # into place.
+        scratch_dir = self._create_scratch_directory(target_name)
+
+        try:
+            self._install(package_name=package_name,
+                          target_path=target_path,
+                          scratch_dir=scratch_dir,
+                          url=url,
+                          url_subpath=url_subpath)
+        except Exception, err:
+            # Append existing Error message to new Error.
+            message = ("Error auto-installing the %s package to:\n"
+                       ' "%s"\n'
+                       " --> Inner message: %s"
+                       % (target_name, target_path, err))
+            raise Exception(message)
+        finally:
+            shutil.rmtree(scratch_dir)
+        _log.debug('Auto-installed %s to:' % url)
+        _log.debug('    "%s"' % target_path)
+        return True
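For completeness, a short hedged sketch of the target_name variant described in the install() docstring above (the URL and target name are hypothetical):

    from webkitpy.common.system.autoinstall import AutoInstaller

    installer = AutoInstaller()
    # No url_subpath, so target_name is required; the extracted package is
    # moved under the default target directory (webkitpy/common/system/autoinstalled/example_module).
    installer.install(url="http://example.com/example_module.tar.gz",
                      target_name="example_module")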
diff --git a/Tools/Scripts/webkitpy/common/system/crashlogs.py b/Tools/Scripts/webkitpy/common/system/crashlogs.py
new file mode 100644
index 0000000..270ca81
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/crashlogs.py
@@ -0,0 +1,74 @@
+# Copyright (c) 2011, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import re
+
+
+class CrashLogs(object):
+    def __init__(self, host):
+        self._host = host
+
+    def find_newest_log(self, process_name, pid=None, include_errors=False, newer_than=None):
+        if self._host.platform.is_mac():
+            return self._find_newest_log_darwin(process_name, pid, include_errors, newer_than)
+        return None
+
+    def _log_directory_darwin(self):
+        log_directory = self._host.filesystem.expanduser("~")
+        log_directory = self._host.filesystem.join(log_directory, "Library", "Logs")
+        if self._host.filesystem.exists(self._host.filesystem.join(log_directory, "DiagnosticReports")):
+            log_directory = self._host.filesystem.join(log_directory, "DiagnosticReports")
+        else:
+            log_directory = self._host.filesystem.join(log_directory, "CrashReporter")
+        return log_directory
+
+    def _find_newest_log_darwin(self, process_name, pid, include_errors, newer_than):
+        def is_crash_log(fs, dirpath, basename):
+            return basename.startswith(process_name + "_") and basename.endswith(".crash")
+
+        log_directory = self._log_directory_darwin()
+        logs = self._host.filesystem.files_under(log_directory, file_filter=is_crash_log)
+        first_line_regex = re.compile(r'^Process:\s+(?P<process_name>.*) \[(?P<pid>\d+)\]$')
+        errors = ''
+        for path in reversed(sorted(logs)):
+            try:
+                if not newer_than or self._host.filesystem.mtime(path) > newer_than:
+                    f = self._host.filesystem.read_text_file(path)
+                    match = first_line_regex.match(f[0:f.find('\n')])
+                    if match and match.group('process_name') == process_name and (pid is None or int(match.group('pid')) == pid):
+                        return errors + f
+            except (IOError, OSError), e:
+                if include_errors:
+                    errors += "ERROR: Failed to read '%s': %s\n" % (path, str(e))
+
+        if include_errors and errors:
+            return errors
+        return None
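+
+
+# A minimal usage sketch, assuming a real SystemHost (the process name and pid
+# are illustrative):
+#
+#     from webkitpy.common.system.systemhost import SystemHost
+#
+#     crash_logs = CrashLogs(SystemHost())
+#     log = crash_logs.find_newest_log("DumpRenderTree", pid=28529, include_errors=True)
+#     if log:
+#         print log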
diff --git a/Tools/Scripts/webkitpy/common/system/crashlogs_unittest.py b/Tools/Scripts/webkitpy/common/system/crashlogs_unittest.py
new file mode 100644
index 0000000..1f5c40a
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/crashlogs_unittest.py
@@ -0,0 +1,122 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.crashlogs import CrashLogs
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.systemhost import SystemHost
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+from webkitpy.thirdparty.mock import Mock
+
+
+def make_mock_crash_report_darwin(process_name, pid):
+    return """Process:         {process_name} [{pid}]
+Path:            /Volumes/Data/slave/snowleopard-intel-release-tests/build/WebKitBuild/Release/{process_name}
+Identifier:      {process_name}
+Version:         ??? (???)
+Code Type:       X86-64 (Native)
+Parent Process:  Python [2578]
+
+Date/Time:       2011-12-07 13:27:34.816 -0800
+OS Version:      Mac OS X 10.6.8 (10K549)
+Report Version:  6
+
+Interval Since Last Report:          1660 sec
+Crashes Since Last Report:           1
+Per-App Crashes Since Last Report:   1
+Anonymous UUID:                      507D4EEB-9D70-4E2E-B322-2D2F0ABFEDC0
+
+Exception Type:  EXC_BREAKPOINT (SIGTRAP)
+Exception Codes: 0x0000000000000002, 0x0000000000000000
+Crashed Thread:  0
+
+Dyld Error Message:
+  Library not loaded: /Volumes/Data/WebKit-BuildSlave/snowleopard-intel-release/build/WebKitBuild/Release/WebCore.framework/Versions/A/WebCore
+  Referenced from: /Volumes/Data/slave/snowleopard-intel-release/build/WebKitBuild/Release/WebKit.framework/Versions/A/WebKit
+  Reason: image not found
+
+Binary Images:
+    0x7fff5fc00000 -     0x7fff5fc3be0f  dyld 132.1 (???) <29DECB19-0193-2575-D838-CF743F0400B2> /usr/lib/dyld
+
+System Profile:
+Model: Xserve3,1, BootROM XS31.0081.B04, 8 processors, Quad-Core Intel Xeon, 2.26 GHz, 6 GB, SMC 1.43f4
+Graphics: NVIDIA GeForce GT 120, NVIDIA GeForce GT 120, PCIe, 256 MB
+Memory Module: global_name
+Network Service: Ethernet 2, Ethernet, en1
+PCI Card: NVIDIA GeForce GT 120, sppci_displaycontroller, MXM-Slot
+Serial ATA Device: OPTIARC DVD RW AD-5670S
+""".format(process_name=process_name, pid=pid)
+
+class CrashLogsTest(unittest.TestCase):
+    def assertLinesEqual(self, a, b):
+        if hasattr(self, 'assertMultiLineEqual'):
+            self.assertMultiLineEqual(a, b)
+        else:
+            self.assertEqual(a.splitlines(), b.splitlines())
+
+
+    def test_find_log_darwin(self):
+        if not SystemHost().platform.is_mac():
+            return
+
+        older_mock_crash_report = make_mock_crash_report_darwin('DumpRenderTree', 28528)
+        mock_crash_report = make_mock_crash_report_darwin('DumpRenderTree', 28530)
+        newer_mock_crash_report = make_mock_crash_report_darwin('DumpRenderTree', 28529)
+        other_process_mock_crash_report = make_mock_crash_report_darwin('FooProcess', 28527)
+        misformatted_mock_crash_report = 'Junk that should not appear in a crash report' + make_mock_crash_report_darwin('DumpRenderTree', 28526)[200:]
+        files = {}
+        files['/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150718_quadzen.crash'] = older_mock_crash_report
+        files['/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150719_quadzen.crash'] = mock_crash_report
+        files['/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150720_quadzen.crash'] = newer_mock_crash_report
+        files['/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150721_quadzen.crash'] = None
+        files['/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150722_quadzen.crash'] = other_process_mock_crash_report
+        files['/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150723_quadzen.crash'] = misformatted_mock_crash_report
+        filesystem = MockFileSystem(files)
+        crash_logs = CrashLogs(MockSystemHost(filesystem=filesystem))
+        log = crash_logs.find_newest_log("DumpRenderTree")
+        self.assertLinesEqual(log, newer_mock_crash_report)
+        log = crash_logs.find_newest_log("DumpRenderTree", 28529)
+        self.assertLinesEqual(log, newer_mock_crash_report)
+        log = crash_logs.find_newest_log("DumpRenderTree", 28530)
+        self.assertLinesEqual(log, mock_crash_report)
+        log = crash_logs.find_newest_log("DumpRenderTree", 28531)
+        self.assertEqual(log, None)
+        log = crash_logs.find_newest_log("DumpRenderTree", newer_than=1.0)
+        self.assertEqual(log, None)
+
+        def bad_read(path):
+            raise IOError('IOError: No such file or directory')
+
+        def bad_mtime(path):
+            raise OSError('OSError: No such file or directory')
+
+        filesystem.read_text_file = bad_read
+        log = crash_logs.find_newest_log("DumpRenderTree", 28531, include_errors=True)
+        self.assertTrue('IOError: No such file or directory' in log)
+
+        filesystem = MockFileSystem(files)
+        crash_logs = CrashLogs(MockSystemHost(filesystem=filesystem))
+        filesystem.mtime = bad_mtime
+        log = crash_logs.find_newest_log("DumpRenderTree", newer_than=1.0, include_errors=True)
+        self.assertTrue('OSError: No such file or directory' in log)
diff --git a/Tools/Scripts/webkitpy/common/system/deprecated_logging.py b/Tools/Scripts/webkitpy/common/system/deprecated_logging.py
new file mode 100644
index 0000000..1375354
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/deprecated_logging.py
@@ -0,0 +1,91 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# WebKit's Python module for logging.
+# This module is now deprecated in favor of Python's built-in logging module.
+
+import codecs
+import os
+import sys
+
+
+def log(string):
+    print >> sys.stderr, string
+
+
+def error(string):
+    log("ERROR: %s" % string)
+    sys.exit(1)
+
+
+# Simple class to split output between multiple destinations
+class tee:
+    def __init__(self, *files):
+        self.files = files
+
+    # Callers should pass an already encoded string for writing.
+    def write(self, bytes):
+        for file in self.files:
+            file.write(bytes)
+
+
+class OutputTee:
+    def __init__(self):
+        self._original_stdout = None
+        self._original_stderr = None
+        self._files_for_output = []
+
+    def add_log(self, path):
+        log_file = self._open_log_file(path)
+        self._files_for_output.append(log_file)
+        self._tee_outputs_to_files(self._files_for_output)
+        return log_file
+
+    def remove_log(self, log_file):
+        self._files_for_output.remove(log_file)
+        self._tee_outputs_to_files(self._files_for_output)
+        log_file.close()
+
+    @staticmethod
+    def _open_log_file(log_path):
+        (log_directory, log_name) = os.path.split(log_path)
+        if log_directory and not os.path.exists(log_directory):
+            os.makedirs(log_directory)
+        return codecs.open(log_path, "a+", "utf-8")
+
+    def _tee_outputs_to_files(self, files):
+        if not self._original_stdout:
+            self._original_stdout = sys.stdout
+            self._original_stderr = sys.stderr
+        if files and len(files):
+            sys.stdout = tee(self._original_stdout, *files)
+            sys.stderr = tee(self._original_stderr, *files)
+        else:
+            sys.stdout = self._original_stdout
+            sys.stderr = self._original_stderr
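+
+
+# A minimal usage sketch (the log path is illustrative): OutputTee mirrors
+# stdout/stderr to one or more log files until the log is removed.
+#
+#     output_tee = OutputTee()
+#     log_file = output_tee.add_log("/tmp/webkitpy-queue.log")
+#     log("this line goes to stderr and to the log file")
+#     output_tee.remove_log(log_file)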
diff --git a/Tools/Scripts/webkitpy/common/system/deprecated_logging_unittest.py b/Tools/Scripts/webkitpy/common/system/deprecated_logging_unittest.py
new file mode 100644
index 0000000..3778162
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/deprecated_logging_unittest.py
@@ -0,0 +1,60 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import StringIO
+import tempfile
+import unittest
+
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.common.system.deprecated_logging import *
+
+class LoggingTest(unittest.TestCase):
+
+    def assert_log_equals(self, log_input, expected_output):
+        original_stderr = sys.stderr
+        test_stderr = StringIO.StringIO()
+        sys.stderr = test_stderr
+
+        try:
+            log(log_input)
+            actual_output = test_stderr.getvalue()
+        finally:
+            sys.stderr = original_stderr
+
+        self.assertEquals(actual_output, expected_output, "log(\"%s\") expected: %s actual: %s" % (log_input, expected_output, actual_output))
+
+    def test_log(self):
+        self.assert_log_equals("test", "test\n")
+
+        # Test that log() does not throw an exception when passed an object instead of a string.
+        self.assert_log_equals(ScriptError(message="ScriptError"), "ScriptError\n")
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/system/environment.py b/Tools/Scripts/webkitpy/common/system/environment.py
new file mode 100644
index 0000000..cd34048
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/environment.py
@@ -0,0 +1,41 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class Environment(object):
+    def __init__(self, env=None):
+        self.env = env or {}
+
+    def to_dictionary(self):
+        return self.env
+
+    def disable_gcc_smartquotes(self):
+        # Technically we only need to set LC_CTYPE to disable current
+        # smartquote behavior: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=38363
+        # Apple's Xcode sets LC_ALL instead, probably to be future-proof.
+        self.env['LC_ALL'] = 'C'
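+
+
+# A minimal usage sketch (os.environ and the command are illustrative, and
+# "executive" is assumed to be a webkitpy Executive(), whose run_command
+# accepts the resulting dictionary via its env argument):
+#
+#     import os
+#
+#     environment = Environment(os.environ.copy())
+#     environment.disable_gcc_smartquotes()
+#     executive.run_command(['gcc', '--version'], env=environment.to_dictionary())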
diff --git a/Tools/Scripts/webkitpy/common/system/environment_unittest.py b/Tools/Scripts/webkitpy/common/system/environment_unittest.py
new file mode 100644
index 0000000..6558b51
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/environment_unittest.py
@@ -0,0 +1,39 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from .environment import Environment
+
+
+class EnvironmentTest(unittest.TestCase):
+    def test_disable_gcc_smartquotes(self):
+        environment = Environment({})
+        environment.disable_gcc_smartquotes()
+        env = environment.to_dictionary()
+        self.assertEqual(env['LC_ALL'], 'C')
diff --git a/Tools/Scripts/webkitpy/common/system/executive.py b/Tools/Scripts/webkitpy/common/system/executive.py
new file mode 100644
index 0000000..b1d2390
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/executive.py
@@ -0,0 +1,481 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import errno
+import logging
+import multiprocessing
+import os
+import StringIO
+import signal
+import subprocess
+import sys
+import time
+
+from webkitpy.common.system.deprecated_logging import tee
+from webkitpy.common.system.filesystem import FileSystem
+
+
+_log = logging.getLogger(__name__)
+
+
+class ScriptError(Exception):
+
+    # This is a custom List.__str__ implementation to allow size limiting.
+    def _string_from_args(self, args, limit=100):
+        args_string = unicode(args)
+        # We could make this much fancier, but for now this is OK.
+        if len(args_string) > limit:
+            return args_string[:limit - 3] + "..."
+        return args_string
+
+    def __init__(self,
+                 message=None,
+                 script_args=None,
+                 exit_code=None,
+                 output=None,
+                 cwd=None):
+        if not message:
+            message = 'Failed to run "%s"' % self._string_from_args(script_args)
+            if exit_code:
+                message += " exit_code: %d" % exit_code
+            if cwd:
+                message += " cwd: %s" % cwd
+
+        Exception.__init__(self, message)
+        self.script_args = script_args # 'args' is already used by Exception
+        self.exit_code = exit_code
+        self.output = output
+        self.cwd = cwd
+
+    def message_with_output(self, output_limit=500):
+        if self.output:
+            if output_limit and len(self.output) > output_limit:
+                return u"%s\n\nLast %s characters of output:\n%s" % \
+                    (self, output_limit, self.output[-output_limit:])
+            return u"%s\n\n%s" % (self, self.output)
+        return unicode(self)
+
+    def command_name(self):
+        command_path = self.script_args
+        if type(command_path) is list:
+            command_path = command_path[0]
+        return os.path.basename(command_path)
+
+
+class Executive(object):
+    PIPE = subprocess.PIPE
+    STDOUT = subprocess.STDOUT
+
+    def _should_close_fds(self):
+        # We need to pass close_fds=True to work around Python bug #2320
+        # (otherwise we can hang when we kill DumpRenderTree when we are running
+        # multiple threads). See http://bugs.python.org/issue2320 .
+        # Note that close_fds isn't supported on Windows, but this bug only
+        # shows up on Mac and Linux.
+        return sys.platform not in ('win32', 'cygwin')
+
+    def _run_command_with_teed_output(self, args, teed_output, **kwargs):
+        args = map(unicode, args)  # Popen will throw an exception if args are non-strings (like int())
+        args = map(self._encode_argument_if_needed, args)
+
+        child_process = self.popen(args,
+                                   stdout=self.PIPE,
+                                   stderr=self.STDOUT,
+                                   close_fds=self._should_close_fds(),
+                                   **kwargs)
+
+        # Use our own custom wait loop because Popen ignores a tee'd
+        # stderr/stdout.
+        # FIXME: This could be improved not to flatten output to stdout.
+        while True:
+            output_line = child_process.stdout.readline()
+            if output_line == "" and child_process.poll() is not None:
+                # poll() is not threadsafe and can throw OSError due to:
+                # http://bugs.python.org/issue1731717
+                return child_process.poll()
+            # We assume that the child process wrote to us in utf-8,
+            # so no re-encoding is necessary before writing here.
+            teed_output.write(output_line)
+
+    # FIXME: Remove this deprecated method and move callers to run_command.
+    # FIXME: This method is a hack to allow running command which both
+    # capture their output and print out to stdin.  Useful for things
+    # like "build-webkit" where we want to display to the user that we're building
+    # but still have the output to stuff into a log file.
+    def run_and_throw_if_fail(self, args, quiet=False, decode_output=True, **kwargs):
+        # Cache the child's output locally so it can be used for error reports.
+        child_out_file = StringIO.StringIO()
+        tee_stdout = sys.stdout
+        if quiet:
+            dev_null = open(os.devnull, "w")  # FIXME: Does this need an encoding?
+            tee_stdout = dev_null
+        child_stdout = tee(child_out_file, tee_stdout)
+        exit_code = self._run_command_with_teed_output(args, child_stdout, **kwargs)
+        if quiet:
+            dev_null.close()
+
+        child_output = child_out_file.getvalue()
+        child_out_file.close()
+
+        if decode_output:
+            child_output = child_output.decode(self._child_process_encoding())
+
+        if exit_code:
+            raise ScriptError(script_args=args,
+                              exit_code=exit_code,
+                              output=child_output)
+        return child_output
+
+    def cpu_count(self):
+        return multiprocessing.cpu_count()
+
+    @staticmethod
+    def interpreter_for_script(script_path, fs=None):
+        fs = fs or FileSystem()
+        lines = fs.read_text_file(script_path).splitlines()
+        if not len(lines):
+            return None
+        first_line = lines[0]
+        if not first_line.startswith('#!'):
+            return None
+        if first_line.find('python') > -1:
+            return sys.executable
+        if first_line.find('perl') > -1:
+            return 'perl'
+        if first_line.find('ruby') > -1:
+            return 'ruby'
+        return None
+
+    @staticmethod
+    def shell_command_for_script(script_path, fs=None):
+        fs = fs or FileSystem()
+        # Win32 does not support shebang lines, so we need to detect the interpreter ourselves.
+        if sys.platform == 'win32':
+            interpreter = Executive.interpreter_for_script(script_path, fs)
+            if interpreter:
+                return [interpreter, script_path]
+        return [script_path]
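+
+    # A minimal usage sketch (the script path is illustrative):
+    #
+    #     cmd = Executive.shell_command_for_script('Tools/Scripts/test-webkitpy')
+    #     # On win32 this prepends the interpreter detected from the shebang
+    #     # line, e.g. [sys.executable, 'Tools/Scripts/test-webkitpy'];
+    #     # elsewhere it returns ['Tools/Scripts/test-webkitpy'] unchanged.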
+
+    def kill_process(self, pid):
+        """Attempts to kill the given pid.
+        Will fail silently if the pid does not exist or permissions are insufficient."""
+        if sys.platform == "win32":
+            # We only use taskkill.exe on windows (not cygwin) because subprocess.pid
+            # is a CYGWIN pid and taskkill.exe expects a windows pid.
+            # Thankfully os.kill on CYGWIN handles either pid type.
+            command = ["taskkill.exe", "/f", "/pid", pid]
+            # taskkill will exit 128 if the process is not found.  We should log.
+            self.run_command(command, error_handler=self.ignore_error)
+            return
+
+        # According to http://docs.python.org/library/os.html
+        # os.kill isn't available on Windows. Python 2.5.5's os.kill appears
+        # to work in Cygwin; however, it occasionally raises EAGAIN.
+        retries_left = 10 if sys.platform == "cygwin" else 1
+        while retries_left > 0:
+            try:
+                retries_left -= 1
+                os.kill(pid, signal.SIGKILL)
+                _ = os.waitpid(pid, os.WNOHANG)
+            except OSError, e:
+                if e.errno == errno.EAGAIN:
+                    if retries_left <= 0:
+                        _log.warn("Failed to kill pid %s.  Too many EAGAIN errors." % pid)
+                    continue
+                if e.errno == errno.ESRCH:  # The process does not exist.
+                    return
+                if e.errno == errno.EPIPE:  # The process has exited already on cygwin
+                    return
+                if e.errno == errno.ECHILD:
+                    # Can't wait on a non-child process, but the kill worked.
+                    return
+                if e.errno == errno.EACCES and sys.platform == 'cygwin':
+                    # Cygwin python sometimes can't kill native processes.
+                    return
+                raise
+
+    def _win32_check_running_pid(self, pid):
+        # importing ctypes at the top-level seems to cause weird crashes at
+        # exit under cygwin on apple's win port. Only win32 needs ctypes, so
+        # we import it here instead. See https://bugs.webkit.org/show_bug.cgi?id=91682
+        import ctypes
+
+        class PROCESSENTRY32(ctypes.Structure):
+            _fields_ = [("dwSize", ctypes.c_ulong),
+                        ("cntUsage", ctypes.c_ulong),
+                        ("th32ProcessID", ctypes.c_ulong),
+                        ("th32DefaultHeapID", ctypes.POINTER(ctypes.c_ulong)),
+                        ("th32ModuleID", ctypes.c_ulong),
+                        ("cntThreads", ctypes.c_ulong),
+                        ("th32ParentProcessID", ctypes.c_ulong),
+                        ("pcPriClassBase", ctypes.c_ulong),
+                        ("dwFlags", ctypes.c_ulong),
+                        ("szExeFile", ctypes.c_char * 260)]
+
+        CreateToolhelp32Snapshot = ctypes.windll.kernel32.CreateToolhelp32Snapshot
+        Process32First = ctypes.windll.kernel32.Process32First
+        Process32Next = ctypes.windll.kernel32.Process32Next
+        CloseHandle = ctypes.windll.kernel32.CloseHandle
+        TH32CS_SNAPPROCESS = 0x00000002  # win32 magic number
+        hProcessSnap = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0)
+        pe32 = PROCESSENTRY32()
+        pe32.dwSize = ctypes.sizeof(PROCESSENTRY32)
+        result = False
+        if not Process32First(hProcessSnap, ctypes.byref(pe32)):
+            _log.debug("Failed getting first process.")
+            CloseHandle(hProcessSnap)
+            return result
+        while True:
+            if pe32.th32ProcessID == pid:
+                result = True
+                break
+            if not Process32Next(hProcessSnap, ctypes.byref(pe32)):
+                break
+        CloseHandle(hProcessSnap)
+        return result
+
+    def check_running_pid(self, pid):
+        """Return True if pid is alive, otherwise return False."""
+        if sys.platform == 'win32':
+            return self._win32_check_running_pid(pid)
+
+        try:
+            os.kill(pid, 0)
+            return True
+        except OSError:
+            return False
+
+    def running_pids(self, process_name_filter=None):
+        if not process_name_filter:
+            process_name_filter = lambda process_name: True
+
+        running_pids = []
+
+        if sys.platform in ("win32", "cygwin"):
+            # FIXME: running_pids isn't implemented on Windows yet...
+            return []
+
+        ps_process = self.popen(['ps', '-eo', 'pid,comm'], stdout=self.PIPE, stderr=self.PIPE)
+        stdout, _ = ps_process.communicate()
+        for line in stdout.splitlines():
+            try:
+                # In some cases the line can contain one or more
+                # leading white-spaces, so strip it before split.
+                pid, process_name = line.strip().split(' ', 1)
+                if process_name_filter(process_name):
+                    running_pids.append(int(pid))
+            except ValueError, e:
+                pass
+
+        return sorted(running_pids)
+
+    def wait_newest(self, process_name_filter=None):
+        if not process_name_filter:
+            process_name_filter = lambda process_name: True
+
+        running_pids = self.running_pids(process_name_filter)
+        if not running_pids:
+            return
+        pid = running_pids[-1]
+
+        while self.check_running_pid(pid):
+            time.sleep(0.25)
+
+    def _windows_image_name(self, process_name):
+        name, extension = os.path.splitext(process_name)
+        if not extension:
+            # taskkill expects processes to end in .exe
+            # If necessary we could add a flag to disable appending .exe.
+            process_name = "%s.exe" % name
+        return process_name
+
+    def kill_all(self, process_name):
+        """Attempts to kill processes matching process_name.
+        Will fail silently if no processes are found."""
+        if sys.platform in ("win32", "cygwin"):
+            image_name = self._windows_image_name(process_name)
+            command = ["taskkill.exe", "/f", "/im", image_name]
+            # taskkill will exit 128 if the process is not found.  We should log.
+            self.run_command(command, error_handler=self.ignore_error)
+            return
+
+        # FIXME: It is inconsistent that kill_all uses TERM while kill_process
+        # uses KILL.  Windows always uses /f (which behaves like -KILL).
+        # We should pick one mode, or add support for switching between them.
+        # Note: Mac OS X 10.6 requires -SIGNALNAME before -u USER
+        command = ["killall", "-TERM", "-u", os.getenv("USER"), process_name]
+        # killall returns 1 if no process can be found and 2 on command error.
+        # FIXME: We should pass a custom error_handler to allow only exit_code 1.
+        # We should log when exit_code == 1.
+        self.run_command(command, error_handler=self.ignore_error)
+
+    # Error handlers do not need to be static methods once all callers are
+    # updated to use an Executive object.
+
+    @staticmethod
+    def default_error_handler(error):
+        raise error
+
+    @staticmethod
+    def ignore_error(error):
+        pass
+
+    def _compute_stdin(self, input):
+        """Returns (stdin, string_to_communicate)"""
+        # FIXME: We should be returning /dev/null for stdin
+        # or closing stdin after process creation to prevent
+        # child processes from getting input from the user.
+        if not input:
+            return (None, None)
+        if hasattr(input, "read"):  # Check if the input is a file.
+            return (input, None)  # Assume the file is in the right encoding.
+
+        # Popen in Python 2.5 and before does not automatically encode unicode objects.
+        # http://bugs.python.org/issue5290
+        # See https://bugs.webkit.org/show_bug.cgi?id=37528
+        # for an example of a regression caused by passing a unicode string directly.
+        # FIXME: We may need to encode differently on different platforms.
+        if isinstance(input, unicode):
+            input = input.encode(self._child_process_encoding())
+        return (self.PIPE, input)
+
+    def _command_for_printing(self, args):
+        """Returns a print-ready string representing command args.
+        The string should be copy/paste ready for execution in a shell."""
+        escaped_args = []
+        for arg in args:
+            if isinstance(arg, unicode):
+                # Escape any non-ascii characters for easy copy/paste
+                arg = arg.encode("unicode_escape")
+            # FIXME: Do we need to fix quotes here?
+            escaped_args.append(arg)
+        return " ".join(escaped_args)
+
+    # FIXME: run_and_throw_if_fail should be merged into this method.
+    def run_command(self,
+                    args,
+                    cwd=None,
+                    env=None,
+                    input=None,
+                    error_handler=None,
+                    return_exit_code=False,
+                    return_stderr=True,
+                    decode_output=True):
+        """Popen wrapper for convenience and to work around python bugs."""
+        assert(isinstance(args, list) or isinstance(args, tuple))
+        start_time = time.time()
+        args = map(unicode, args)  # Popen will throw an exception if args are non-strings (like int())
+        args = map(self._encode_argument_if_needed, args)
+
+        stdin, string_to_communicate = self._compute_stdin(input)
+        stderr = self.STDOUT if return_stderr else None
+
+        process = self.popen(args,
+                             stdin=stdin,
+                             stdout=self.PIPE,
+                             stderr=stderr,
+                             cwd=cwd,
+                             env=env,
+                             close_fds=self._should_close_fds())
+        output = process.communicate(string_to_communicate)[0]
+
+        # run_command automatically decodes to unicode() unless explicitly told not to.
+        if decode_output:
+            output = output.decode(self._child_process_encoding())
+
+        # wait() is not threadsafe and can throw OSError due to:
+        # http://bugs.python.org/issue1731717
+        exit_code = process.wait()
+
+        _log.debug('"%s" took %.2fs' % (self._command_for_printing(args), time.time() - start_time))
+
+        if return_exit_code:
+            return exit_code
+
+        if exit_code:
+            script_error = ScriptError(script_args=args,
+                                       exit_code=exit_code,
+                                       output=output,
+                                       cwd=cwd)
+            (error_handler or self.default_error_handler)(script_error)
+        return output
+
+    def _child_process_encoding(self):
+        # Win32 Python 2.x uses CreateProcessA rather than CreateProcessW
+        # to launch subprocesses, so we have to encode arguments using the
+        # current code page.
+        if sys.platform == 'win32' and sys.version < '3':
+            return 'mbcs'
+        # All other platforms use UTF-8.
+        # FIXME: Using UTF-8 on Cygwin will confuse Windows-native commands
+        # which will expect arguments to be encoded using the current code
+        # page.
+        return 'utf-8'
+
+    def _should_encode_child_process_arguments(self):
+        # Cygwin's Python's os.execv doesn't support unicode command
+        # arguments, and neither does Cygwin's execv itself.
+        if sys.platform == 'cygwin':
+            return True
+
+        # Win32 Python 2.x uses CreateProcessA rather than CreateProcessW
+        # to launch subprocesses, so we have to encode arguments using the
+        # current code page.
+        if sys.platform == 'win32' and sys.version < '3':
+            return True
+
+        return False
+
+    def _encode_argument_if_needed(self, argument):
+        if not self._should_encode_child_process_arguments():
+            return argument
+        return argument.encode(self._child_process_encoding())
+
+    def popen(self, *args, **kwargs):
+        return subprocess.Popen(*args, **kwargs)
+
+    def run_in_parallel(self, command_lines_and_cwds, processes=None):
+        """Runs a list of (cmd_line list, cwd string) tuples in parallel and returns a list of (retcode, stdout, stderr) tuples."""
+        assert len(command_lines_and_cwds)
+
+        if sys.platform in ('cygwin', 'win32'):
+            return map(_run_command_thunk, command_lines_and_cwds)
+        pool = multiprocessing.Pool(processes=processes)
+        results = pool.map(_run_command_thunk, command_lines_and_cwds)
+        pool.close()
+        pool.join()
+        return results
+
+
+def _run_command_thunk(cmd_line_and_cwd):
+    # Note that this needs to be a module-level function (and hence picklable) to work with multiprocessing.Pool.
+    (cmd_line, cwd) = cmd_line_and_cwd
+    proc = subprocess.Popen(cmd_line, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    stdout, stderr = proc.communicate()
+    return (proc.returncode, stdout, stderr)
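+
+
+# A minimal usage sketch (the commands are illustrative and assume a POSIX
+# environment):
+#
+#     executive = Executive()
+#     output = executive.run_command(['echo', 'hello'])               # u'hello\n'
+#     exit_code = executive.run_command(['ls', '/no-such-path'],
+#                                       return_exit_code=True)        # non-zero
+#     executive.run_command(['false'], error_handler=Executive.ignore_error)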
diff --git a/Tools/Scripts/webkitpy/common/system/executive_mock.py b/Tools/Scripts/webkitpy/common/system/executive_mock.py
new file mode 100644
index 0000000..c261353
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/executive_mock.py
@@ -0,0 +1,181 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import StringIO
+
+from webkitpy.common.system.deprecated_logging import log
+from webkitpy.common.system.executive import ScriptError
+
+
+class MockProcess(object):
+    def __init__(self, stdout='MOCK STDOUT\n', stderr=''):
+        self.pid = 42
+        self.stdout = StringIO.StringIO(stdout)
+        self.stderr = StringIO.StringIO(stderr)
+        self.stdin = StringIO.StringIO()
+        self.returncode = 0
+
+    def wait(self):
+        return
+
+# FIXME: This should be unified with MockExecutive2
+class MockExecutive(object):
+    PIPE = "MOCK PIPE"
+    STDOUT = "MOCK STDOUT"
+
+    @staticmethod
+    def ignore_error(error):
+        pass
+
+    def __init__(self, should_log=False, should_throw=False, should_throw_when_run=None):
+        self._should_log = should_log
+        self._should_throw = should_throw
+        self._should_throw_when_run = should_throw_when_run or set()
+        # FIXME: Once executive wraps os.getpid() we can just use a static pid for "this" process.
+        self._running_pids = {'test-webkitpy': os.getpid()}
+        self._proc = None
+        self.calls = []
+
+    def check_running_pid(self, pid):
+        return pid in self._running_pids.values()
+
+    def running_pids(self, process_name_filter):
+        running_pids = []
+        for process_name, process_pid in self._running_pids.iteritems():
+            if process_name_filter(process_name):
+                running_pids.append(process_pid)
+
+        log("MOCK running_pids: %s" % running_pids)
+        return running_pids
+
+    def run_and_throw_if_fail(self, args, quiet=False, cwd=None, env=None):
+        if self._should_log:
+            env_string = ""
+            if env:
+                env_string = ", env=%s" % env
+            log("MOCK run_and_throw_if_fail: %s, cwd=%s%s" % (args, cwd, env_string))
+        if self._should_throw_when_run.intersection(args):
+            raise ScriptError("Exception for %s" % args, output="MOCK command output")
+        return "MOCK output of child process"
+
+    def run_command(self,
+                    args,
+                    cwd=None,
+                    input=None,
+                    error_handler=None,
+                    return_exit_code=False,
+                    return_stderr=True,
+                    decode_output=False,
+                    env=None):
+
+        self.calls.append(args)
+
+        assert(isinstance(args, list) or isinstance(args, tuple))
+        if self._should_log:
+            env_string = ""
+            if env:
+                env_string = ", env=%s" % env
+            input_string = ""
+            if input:
+                input_string = ", input=%s" % input
+            log("MOCK run_command: %s, cwd=%s%s%s" % (args, cwd, env_string, input_string))
+        output = "MOCK output of child process"
+        if self._should_throw:
+            raise ScriptError("MOCK ScriptError", output=output)
+        return output
+
+    def cpu_count(self):
+        return 2
+
+    def kill_all(self, process_name):
+        pass
+
+    def kill_process(self, pid):
+        pass
+
+    def popen(self, args, cwd=None, env=None, **kwargs):
+        self.calls.append(args)
+        if self._should_log:
+            cwd_string = ""
+            if cwd:
+                cwd_string = ", cwd=%s" % cwd
+            env_string = ""
+            if env:
+                env_string = ", env=%s" % env
+            log("MOCK popen: %s%s%s" % (args, cwd_string, env_string))
+        if not self._proc:
+            self._proc = MockProcess()
+        return self._proc
+
+    def run_in_parallel(self, commands):
+        num_previous_calls = len(self.calls)
+        command_outputs = []
+        for cmd_line, cwd in commands:
+            command_outputs.append([0, self.run_command(cmd_line, cwd=cwd), ''])
+
+        new_calls = self.calls[num_previous_calls:]
+        self.calls = self.calls[:num_previous_calls]
+        self.calls.append(new_calls)
+        return command_outputs
+
+
+class MockExecutive2(MockExecutive):
+    """MockExecutive2 is like MockExecutive except it doesn't log anything."""
+
+    def __init__(self, output='', exit_code=0, exception=None, run_command_fn=None, stderr=''):
+        self._output = output
+        self._stderr = stderr
+        self._exit_code = exit_code
+        self._exception = exception
+        self._run_command_fn = run_command_fn
+        self.calls = []
+
+    def run_command(self,
+                    args,
+                    cwd=None,
+                    input=None,
+                    error_handler=None,
+                    return_exit_code=False,
+                    return_stderr=True,
+                    decode_output=False,
+                    env=None):
+        self.calls.append(args)
+        assert(isinstance(args, list) or isinstance(args, tuple))
+        if self._exception:
+            raise self._exception
+        if self._run_command_fn:
+            return self._run_command_fn(args)
+        if return_exit_code:
+            return self._exit_code
+        if self._exit_code and error_handler:
+            script_error = ScriptError(script_args=args, exit_code=self._exit_code, output=self._output)
+            error_handler(script_error)
+        if return_stderr:
+            return self._output + self._stderr
+        return self._output
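+
+
+# A minimal usage sketch for tests (the command and canned output are
+# illustrative):
+#
+#     def fake_run(args):
+#         return "MOCK git output"
+#
+#     executive = MockExecutive2(run_command_fn=fake_run)
+#     executive.run_command(['git', 'status'])   # returns "MOCK git output"
+#     executive.calls                            # [['git', 'status']]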
diff --git a/Tools/Scripts/webkitpy/common/system/executive_unittest.py b/Tools/Scripts/webkitpy/common/system/executive_unittest.py
new file mode 100644
index 0000000..57c5573
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/executive_unittest.py
@@ -0,0 +1,257 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2009 Daniel Bates (dbates@intudata.com). All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import errno
+import signal
+import subprocess
+import sys
+import time
+import unittest
+
+# Since we execute this script directly as part of the unit tests, we need to ensure
+# that Tools/Scripts is in sys.path for the next imports to work correctly.
+script_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
+if script_dir not in sys.path:
+    sys.path.append(script_dir)
+
+from webkitpy.common.system.executive import Executive, ScriptError
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+
+
+class ScriptErrorTest(unittest.TestCase):
+    def test_string_from_args(self):
+        error = ScriptError()
+        self.assertEquals(error._string_from_args(None), 'None')
+        self.assertEquals(error._string_from_args([]), '[]')
+        self.assertEquals(error._string_from_args(map(str, range(30))), "['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17'...")
+
+    def test_message_with_output(self):
+        error = ScriptError('My custom message!', '', -1)
+        self.assertEquals(error.message_with_output(), 'My custom message!')
+        error = ScriptError('My custom message!', '', -1, 'My output.')
+        self.assertEquals(error.message_with_output(), 'My custom message!\n\nMy output.')
+        error = ScriptError('', 'my_command!', -1, 'My output.', '/Users/username/blah')
+        self.assertEquals(error.message_with_output(), 'Failed to run "my_command!" exit_code: -1 cwd: /Users/username/blah\n\nMy output.')
+        error = ScriptError('', 'my_command!', -1, 'ab' + '1' * 499)
+        self.assertEquals(error.message_with_output(), 'Failed to run "my_command!" exit_code: -1\n\nLast 500 characters of output:\nb' + '1' * 499)
+
+def never_ending_command():
+    """Arguments for a command that will never end (useful for testing process
+    killing). It should be a process that is unlikely to already be running
+    because all instances will be killed."""
+    if sys.platform == 'win32':
+        return ['wmic']
+    return ['yes']
+
+
+def command_line(cmd, *args):
+    return [sys.executable, __file__, '--' + cmd] + list(args)
+
+
+class ExecutiveTest(unittest.TestCase):
+    def assert_interpreter_for_content(self, interpreter, content):
+        fs = MockFileSystem()
+
+        tempfile, temp_name = fs.open_binary_tempfile('')
+        tempfile.write(content)
+        tempfile.close()
+        file_interpreter = Executive.interpreter_for_script(temp_name, fs)
+
+        self.assertEqual(file_interpreter, interpreter)
+
+    def test_interpreter_for_script(self):
+        self.assert_interpreter_for_content(None, '')
+        self.assert_interpreter_for_content(None, 'abcd\nefgh\nijklm')
+        self.assert_interpreter_for_content(None, '##/usr/bin/perl')
+        self.assert_interpreter_for_content('perl', '#!/usr/bin/env perl')
+        self.assert_interpreter_for_content('perl', '#!/usr/bin/env perl\nfirst\nsecond')
+        self.assert_interpreter_for_content('perl', '#!/usr/bin/perl')
+        self.assert_interpreter_for_content('perl', '#!/usr/bin/perl -w')
+        self.assert_interpreter_for_content(sys.executable, '#!/usr/bin/env python')
+        self.assert_interpreter_for_content(sys.executable, '#!/usr/bin/env python\nfirst\nsecond')
+        self.assert_interpreter_for_content(sys.executable, '#!/usr/bin/python')
+        self.assert_interpreter_for_content('ruby', '#!/usr/bin/env ruby')
+        self.assert_interpreter_for_content('ruby', '#!/usr/bin/env ruby\nfirst\nsecond')
+        self.assert_interpreter_for_content('ruby', '#!/usr/bin/ruby')
+
+    def test_run_command_with_bad_command(self):
+        def run_bad_command():
+            Executive().run_command(["foo_bar_command_blah"], error_handler=Executive.ignore_error, return_exit_code=True)
+        self.failUnlessRaises(OSError, run_bad_command)
+
+    def test_run_command_args_type(self):
+        executive = Executive()
+        self.assertRaises(AssertionError, executive.run_command, "echo")
+        self.assertRaises(AssertionError, executive.run_command, u"echo")
+        executive.run_command(command_line('echo', 'foo'))
+        executive.run_command(tuple(command_line('echo', 'foo')))
+
+    def test_run_command_with_unicode(self):
+        """Validate that it is safe to pass unicode() objects
+        to Executive.run* methods, and they will return unicode()
+        objects by default unless decode_output=False"""
+        unicode_tor_input = u"WebKit \u2661 Tor Arne Vestb\u00F8!"
+        if sys.platform == 'win32':
+            encoding = 'mbcs'
+        else:
+            encoding = 'utf-8'
+        encoded_tor = unicode_tor_input.encode(encoding)
+        # On Windows, we expect the unicode->mbcs->unicode roundtrip to be
+        # lossy. On other platforms, we expect a lossless roundtrip.
+        if sys.platform == 'win32':
+            unicode_tor_output = encoded_tor.decode(encoding)
+        else:
+            unicode_tor_output = unicode_tor_input
+
+        executive = Executive()
+
+        output = executive.run_command(command_line('cat'), input=unicode_tor_input)
+        self.assertEquals(output, unicode_tor_output)
+
+        output = executive.run_command(command_line('echo', unicode_tor_input))
+        self.assertEquals(output, unicode_tor_output)
+
+        output = executive.run_command(command_line('echo', unicode_tor_input), decode_output=False)
+        self.assertEquals(output, encoded_tor)
+
+        # Make sure that str() input also works.
+        output = executive.run_command(command_line('cat'), input=encoded_tor, decode_output=False)
+        self.assertEquals(output, encoded_tor)
+
+        # FIXME: We should only have one run* method to test
+        output = executive.run_and_throw_if_fail(command_line('echo', unicode_tor_input), quiet=True)
+        self.assertEquals(output, unicode_tor_output)
+
+        output = executive.run_and_throw_if_fail(command_line('echo', unicode_tor_input), quiet=True, decode_output=False)
+        self.assertEquals(output, encoded_tor)
+
+    def serial_test_kill_process(self):
+        executive = Executive()
+        process = subprocess.Popen(never_ending_command(), stdout=subprocess.PIPE)
+        self.assertEqual(process.poll(), None)  # Process is running
+        executive.kill_process(process.pid)
+        # Note: Can't use a ternary since signal.SIGKILL is undefined for sys.platform == "win32"
+        if sys.platform == "win32":
+            # FIXME: https://bugs.webkit.org/show_bug.cgi?id=54790
+            # We seem to get either 0 or 1 here for some reason.
+            self.assertTrue(process.wait() in (0, 1))
+        elif sys.platform == "cygwin":
+            # FIXME: https://bugs.webkit.org/show_bug.cgi?id=98196
+            # cygwin seems to give us either SIGABRT or SIGKILL
+            self.assertTrue(process.wait() in (-signal.SIGABRT, -signal.SIGKILL))
+        else:
+            expected_exit_code = -signal.SIGKILL
+            self.assertEqual(process.wait(), expected_exit_code)
+
+        # Killing again should fail silently.
+        executive.kill_process(process.pid)
+
+    def serial_test_kill_all(self):
+        executive = Executive()
+        process = subprocess.Popen(never_ending_command(), stdout=subprocess.PIPE)
+        self.assertEqual(process.poll(), None)  # Process is running
+        executive.kill_all(never_ending_command()[0])
+        # Note: Can't use a ternary since signal.SIGTERM is undefined for sys.platform == "win32"
+        if sys.platform == "cygwin":
+            expected_exit_code = 0  # os.kill results in exit(0) for this process.
+            self.assertEqual(process.wait(), expected_exit_code)
+        elif sys.platform == "win32":
+            # FIXME: https://bugs.webkit.org/show_bug.cgi?id=54790
+            # We seem to get either 0 or 1 here for some reason.
+            self.assertTrue(process.wait() in (0, 1))
+        else:
+            expected_exit_code = -signal.SIGTERM
+            self.assertEqual(process.wait(), expected_exit_code)
+        # Killing again should fail silently.
+        executive.kill_all(never_ending_command()[0])
+
+    def _assert_windows_image_name(self, name, expected_windows_name):
+        executive = Executive()
+        windows_name = executive._windows_image_name(name)
+        self.assertEqual(windows_name, expected_windows_name)
+
+    def test_windows_image_name(self):
+        self._assert_windows_image_name("foo", "foo.exe")
+        self._assert_windows_image_name("foo.exe", "foo.exe")
+        self._assert_windows_image_name("foo.com", "foo.com")
+        # If the name already looks like it has an extension, even if it isn't
+        # a real executable extension, we have no choice but to return the original name.
+        self._assert_windows_image_name("foo.baz", "foo.baz")
+        self._assert_windows_image_name("foo.baz.exe", "foo.baz.exe")
+
+    def serial_test_check_running_pid(self):
+        executive = Executive()
+        self.assertTrue(executive.check_running_pid(os.getpid()))
+        # The default maximum pid on Linux is 32768, so 100000 should not be a running pid.
+        self.assertFalse(executive.check_running_pid(100000))
+
+    def serial_test_running_pids(self):
+        if sys.platform in ("win32", "cygwin"):
+            return  # This function isn't implemented on Windows yet.
+
+        executive = Executive()
+        pids = executive.running_pids()
+        self.assertTrue(os.getpid() in pids)
+
+    def serial_test_run_in_parallel(self):
+        # We run this test serially to avoid overloading the machine and throwing off the timing.
+
+        if sys.platform in ("win32", "cygwin"):
+            return  # This function isn't implemented properly on Windows yet.
+        import multiprocessing
+
+        NUM_PROCESSES = 4
+        DELAY_SECS = 0.25
+        cmd_line = [sys.executable, '-c', 'import time; time.sleep(%f); print "hello"' % DELAY_SECS]
+        cwd = os.getcwd()
+        commands = [tuple([cmd_line, cwd])] * NUM_PROCESSES
+        start = time.time()
+        command_outputs = Executive().run_in_parallel(commands, processes=NUM_PROCESSES)
+        done = time.time()
+        self.assertTrue(done - start < NUM_PROCESSES * DELAY_SECS)
+        self.assertEquals([output[1] for output in command_outputs], ["hello\n"] * NUM_PROCESSES)
+        self.assertEquals([],  multiprocessing.active_children())
+
+    def test_run_in_parallel_assert_nonempty(self):
+        self.assertRaises(AssertionError, Executive().run_in_parallel, [])
+
+
+def main(platform, stdin, stdout, cmd, args):
+    if platform == 'win32' and hasattr(stdout, 'fileno'):
+        import msvcrt
+        msvcrt.setmode(stdout.fileno(), os.O_BINARY)
+    if cmd == '--cat':
+        stdout.write(stdin.read())
+    elif cmd == '--echo':
+        stdout.write(' '.join(args))
+    return 0
+
+if __name__ == '__main__' and len(sys.argv) > 1 and sys.argv[1] in ('--cat', '--echo'):
+    sys.exit(main(sys.platform, sys.stdin, sys.stdout, sys.argv[1], sys.argv[2:]))
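The --cat/--echo dispatch above exists so the tests can re-invoke this same test module as a child process, giving Executive a real subprocess to run without depending on platform cat/echo binaries. A minimal sketch of that round trip, using subprocess.check_output (Python 2.7+) directly rather than the command_line() helper defined earlier in this file; the module path shown is a placeholder:

    import subprocess
    import sys

    # Placeholder path: substitute wherever this test module actually lives.
    module_path = 'Tools/Scripts/webkitpy/common/system/executive_unittest.py'
    output = subprocess.check_output([sys.executable, module_path, '--echo', 'hello', 'world'])
    assert output == 'hello world'  # main() writes the joined args with no trailing newline
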
diff --git a/Tools/Scripts/webkitpy/common/system/file_lock.py b/Tools/Scripts/webkitpy/common/system/file_lock.py
new file mode 100644
index 0000000..c542777
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/file_lock.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+#
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""This class helps to lock files exclusively across processes."""
+
+import logging
+import os
+import sys
+import time
+
+
+_log = logging.getLogger(__name__)
+
+
+class FileLock(object):
+
+    def __init__(self, lock_file_path, max_wait_time_sec=20):
+        self._lock_file_path = lock_file_path
+        self._lock_file_descriptor = None
+        self._max_wait_time_sec = max_wait_time_sec
+
+    def _create_lock(self):
+        if sys.platform == 'win32':
+            import msvcrt
+            msvcrt.locking(self._lock_file_descriptor, msvcrt.LK_NBLCK, 32)
+        else:
+            import fcntl
+            fcntl.flock(self._lock_file_descriptor, fcntl.LOCK_EX | fcntl.LOCK_NB)
+
+    def _remove_lock(self):
+        if sys.platform == 'win32':
+            import msvcrt
+            msvcrt.locking(self._lock_file_descriptor, msvcrt.LK_UNLCK, 32)
+        else:
+            import fcntl
+            fcntl.flock(self._lock_file_descriptor, fcntl.LOCK_UN)
+
+    def acquire_lock(self):
+        self._lock_file_descriptor = os.open(self._lock_file_path, os.O_TRUNC | os.O_CREAT)
+        start_time = time.time()
+        while True:
+            try:
+                self._create_lock()
+                return True
+            except IOError:
+                if time.time() - start_time > self._max_wait_time_sec:
+                    _log.debug("File locking failed: %s" % str(sys.exc_info()))
+                    os.close(self._lock_file_descriptor)
+                    self._lock_file_descriptor = None
+                    return False
+                # There's no compelling reason to spin hard here, so sleep for a bit.
+                time.sleep(0.01)
+
+    def release_lock(self):
+        try:
+            if self._lock_file_descriptor:
+                self._remove_lock()
+                os.close(self._lock_file_descriptor)
+                self._lock_file_descriptor = None
+            os.unlink(self._lock_file_path)
+        except (IOError, OSError):
+            _log.debug("Warning in release lock: %s" % str(sys.exc_info()))
diff --git a/Tools/Scripts/webkitpy/common/system/file_lock_integrationtest.py b/Tools/Scripts/webkitpy/common/system/file_lock_integrationtest.py
new file mode 100644
index 0000000..5cd27d1
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/file_lock_integrationtest.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+#
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import tempfile
+import unittest
+
+from webkitpy.common.system.file_lock import FileLock
+
+
+class FileLockTest(unittest.TestCase):
+
+    def setUp(self):
+        self._lock_name = "TestWebKit" + str(os.getpid()) + ".lock"
+        self._lock_path = os.path.join(tempfile.gettempdir(), self._lock_name)
+        self._file_lock1 = FileLock(self._lock_path, 0.1)
+        self._file_lock2 = FileLock(self._lock_path, 0.1)
+
+    def tearDown(self):
+        self._file_lock1.release_lock()
+        self._file_lock2.release_lock()
+
+    def test_lock_lifecycle(self):
+        # Create the lock.
+        self._file_lock1.acquire_lock()
+        self.assertTrue(os.path.exists(self._lock_path))
+
+        # Try to lock again.
+        self.assertFalse(self._file_lock2.acquire_lock())
+
+        # Release the lock.
+        self._file_lock1.release_lock()
+        self.assertFalse(os.path.exists(self._lock_path))
+
+    def test_stuck_lock(self):
+        open(self._lock_path, 'w').close()
+        self._file_lock1.acquire_lock()
+        self._file_lock1.release_lock()
diff --git a/Tools/Scripts/webkitpy/common/system/file_lock_mock.py b/Tools/Scripts/webkitpy/common/system/file_lock_mock.py
new file mode 100644
index 0000000..e2c1d5c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/file_lock_mock.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 Google Inc. All rights reserved.
+#
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class MockFileLock(object):
+    def __init__(self, lock_file_path, max_wait_time_sec=20):
+        pass
+
+    def acquire_lock(self):
+        pass
+
+    def release_lock(self):
+        pass
diff --git a/Tools/Scripts/webkitpy/common/system/fileset.py b/Tools/Scripts/webkitpy/common/system/fileset.py
new file mode 100644
index 0000000..57e9a28
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/fileset.py
@@ -0,0 +1,64 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.system.filesystem import FileSystem
+
+
+class FileSetFileHandle(object):
+    """Points to a file that resides in a file set"""
+    def __init__(self, fileset, filename, filesystem=None):
+        self._filename = filename
+        self._fileset = fileset
+        self._contents = None
+        self._filesystem = filesystem or FileSystem()
+
+    def __str__(self):
+        return "%s:%s" % (self._fileset, self._filename)
+
+    def close(self):
+        pass
+
+    def contents(self):
+        if self._contents is None:
+            self._contents = self._fileset.read(self._filename)
+        return self._contents
+
+    def save_to(self, path, filename=None):
+        if filename is None:
+            self._fileset.extract(self._filename, path)
+            return
+        with self._filesystem.mkdtemp() as temp_dir:
+            self._fileset.extract(self._filename, temp_dir)
+
+            src = self._filesystem.join(temp_dir, self._filename)
+            dest = self._filesystem.join(path, filename)
+            self._filesystem.copyfile(src, dest)
+
+    def delete(self):
+        self._fileset.delete(self._filename)
+
+    def name(self):
+        return self._filename
+
+    def splitext(self):
+        return self._filesystem.splitext(self.name())
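FileSetFileHandle only assumes that the fileset object passed to it provides read(), extract(), and delete(); the concrete file-set implementation lives elsewhere in the tree. A small sketch of that contract using a hypothetical in-memory file set (not a real webkitpy class):

    from webkitpy.common.system.fileset import FileSetFileHandle

    class InMemoryFileSet(object):
        # Hypothetical stand-in: any object with read/extract/delete will do.
        def __init__(self, files):
            self._files = files

        def read(self, filename):
            return self._files[filename]

        def extract(self, filename, path):
            pass  # a real file set would write self._files[filename] under path

        def delete(self, filename):
            del self._files[filename]

    handle = FileSetFileHandle(InMemoryFileSet({'results.txt': 'PASS'}), 'results.txt')
    assert handle.contents() == 'PASS'
    assert handle.splitext() == ('results', '.txt')
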
diff --git a/Tools/Scripts/webkitpy/common/system/filesystem.py b/Tools/Scripts/webkitpy/common/system/filesystem.py
new file mode 100644
index 0000000..3786c6f
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/filesystem.py
@@ -0,0 +1,269 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Wrapper object for the file system / source tree."""
+
+import codecs
+import errno
+import exceptions
+import glob
+import hashlib
+import os
+import shutil
+import sys
+import tempfile
+import time
+
+class FileSystem(object):
+    """FileSystem interface for webkitpy.
+
+    Unless otherwise noted, all paths are allowed to be either absolute
+    or relative."""
+    sep = os.sep
+    pardir = os.pardir
+
+    def abspath(self, path):
+        return os.path.abspath(path)
+
+    def realpath(self, path):
+        return os.path.realpath(path)
+
+    def path_to_module(self, module_name):
+        """A wrapper for all calls to __file__ to allow easy unit testing."""
+        # FIXME: This is the only use of sys in this file. It's possible this function should move elsewhere.
+        return sys.modules[module_name].__file__  # __file__ is always an absolute path.
+
+    def expanduser(self, path):
+        return os.path.expanduser(path)
+
+    def basename(self, path):
+        return os.path.basename(path)
+
+    def chdir(self, path):
+        return os.chdir(path)
+
+    def copyfile(self, source, destination):
+        shutil.copyfile(source, destination)
+
+    def dirname(self, path):
+        return os.path.dirname(path)
+
+    def exists(self, path):
+        return os.path.exists(path)
+
+    def files_under(self, path, dirs_to_skip=[], file_filter=None):
+        """Return the list of all files under the given path in topdown order.
+
+        Args:
+            dirs_to_skip: a list of directories to skip over during the
+                traversal (e.g., .svn, resources, etc.)
+            file_filter: if not None, the filter will be invoked
+                with the filesystem object and the dirname and basename of
+                each file found. The file is included in the result if the
+                callback returns True.
+        """
+        def filter_all(fs, dirpath, basename):
+            return True
+
+        file_filter = file_filter or filter_all
+        files = []
+        if self.isfile(path):
+            if file_filter(self, self.dirname(path), self.basename(path)):
+                files.append(path)
+            return files
+
+        if self.basename(path) in dirs_to_skip:
+            return []
+
+        for (dirpath, dirnames, filenames) in os.walk(path):
+            for d in dirs_to_skip:
+                if d in dirnames:
+                    dirnames.remove(d)
+
+            for filename in filenames:
+                if file_filter(self, dirpath, filename):
+                    files.append(self.join(dirpath, filename))
+        return files
+
+    def getcwd(self):
+        return os.getcwd()
+
+    def glob(self, path):
+        return glob.glob(path)
+
+    def isabs(self, path):
+        return os.path.isabs(path)
+
+    def isfile(self, path):
+        return os.path.isfile(path)
+
+    def isdir(self, path):
+        return os.path.isdir(path)
+
+    def join(self, *comps):
+        return os.path.join(*comps)
+
+    def listdir(self, path):
+        return os.listdir(path)
+
+    def mkdtemp(self, **kwargs):
+        """Create and return a uniquely named directory.
+
+        This is like tempfile.mkdtemp, but if used in a with statement
+        the directory will self-delete at the end of the block (if the
+        directory is empty; non-empty directories raise errors). The
+        directory can be safely deleted inside the block as well, if so
+        desired.
+
+        Note that the object returned is not a string and does not support all of the string
+        methods. If you need a string, coerce the object to a string and go from there.
+        """
+        class TemporaryDirectory(object):
+            def __init__(self, **kwargs):
+                self._kwargs = kwargs
+                self._directory_path = tempfile.mkdtemp(**self._kwargs)
+
+            def __str__(self):
+                return self._directory_path
+
+            def __enter__(self):
+                return self._directory_path
+
+            def __exit__(self, type, value, traceback):
+                # Only self-delete if necessary.
+
+                # FIXME: Should we delete non-empty directories?
+                if os.path.exists(self._directory_path):
+                    os.rmdir(self._directory_path)
+
+        return TemporaryDirectory(**kwargs)
+
+    def maybe_make_directory(self, *path):
+        """Create the specified directory if it doesn't already exist."""
+        try:
+            os.makedirs(self.join(*path))
+        except OSError, e:
+            if e.errno != errno.EEXIST:
+                raise
+
+    def move(self, source, destination):
+        shutil.move(source, destination)
+
+    def mtime(self, path):
+        return os.stat(path).st_mtime
+
+    def normpath(self, path):
+        return os.path.normpath(path)
+
+    def open_binary_tempfile(self, suffix):
+        """Create, open, and return a binary temp file. Returns a tuple of the file and the name."""
+        temp_fd, temp_name = tempfile.mkstemp(suffix)
+        f = os.fdopen(temp_fd, 'wb')
+        return f, temp_name
+
+    def open_binary_file_for_reading(self, path):
+        return codecs.open(path, 'rb')
+
+    def read_binary_file(self, path):
+        """Return the contents of the file at the given path as a byte string."""
+        with file(path, 'rb') as f:
+            return f.read()
+
+    def write_binary_file(self, path, contents):
+        with file(path, 'wb') as f:
+            f.write(contents)
+
+    def open_text_file_for_reading(self, path):
+        # Note: There appears to be an issue with the returned file objects
+        # not being seekable. See http://stackoverflow.com/questions/1510188/can-seek-and-tell-work-with-utf-8-encoded-documents-in-python .
+        return codecs.open(path, 'r', 'utf8')
+
+    def open_text_file_for_writing(self, path):
+        return codecs.open(path, 'w', 'utf8')
+
+    def read_text_file(self, path):
+        """Return the contents of the file at the given path as a Unicode string.
+
+        The file is read assuming it is a UTF-8 encoded file with no BOM."""
+        with codecs.open(path, 'r', 'utf8') as f:
+            return f.read()
+
+    def write_text_file(self, path, contents):
+        """Write the contents to the file at the given location.
+
+        The file is written encoded as UTF-8 with no BOM."""
+        with codecs.open(path, 'w', 'utf8') as f:
+            f.write(contents)
+
+    def sha1(self, path):
+        contents = self.read_binary_file(path)
+        return hashlib.sha1(contents).hexdigest()
+
+    def relpath(self, path, start='.'):
+        return os.path.relpath(path, start)
+
+    class _WindowsError(exceptions.OSError):
+        """Fake exception for Linux and Mac."""
+        pass
+
+    def remove(self, path, osremove=os.remove):
+        """On Windows, if a process was recently killed and it held on to a
+        file, the OS will hold on to the file for a short while.  This makes
+        attempts to delete the file fail.  To work around that, this method
+        will retry for a few seconds until Windows is done with the file."""
+        try:
+            exceptions.WindowsError
+        except AttributeError:
+            exceptions.WindowsError = FileSystem._WindowsError
+
+        retry_timeout_sec = 3.0
+        sleep_interval = 0.1
+        while True:
+            try:
+                osremove(path)
+                return True
+            except exceptions.WindowsError, e:
+                time.sleep(sleep_interval)
+                retry_timeout_sec -= sleep_interval
+                if retry_timeout_sec < 0:
+                    raise e
+
+    def rmtree(self, path):
+        """Delete the directory rooted at path, whether empty or not."""
+        shutil.rmtree(path, ignore_errors=True)
+
+    def copytree(self, source, destination):
+        shutil.copytree(source, destination)
+
+    def split(self, path):
+        """Return (dirname, basename + '.' + ext)"""
+        return os.path.split(path)
+
+    def splitext(self, path):
+        """Return (dirname + os.sep + basename, '.' + ext)"""
+        return os.path.splitext(path)
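A short sketch of the wrapper in use, in particular mkdtemp() as a context manager, which, as documented above, removes the directory on exit only if it has been left empty; the file name here is arbitrary:

    from webkitpy.common.system.filesystem import FileSystem

    fs = FileSystem()
    with fs.mkdtemp(prefix='filesystem_example_') as temp_dir:
        path = fs.join(temp_dir, 'note.txt')
        fs.write_text_file(path, u'written as UTF-8 with no BOM')
        assert fs.read_text_file(path) == u'written as UTF-8 with no BOM'
        fs.remove(path)  # leave the directory empty so __exit__ can rmdir() it
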
diff --git a/Tools/Scripts/webkitpy/common/system/filesystem_mock.py b/Tools/Scripts/webkitpy/common/system/filesystem_mock.py
new file mode 100644
index 0000000..d87fe1b
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/filesystem_mock.py
@@ -0,0 +1,474 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import StringIO
+import errno
+import hashlib
+import os
+import re
+
+from webkitpy.common.system import path
+
+
+class MockFileSystem(object):
+    sep = '/'
+    pardir = '..'
+
+    def __init__(self, files=None, dirs=None, cwd='/'):
+        """Initializes a "mock" filesystem that can be used to completely
+        stub out a filesystem.
+
+        Args:
+            files: a dict of filenames -> file contents. A file contents
+                value of None is used to indicate that the file should
+                not exist.
+        """
+        self.files = files or {}
+        self.written_files = {}
+        self.last_tmpdir = None
+        self.current_tmpno = 0
+        self.cwd = cwd
+        self.dirs = set(dirs or [])
+        self.dirs.add(cwd)
+        for f in self.files:
+            d = self.dirname(f)
+            while not d in self.dirs:
+                self.dirs.add(d)
+                d = self.dirname(d)
+
+    def clear_written_files(self):
+        # This function can be used to track what is written between steps in a test.
+        self.written_files = {}
+
+    def _raise_not_found(self, path):
+        raise IOError(errno.ENOENT, path, os.strerror(errno.ENOENT))
+
+    def _split(self, path):
+        # This is not quite a full implementation of os.path.split
+        # http://docs.python.org/library/os.path.html#os.path.split
+        if self.sep in path:
+            return path.rsplit(self.sep, 1)
+        return ('', path)
+
+    def abspath(self, path):
+        if os.path.isabs(path):
+            return self.normpath(path)
+        return self.abspath(self.join(self.cwd, path))
+
+    def realpath(self, path):
+        return self.abspath(path)
+
+    def basename(self, path):
+        return self._split(path)[1]
+
+    def expanduser(self, path):
+        if path[0] != "~":
+            return path
+        parts = path.split(self.sep, 1)
+        home_directory = self.sep + "Users" + self.sep + "mock"
+        if len(parts) == 1:
+            return home_directory
+        return home_directory + self.sep + parts[1]
+
+    def path_to_module(self, module_name):
+        return "/mock-checkout/Tools/Scripts/" + module_name.replace('.', '/') + ".py"
+
+    def chdir(self, path):
+        path = self.normpath(path)
+        if not self.isdir(path):
+            raise OSError(errno.ENOENT, path, os.strerror(errno.ENOENT))
+        self.cwd = path
+
+    def copyfile(self, source, destination):
+        if not self.exists(source):
+            self._raise_not_found(source)
+        if self.isdir(source):
+            raise IOError(errno.EISDIR, source, os.strerror(errno.EISDIR))
+        if self.isdir(destination):
+            raise IOError(errno.EISDIR, destination, os.strerror(errno.EISDIR))
+        if not self.exists(self.dirname(destination)):
+            raise IOError(errno.ENOENT, destination, os.strerror(errno.ENOENT))
+
+        self.files[destination] = self.files[source]
+        self.written_files[destination] = self.files[source]
+
+    def dirname(self, path):
+        return self._split(path)[0]
+
+    def exists(self, path):
+        return self.isfile(path) or self.isdir(path)
+
+    def files_under(self, path, dirs_to_skip=[], file_filter=None):
+        def filter_all(fs, dirpath, basename):
+            return True
+
+        file_filter = file_filter or filter_all
+        files = []
+        if self.isfile(path):
+            if file_filter(self, self.dirname(path), self.basename(path)) and self.files[path] is not None:
+                files.append(path)
+            return files
+
+        if self.basename(path) in dirs_to_skip:
+            return []
+
+        if not path.endswith(self.sep):
+            path += self.sep
+
+        dir_substrings = [self.sep + d + self.sep for d in dirs_to_skip]
+        for filename in self.files:
+            if not filename.startswith(path):
+                continue
+
+            suffix = filename[len(path) - 1:]
+            if any(dir_substring in suffix for dir_substring in dir_substrings):
+                continue
+
+            dirpath, basename = self._split(filename)
+            if file_filter(self, dirpath, basename) and self.files[filename] is not None:
+                files.append(filename)
+
+        return files
+
+    def getcwd(self):
+        return self.cwd
+
+    def glob(self, glob_string):
+        # FIXME: This handles '*', but not '?', '[', or ']'.
+        glob_string = re.escape(glob_string)
+        glob_string = glob_string.replace('\\*', '[^\\/]*') + '$'
+        glob_string = glob_string.replace('\\/', '/')
+        path_filter = lambda path: re.match(glob_string, path)
+
+        # We could use fnmatch.fnmatch, but that might not do the right thing on Windows.
+        existing_files = [path for path, contents in self.files.items() if contents is not None]
+        return filter(path_filter, existing_files) + filter(path_filter, self.dirs)
+
+    def isabs(self, path):
+        return path.startswith(self.sep)
+
+    def isfile(self, path):
+        return path in self.files and self.files[path] is not None
+
+    def isdir(self, path):
+        return self.normpath(path) in self.dirs
+
+    def _slow_but_correct_join(self, *comps):
+        return re.sub(re.escape(os.path.sep), self.sep, os.path.join(*comps))
+
+    def join(self, *comps):
+        # This function is called a lot, so we optimize it; there are
+        # unittests to check that we match _slow_but_correct_join(), above.
+        path = ''
+        sep = self.sep
+        for comp in comps:
+            if not comp:
+                continue
+            if comp[0] == sep:
+                path = comp
+                continue
+            if path:
+                path += sep
+            path += comp
+        if comps[-1] == '' and path:
+            path += '/'
+        path = path.replace(sep + sep, sep)
+        return path
+
+    def listdir(self, path):
+        sep = self.sep
+        if not self.isdir(path):
+            raise OSError("%s is not a directory" % path)
+
+        if not path.endswith(sep):
+            path += sep
+
+        dirs = []
+        files = []
+        for f in self.files:
+            if self.exists(f) and f.startswith(path):
+                remaining = f[len(path):]
+                if sep in remaining:
+                    dir = remaining[:remaining.index(sep)]
+                    if not dir in dirs:
+                        dirs.append(dir)
+                else:
+                    files.append(remaining)
+        return dirs + files
+
+    def mtime(self, path):
+        if self.exists(path):
+            return 0
+        self._raise_not_found(path)
+
+    def _mktemp(self, suffix='', prefix='tmp', dir=None, **kwargs):
+        if dir is None:
+            dir = self.sep + '__im_tmp'
+        curno = self.current_tmpno
+        self.current_tmpno += 1
+        self.last_tmpdir = self.join(dir, '%s_%u_%s' % (prefix, curno, suffix))
+        return self.last_tmpdir
+
+    def mkdtemp(self, **kwargs):
+        class TemporaryDirectory(object):
+            def __init__(self, fs, **kwargs):
+                self._kwargs = kwargs
+                self._filesystem = fs
+                self._directory_path = fs._mktemp(**kwargs)
+                fs.maybe_make_directory(self._directory_path)
+
+            def __str__(self):
+                return self._directory_path
+
+            def __enter__(self):
+                return self._directory_path
+
+            def __exit__(self, type, value, traceback):
+                # Only self-delete if necessary.
+
+                # FIXME: Should we delete non-empty directories?
+                if self._filesystem.exists(self._directory_path):
+                    self._filesystem.rmtree(self._directory_path)
+
+        return TemporaryDirectory(fs=self, **kwargs)
+
+    def maybe_make_directory(self, *path):
+        norm_path = self.normpath(self.join(*path))
+        while norm_path and not self.isdir(norm_path):
+            self.dirs.add(norm_path)
+            norm_path = self.dirname(norm_path)
+
+    def move(self, source, destination):
+        if self.files[source] is None:
+            self._raise_not_found(source)
+        self.files[destination] = self.files[source]
+        self.written_files[destination] = self.files[destination]
+        self.files[source] = None
+        self.written_files[source] = None
+
+    def _slow_but_correct_normpath(self, path):
+        return re.sub(re.escape(os.path.sep), self.sep, os.path.normpath(path))
+
+    def normpath(self, path):
+        # This function is called a lot, so we try to optimize the common cases
+        # instead of always calling _slow_but_correct_normpath(), above.
+        if '..' in path or '/./' in path:
+            # This doesn't happen very often; don't bother trying to optimize it.
+            return self._slow_but_correct_normpath(path)
+        if not path:
+            return '.'
+        if path == '/':
+            return path
+        if path == '/.':
+            return '/'
+        if path.endswith('/.'):
+            return path[:-2]
+        if path.endswith('/'):
+            return path[:-1]
+        return path
+
+    def open_binary_tempfile(self, suffix=''):
+        path = self._mktemp(suffix)
+        return (WritableBinaryFileObject(self, path), path)
+
+    def open_binary_file_for_reading(self, path):
+        if self.files[path] is None:
+            self._raise_not_found(path)
+        return ReadableBinaryFileObject(self, path, self.files[path])
+
+    def read_binary_file(self, path):
+        # Intentionally raises KeyError if we don't recognize the path.
+        if self.files[path] is None:
+            self._raise_not_found(path)
+        return self.files[path]
+
+    def write_binary_file(self, path, contents):
+        # FIXME: should this assert if dirname(path) doesn't exist?
+        self.maybe_make_directory(self.dirname(path))
+        self.files[path] = contents
+        self.written_files[path] = contents
+
+    def open_text_file_for_reading(self, path):
+        if self.files[path] is None:
+            self._raise_not_found(path)
+        return ReadableTextFileObject(self, path, self.files[path])
+
+    def open_text_file_for_writing(self, path):
+        return WritableTextFileObject(self, path)
+
+    def read_text_file(self, path):
+        return self.read_binary_file(path).decode('utf-8')
+
+    def write_text_file(self, path, contents):
+        return self.write_binary_file(path, contents.encode('utf-8'))
+
+    def sha1(self, path):
+        contents = self.read_binary_file(path)
+        return hashlib.sha1(contents).hexdigest()
+
+    def relpath(self, path, start='.'):
+        # Since os.path.relpath() calls os.path.normpath()
+        # (see http://docs.python.org/library/os.path.html#os.path.abspath )
+        # it also removes trailing slashes and converts forward and backward
+        # slashes to the preferred slash os.sep.
+        start = self.abspath(start)
+        path = self.abspath(path)
+
+        if not path.lower().startswith(start.lower()):
+            # path is outside the directory given by start; compute path from root
+            return '../' * start.count('/') + path
+
+        rel_path = path[len(start):]
+
+        if not rel_path:
+            # Then the paths are the same.
+            pass
+        elif rel_path[0] == self.sep:
+            # It is probably sufficient to remove just the first character
+            # since os.path.normpath() collapses separators, but we use
+            # lstrip() just to be sure.
+            rel_path = rel_path.lstrip(self.sep)
+        else:
+            # We are in the case typified by the following example:
+            # path = "/tmp/foobar", start = "/tmp/foo" -> rel_path = "bar"
+            # FIXME: We return a less-than-optimal result here.
+            return '../' * start.count('/') + path
+
+        return rel_path
+
+    def remove(self, path):
+        if self.files[path] is None:
+            self._raise_not_found(path)
+        self.files[path] = None
+        self.written_files[path] = None
+
+    def rmtree(self, path):
+        path = self.normpath(path)
+
+        for f in self.files:
+            if f.startswith(path):
+                self.files[f] = None
+
+        self.dirs = set(filter(lambda d: not d.startswith(path), self.dirs))
+
+    def copytree(self, source, destination):
+        source = self.normpath(source)
+        destination = self.normpath(destination)
+
+        for source_file in self.files:
+            if source_file.startswith(source):
+                destination_path = self.join(destination, self.relpath(source_file, source))
+                self.maybe_make_directory(self.dirname(destination_path))
+                self.files[destination_path] = self.files[source_file]
+
+    def split(self, path):
+        idx = path.rfind(self.sep)
+        if idx == -1:
+            return ('', path)
+        return (path[:idx], path[(idx + 1):])
+
+    def splitext(self, path):
+        idx = path.rfind('.')
+        if idx == -1:
+            idx = len(path)
+        return (path[0:idx], path[idx:])
+
+
+class WritableBinaryFileObject(object):
+    def __init__(self, fs, path):
+        self.fs = fs
+        self.path = path
+        self.closed = False
+        self.fs.files[path] = ""
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, traceback):
+        self.close()
+
+    def close(self):
+        self.closed = True
+
+    def write(self, str):
+        self.fs.files[self.path] += str
+        self.fs.written_files[self.path] = self.fs.files[self.path]
+
+
+class WritableTextFileObject(WritableBinaryFileObject):
+    def write(self, str):
+        WritableBinaryFileObject.write(self, str.encode('utf-8'))
+
+
+class ReadableBinaryFileObject(object):
+    def __init__(self, fs, path, data):
+        self.fs = fs
+        self.path = path
+        self.closed = False
+        self.data = data
+        self.offset = 0
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, traceback):
+        self.close()
+
+    def close(self):
+        self.closed = True
+
+    def read(self, bytes=None):
+        if not bytes:
+            return self.data[self.offset:]
+        start = self.offset
+        self.offset += bytes
+        return self.data[start:self.offset]
+
+
+class ReadableTextFileObject(ReadableBinaryFileObject):
+    def __init__(self, fs, path, data):
+        super(ReadableTextFileObject, self).__init__(fs, path, StringIO.StringIO(data))
+
+    def close(self):
+        self.data.close()
+        super(ReadableTextFileObject, self).close()
+
+    def read(self, bytes=-1):
+        return self.data.read(bytes)
+
+    def readline(self, length=None):
+        return self.data.readline(length)
+
+    def __iter__(self):
+        return self.data.__iter__()
+
+    def next(self):
+        return self.data.next()
+
+    def seek(self, offset, whence=os.SEEK_SET):
+        self.data.seek(offset, whence)
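A small sketch of MockFileSystem following the convention documented in its constructor: files is a dict mapping paths to contents, and a value of None marks a path that should be treated as nonexistent. The paths below are made up for illustration:

    from webkitpy.common.system.filesystem_mock import MockFileSystem

    fs = MockFileSystem(files={
        '/mock/LayoutTests/fast/test.html': 'contents',
        '/mock/LayoutTests/fast/missing.html': None,  # reads as "does not exist"
    })
    assert fs.exists('/mock/LayoutTests/fast/test.html')
    assert not fs.exists('/mock/LayoutTests/fast/missing.html')
    assert fs.files_under('/mock/LayoutTests') == ['/mock/LayoutTests/fast/test.html']
    fs.write_text_file('/mock/output.txt', u'hi')
    assert fs.written_files['/mock/output.txt'] == 'hi'
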
diff --git a/Tools/Scripts/webkitpy/common/system/filesystem_mock_unittest.py b/Tools/Scripts/webkitpy/common/system/filesystem_mock_unittest.py
new file mode 100644
index 0000000..2a6ccbf
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/filesystem_mock_unittest.py
@@ -0,0 +1,88 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import re
+import unittest
+
+
+from webkitpy.common.system import filesystem_mock
+from webkitpy.common.system import filesystem_unittest
+
+
+class MockFileSystemTest(unittest.TestCase, filesystem_unittest.GenericFileSystemTests):
+    def setUp(self):
+        self.fs = filesystem_mock.MockFileSystem()
+        self.setup_generic_test_dir()
+
+    def tearDown(self):
+        self.teardown_generic_test_dir()
+        self.fs = None
+
+    def quick_check(self, test_fn, good_fn, *tests):
+        for test in tests:
+            if hasattr(test, '__iter__'):
+                expected = good_fn(*test)
+                actual = test_fn(*test)
+            else:
+                expected = good_fn(test)
+                actual = test_fn(test)
+            self.assertEquals(expected, actual, 'given %s, expected %s, got %s' % (repr(test), repr(expected), repr(actual)))
+
+    def test_join(self):
+        self.quick_check(self.fs.join,
+                         self.fs._slow_but_correct_join,
+                         ('',),
+                         ('', 'bar'),
+                         ('foo',),
+                         ('foo/',),
+                         ('foo', ''),
+                         ('foo/', ''),
+                         ('foo', 'bar'),
+                         ('foo', '/bar'),
+                         )
+
+    def test_normpath(self):
+        self.quick_check(self.fs.normpath,
+                         self.fs._slow_but_correct_normpath,
+                         '',
+                         '/',
+                         '.',
+                         '/.',
+                         'foo',
+                         'foo/',
+                         'foo/.',
+                         'foo/bar',
+                         '/foo',
+                         'foo/../bar',
+                         'foo/../bar/baz',
+                         '../foo')
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/system/filesystem_unittest.py b/Tools/Scripts/webkitpy/common/system/filesystem_unittest.py
new file mode 100644
index 0000000..e6d1e42
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/filesystem_unittest.py
@@ -0,0 +1,260 @@
+# vim: set fileencoding=utf-8 :
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# NOTE: The fileencoding comment on the first line of the file is
+# important; without it, Python will choke while trying to parse the file,
+# since it includes non-ASCII characters.
+
+import os
+import stat
+import sys
+import tempfile
+import unittest
+
+from filesystem import FileSystem
+
+
+class GenericFileSystemTests(object):
+    """Tests that should pass on either a real or mock filesystem."""
+    def setup_generic_test_dir(self):
+        fs = self.fs
+        self.generic_test_dir = str(self.fs.mkdtemp())
+        self.orig_cwd = fs.getcwd()
+        fs.chdir(self.generic_test_dir)
+        fs.write_text_file('foo.txt', 'foo')
+        fs.write_text_file('foobar', 'foobar')
+        fs.maybe_make_directory('foodir')
+        fs.write_text_file(fs.join('foodir', 'baz'), 'baz')
+        fs.chdir(self.orig_cwd)
+
+    def teardown_generic_test_dir(self):
+        self.fs.rmtree(self.generic_test_dir)
+        self.fs.chdir(self.orig_cwd)
+        self.generic_test_dir = None
+
+    def test_glob__trailing_asterisk(self):
+        self.fs.chdir(self.generic_test_dir)
+        self.assertEquals(set(self.fs.glob('fo*')), set(['foo.txt', 'foobar', 'foodir']))
+
+    def test_glob__leading_asterisk(self):
+        self.fs.chdir(self.generic_test_dir)
+        self.assertEquals(set(self.fs.glob('*xt')), set(['foo.txt']))
+
+    def test_glob__middle_asterisk(self):
+        self.fs.chdir(self.generic_test_dir)
+        self.assertEquals(set(self.fs.glob('f*r')), set(['foobar', 'foodir']))
+
+    def test_glob__period_is_escaped(self):
+        self.fs.chdir(self.generic_test_dir)
+        self.assertEquals(set(self.fs.glob('foo.*')), set(['foo.txt']))
+
+class RealFileSystemTest(unittest.TestCase, GenericFileSystemTests):
+    def setUp(self):
+        self.fs = FileSystem()
+        self.setup_generic_test_dir()
+
+        self._this_dir = os.path.dirname(os.path.abspath(__file__))
+        self._missing_file = os.path.join(self._this_dir, 'missing_file.py')
+        self._this_file = os.path.join(self._this_dir, 'filesystem_unittest.py')
+
+    def tearDown(self):
+        self.teardown_generic_test_dir()
+        self.fs = None
+
+    def test_chdir(self):
+        fs = FileSystem()
+        cwd = fs.getcwd()
+        newdir = '/'
+        if sys.platform == 'win32':
+            newdir = 'c:\\'
+        fs.chdir(newdir)
+        self.assertEquals(fs.getcwd(), newdir)
+        fs.chdir(cwd)
+
+    def test_chdir__notexists(self):
+        fs = FileSystem()
+        newdir = '/dirdoesnotexist'
+        if sys.platform == 'win32':
+            newdir = 'c:\\dirdoesnotexist'
+        self.assertRaises(OSError, fs.chdir, newdir)
+
+    def test_exists__true(self):
+        fs = FileSystem()
+        self.assertTrue(fs.exists(self._this_file))
+
+    def test_exists__false(self):
+        fs = FileSystem()
+        self.assertFalse(fs.exists(self._missing_file))
+
+    def test_getcwd(self):
+        fs = FileSystem()
+        self.assertTrue(fs.exists(fs.getcwd()))
+
+    def test_isdir__true(self):
+        fs = FileSystem()
+        self.assertTrue(fs.isdir(self._this_dir))
+
+    def test_isdir__false(self):
+        fs = FileSystem()
+        self.assertFalse(fs.isdir(self._this_file))
+
+    def test_join(self):
+        fs = FileSystem()
+        self.assertEqual(fs.join('foo', 'bar'),
+                         os.path.join('foo', 'bar'))
+
+    def test_listdir(self):
+        fs = FileSystem()
+        with fs.mkdtemp(prefix='filesystem_unittest_') as d:
+            self.assertEqual(fs.listdir(d), [])
+            new_file = os.path.join(d, 'foo')
+            fs.write_text_file(new_file, u'foo')
+            self.assertEqual(fs.listdir(d), ['foo'])
+            os.remove(new_file)
+
+    def test_maybe_make_directory__success(self):
+        fs = FileSystem()
+
+        with fs.mkdtemp(prefix='filesystem_unittest_') as base_path:
+            sub_path = os.path.join(base_path, "newdir")
+            self.assertFalse(os.path.exists(sub_path))
+            self.assertFalse(fs.isdir(sub_path))
+
+            fs.maybe_make_directory(sub_path)
+            self.assertTrue(os.path.exists(sub_path))
+            self.assertTrue(fs.isdir(sub_path))
+
+            # Make sure we can re-create it.
+            fs.maybe_make_directory(sub_path)
+            self.assertTrue(os.path.exists(sub_path))
+            self.assertTrue(fs.isdir(sub_path))
+
+            # Clean up.
+            os.rmdir(sub_path)
+
+        self.assertFalse(os.path.exists(base_path))
+        self.assertFalse(fs.isdir(base_path))
+
+    def test_maybe_make_directory__failure(self):
+        # FIXME: os.chmod() doesn't work on Windows to set directories
+        # as readonly, so we skip this test for now.
+        if sys.platform in ('win32', 'cygwin'):
+            return
+
+        fs = FileSystem()
+        with fs.mkdtemp(prefix='filesystem_unittest_') as d:
+            # Remove write permissions on the parent directory.
+            os.chmod(d, stat.S_IRUSR)
+
+            # Now try to create a sub directory - should fail.
+            sub_dir = fs.join(d, 'subdir')
+            self.assertRaises(OSError, fs.maybe_make_directory, sub_dir)
+
+            # Clean up in case the test failed and we did create the
+            # directory.
+            if os.path.exists(sub_dir):
+                os.rmdir(sub_dir)
+
+    def test_read_and_write_text_file(self):
+        fs = FileSystem()
+        text_path = None
+
+        unicode_text_string = u'\u016An\u012Dc\u014Dde\u033D'
+        hex_equivalent = '\xC5\xAA\x6E\xC4\xAD\x63\xC5\x8D\x64\x65\xCC\xBD'
+        try:
+            text_path = tempfile.mktemp(prefix='tree_unittest_')
+            file = fs.open_text_file_for_writing(text_path)
+            file.write(unicode_text_string)
+            file.close()
+
+            file = fs.open_text_file_for_reading(text_path)
+            read_text = file.read()
+            file.close()
+
+            self.assertEqual(read_text, unicode_text_string)
+        finally:
+            if text_path and fs.isfile(text_path):
+                os.remove(text_path)
+
+    def test_read_and_write_file(self):
+        fs = FileSystem()
+        text_path = None
+        binary_path = None
+
+        unicode_text_string = u'\u016An\u012Dc\u014Dde\u033D'
+        hex_equivalent = '\xC5\xAA\x6E\xC4\xAD\x63\xC5\x8D\x64\x65\xCC\xBD'
+        try:
+            text_path = tempfile.mktemp(prefix='tree_unittest_')
+            binary_path = tempfile.mktemp(prefix='tree_unittest_')
+            fs.write_text_file(text_path, unicode_text_string)
+            contents = fs.read_binary_file(text_path)
+            self.assertEqual(contents, hex_equivalent)
+
+            fs.write_binary_file(binary_path, hex_equivalent)
+            text_contents = fs.read_text_file(binary_path)
+            self.assertEqual(text_contents, unicode_text_string)
+        finally:
+            if text_path and fs.isfile(text_path):
+                os.remove(text_path)
+            if binary_path and fs.isfile(binary_path):
+                os.remove(binary_path)
+
+    def test_read_binary_file__missing(self):
+        fs = FileSystem()
+        self.assertRaises(IOError, fs.read_binary_file, self._missing_file)
+
+    def test_read_text_file__missing(self):
+        fs = FileSystem()
+        self.assertRaises(IOError, fs.read_text_file, self._missing_file)
+
+    def test_remove_file_with_retry(self):
+        RealFileSystemTest._remove_failures = 2
+
+        def remove_with_exception(filename):
+            RealFileSystemTest._remove_failures -= 1
+            if RealFileSystemTest._remove_failures >= 0:
+                try:
+                    raise WindowsError
+                except NameError:
+                    raise FileSystem._WindowsError
+
+        fs = FileSystem()
+        self.assertTrue(fs.remove('filename', remove_with_exception))
+        self.assertEquals(-1, RealFileSystemTest._remove_failures)
+
+    def test_sep(self):
+        fs = FileSystem()
+
+        self.assertEquals(fs.sep, os.sep)
+        self.assertEquals(fs.join("foo", "bar"),
+                          os.path.join("foo", "bar"))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/system/logtesting.py b/Tools/Scripts/webkitpy/common/system/logtesting.py
new file mode 100644
index 0000000..e361cb5
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/logtesting.py
@@ -0,0 +1,258 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Supports the unit-testing of logging code.
+
+Provides support for unit-testing messages logged using the built-in
+logging module.
+
+Inherit from the LoggingTestCase class for basic testing needs.  For
+more advanced needs (e.g. unit-testing methods that configure logging),
+see the TestLogStream class, and perhaps also the LogTesting class.
+
+"""
+
+import logging
+import unittest
+
+
+class TestLogStream(object):
+
+    """Represents a file-like object for unit-testing logging.
+
+    This is meant for passing to the logging.StreamHandler constructor.
+    Log messages captured by instances of this object can be tested
+    using self.assertMessages() below.
+
+    """
+
+    def __init__(self, test_case):
+        """Create an instance.
+
+        Args:
+          test_case: A unittest.TestCase instance.
+
+        """
+        self._test_case = test_case
+        self.messages = []
+        """A list of log messages written to the stream."""
+
+    # Python documentation says that any object passed to the StreamHandler
+    # constructor should support write() and flush():
+    #
+    # http://docs.python.org/library/logging.html#module-logging.handlers
+    def write(self, message):
+        self.messages.append(message)
+
+    def flush(self):
+        pass
+
+    def assertMessages(self, messages):
+        """Assert that the given messages match the logged messages.
+
+        messages: A list of log message strings.
+
+        """
+        self._test_case.assertEquals(messages, self.messages)
+
+
+class LogTesting(object):
+
+    """Supports end-to-end unit-testing of log messages.
+
+        Sample usage:
+
+          class SampleTest(unittest.TestCase):
+
+              def setUp(self):
+                  self._log = LogTesting.setUp(self)  # Turn logging on.
+
+              def tearDown(self):
+                  self._log.tearDown()  # Turn off and reset logging.
+
+              def test_logging_in_some_method(self):
+                  call_some_method()  # Contains calls to _log.info(), etc.
+
+                  # Check the resulting log messages.
+                  self._log.assertMessages(["INFO: expected message #1",
+                                            "WARNING: expected message #2"])
+
+    """
+
+    def __init__(self, test_stream, handler):
+        """Create an instance.
+
+        This method should never be called directly.  Instances should
+        instead be created using the static setUp() method.
+
+        Args:
+          test_stream: A TestLogStream instance.
+          handler: The handler added to the logger.
+
+        """
+        self._test_stream = test_stream
+        self._handler = handler
+
+    @staticmethod
+    def _getLogger():
+        """Return the logger being tested."""
+        # It is possible we might want to return something other than
+        # the root logger in some special situation.  For now, the
+        # root logger seems to suffice.
+        return logging.getLogger()
+
+    @staticmethod
+    def setUp(test_case, logging_level=logging.INFO):
+        """Configure logging for unit testing.
+
+        Configures the root logger to log to a testing log stream.
+        Only messages logged at or above the given level are logged
+        to the stream.  Messages logged to the stream are formatted
+        in the following way, for example--
+
+        "INFO: This is a test log message."
+
+        This method should normally be called in the setUp() method
+        of a unittest.TestCase.  See the docstring of this class
+        for more details.
+
+        Returns:
+          A LogTesting instance.
+
+        Args:
+          test_case: A unittest.TestCase instance.
+          logging_level: An integer logging level that is the minimum level
+                         of log messages you would like to test.
+
+        """
+        stream = TestLogStream(test_case)
+        handler = logging.StreamHandler(stream)
+        handler.setLevel(logging_level)
+        formatter = logging.Formatter("%(levelname)s: %(message)s")
+        handler.setFormatter(formatter)
+
+        # Notice that we only change the root logger by adding a handler
+        # to it.  In particular, we do not reset its level using
+        # logger.setLevel().  This ensures that we have not interfered
+        # with how the code being tested may have configured the root
+        # logger.
+        logger = LogTesting._getLogger()
+        logger.addHandler(handler)
+
+        return LogTesting(stream, handler)
+
+    def tearDown(self):
+        """Assert there are no remaining log messages, and reset logging.
+
+        This method asserts that there are no more messages in the array of
+        log messages, and then restores logging to its original state.
+        This method should normally be called in the tearDown() method of a
+        unittest.TestCase.  See the docstring of this class for more details.
+
+        """
+        self.assertMessages([])
+        logger = LogTesting._getLogger()
+        logger.removeHandler(self._handler)
+
+    def messages(self):
+        """Return the current list of log messages."""
+        return self._test_stream.messages
+
+    # FIXME: Add a clearMessages() method for cases where the caller
+    #        deliberately doesn't want to assert every message.
+
+    # We clear the log messages after asserting since they are no longer
+    # needed after asserting.  This serves two purposes: (1) it simplifies
+    # the calling code when we want to check multiple logging calls in a
+    # single test method, and (2) it lets us check in the tearDown() method
+    # that there are no remaining log messages to be asserted.
+    #
+    # The latter ensures that no extra log messages are getting logged that
+    # the caller might not be aware of or may have forgotten to check for.
+    # This gets us a bit more mileage out of our tests without writing any
+    # additional code.
+    def assertMessages(self, messages):
+        """Assert the current array of log messages, and clear its contents.
+
+        Args:
+          messages: A list of log message strings.
+
+        """
+        try:
+            self._test_stream.assertMessages(messages)
+        finally:
+            # We want to clear the array of messages even in the case of
+            # an Exception (e.g. an AssertionError).  Otherwise, another
+            # AssertionError can occur in the tearDown() because the
+            # array might not have gotten emptied.
+            self._test_stream.messages = []
+
+
+# This class needs to inherit from unittest.TestCase.  Otherwise, the
+# setUp() and tearDown() methods will not get fired for test case classes
+# that inherit from this class -- even if the class inherits from *both*
+# unittest.TestCase and LoggingTestCase.
+#
+# FIXME: Rename this class to LoggingTestCaseBase to be sure that
+#        the unittest module does not interpret this class as a unittest
+#        test case itself.
+class LoggingTestCase(unittest.TestCase):
+
+    """Supports end-to-end unit-testing of log messages.
+
+        Sample usage:
+
+          class SampleTest(LoggingTestCase):
+
+              def test_logging_in_some_method(self):
+                  call_some_method()  # Contains calls to _log.info(), etc.
+
+                  # Check the resulting log messages.
+                  self.assertLog(["INFO: expected message #1",
+                                  "WARNING: expected message #2"])
+
+    """
+
+    def setUp(self):
+        self._log = LogTesting.setUp(self)
+
+    def tearDown(self):
+        self._log.tearDown()
+
+    def logMessages(self):
+        """Return the current list of log messages."""
+        return self._log.messages()
+
+    # FIXME: Add a clearMessages() method for cases where the caller
+    #        deliberately doesn't want to assert every message.
+
+    # See the code comments preceding LogTesting.assertMessages() for
+    # an explanation of why we clear the array of messages after
+    # asserting its contents.
+    def assertLog(self, messages):
+        """Assert the current array of log messages, and clear its contents.
+
+        Args:
+          messages: A list of log message strings.
+
+        """
+        self._log.assertMessages(messages)
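+
+
+# Illustrative sketch only (not part of the API above): TestLogStream can
+# also be wired to a handler directly, outside of LogTesting, when a test
+# wants full control over which logger and formatter are used.  The class
+# name _ExampleTest and the logger name "logtesting.example" are
+# assumptions made for this demonstration.
+if __name__ == '__main__':
+    class _ExampleTest(unittest.TestCase):
+        def test_stream_captures_formatted_messages(self):
+            stream = TestLogStream(self)
+            handler = logging.StreamHandler(stream)
+            handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
+
+            logger = logging.getLogger("logtesting.example")
+            logger.propagate = False  # Keep example messages away from other handlers.
+            logger.setLevel(logging.INFO)
+            logger.addHandler(handler)
+            try:
+                logger.info("example message")
+            finally:
+                logger.removeHandler(handler)
+
+            stream.assertMessages(["INFO: example message\n"])
+
+    unittest.main()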
diff --git a/Tools/Scripts/webkitpy/common/system/logutils.py b/Tools/Scripts/webkitpy/common/system/logutils.py
new file mode 100644
index 0000000..def3bec
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/logutils.py
@@ -0,0 +1,211 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Supports webkitpy logging."""
+
+# FIXME: Move this file to webkitpy/python24 since logging needs to
+#        be configured prior to running version-checking code.
+
+import logging
+import os
+import sys
+
+import webkitpy
+
+
+_log = logging.getLogger(__name__)
+
+# We set these directory paths lazily in get_logger() below.
+_scripts_dir = ""
+"""The normalized, absolute path to the ...Scripts directory."""
+
+_webkitpy_dir = ""
+"""The normalized, absolute path to the ...Scripts/webkitpy directory."""
+
+
+def _normalize_path(path):
+    """Return the given path normalized.
+
+    Converts a path to an absolute path, removes any trailing slashes,
+    removes any extension, and lower-cases it.
+
+    """
+    path = os.path.abspath(path)
+    path = os.path.normpath(path)
+    path = os.path.splitext(path)[0]  # Remove the extension, if any.
+    path = path.lower()
+
+    return path
+
+
+# Observe that the implementation of this function does not require
+# the use of any hard-coded strings like "webkitpy", etc.
+#
+# The main benefit this function has over using--
+#
+# _log = logging.getLogger(__name__)
+#
+# is that get_logger() returns the same value even if __name__ is
+# "__main__" -- i.e. even if the module is the script being executed
+# from the command-line.
+def get_logger(path):
+    """Return a logging.logger for the given path.
+
+    Returns:
+      A logger whose name is the name of the module corresponding to
+      the given path.  If the module is in webkitpy, the name is
+      the fully-qualified dotted module name beginning with webkitpy....
+      Otherwise, the name is the base name of the module (i.e. without
+      any dotted module name prefix).
+
+    Args:
+      path: The path of the module.  Normally, this parameter should be
+            the __file__ variable of the module.
+
+    Sample usage:
+
+      from webkitpy.common.system import logutils
+
+      _log = logutils.get_logger(__file__)
+
+    """
+    # Since we assign to _scripts_dir and _webkitpy_dir in this function,
+    # we need to declare them global.
+    global _scripts_dir
+    global _webkitpy_dir
+
+    path = _normalize_path(path)
+
+    # Lazily evaluate _webkitpy_dir and _scripts_dir.
+    if not _scripts_dir:
+        # The normalized, absolute path to ...Scripts/webkitpy/__init__.
+        webkitpy_path = _normalize_path(webkitpy.__file__)
+
+        _webkitpy_dir = os.path.split(webkitpy_path)[0]
+        _scripts_dir = os.path.split(_webkitpy_dir)[0]
+
+    if path.startswith(_webkitpy_dir):
+        # Remove the initial Scripts directory portion, so the path
+        # starts with /webkitpy, for example "/webkitpy/common/system/logutils".
+        path = path[len(_scripts_dir):]
+
+        parts = []
+        while True:
+            (path, tail) = os.path.split(path)
+            if not tail:
+                break
+            parts.insert(0, tail)
+
+        logger_name = ".".join(parts)  # For example, webkitpy.common.system.logutils.
+    else:
+        # The path is outside of webkitpy.  Default to the basename
+        # without the extension.
+        basename = os.path.basename(path)
+        logger_name = os.path.splitext(basename)[0]
+
+    return logging.getLogger(logger_name)
+
+
+def _default_handlers(stream, logging_level):
+    """Return a list of the default logging handlers to use.
+
+    Args:
+      stream: See the configure_logging() docstring.
+      logging_level: See the configure_logging() docstring.
+
+    """
+    # Create the filter.
+    def should_log(record):
+        """Return whether a logging.LogRecord should be logged."""
+        # FIXME: Enable the logging of autoinstall messages once
+        #        autoinstall is adjusted.  Currently, autoinstall logs
+        #        INFO messages when importing already-downloaded packages,
+        #        which is too verbose.
+        if record.name.startswith("webkitpy.thirdparty.autoinstall"):
+            return False
+        return True
+
+    logging_filter = logging.Filter()
+    logging_filter.filter = should_log
+
+    # Create the handler.
+    handler = logging.StreamHandler(stream)
+    if logging_level == logging.DEBUG:
+        formatter = logging.Formatter("%(name)s: [%(levelname)s] %(message)s")
+    else:
+        formatter = logging.Formatter("%(message)s")
+
+    handler.setFormatter(formatter)
+    handler.addFilter(logging_filter)
+
+    return [handler]
+
+
+def configure_logging(logging_level=None, logger=None, stream=None,
+                      handlers=None):
+    """Configure logging for standard purposes.
+
+    Returns:
+      A list of references to the logging handlers added to the root
+      logger.  This allows the caller to later remove the handlers
+      using logger.removeHandler.  This is useful primarily during unit
+      testing where the caller may want to configure logging temporarily
+      and then undo the configuring.
+
+    Args:
+      logging_level: The minimum logging level to log.  Defaults to
+                     logging.INFO.
+      logger: A logging.Logger instance to configure.  This parameter
+              should be used only in unit tests.  Defaults to the
+              root logger.
+      stream: A file-like object to log to, used in creating the default
+              handlers.  The stream must define an "encoding" data attribute,
+              or else logging raises an error.  Defaults to sys.stderr.
+      handlers: A list of logging.Handler instances to add to the logger
+                being configured.  If this parameter is provided, then the
+                stream parameter is not used.
+
+    """
+    # If the stream does not define an "encoding" data attribute, the
+    # logging module can throw an error like the following:
+    #
+    # Traceback (most recent call last):
+    #   File "/System/Library/Frameworks/Python.framework/Versions/2.6/...
+    #         lib/python2.6/logging/__init__.py", line 761, in emit
+    #     self.stream.write(fs % msg.encode(self.stream.encoding))
+    # LookupError: unknown encoding: unknown
+    if logging_level is None:
+        logging_level = logging.INFO
+    if logger is None:
+        logger = logging.getLogger()
+    if stream is None:
+        stream = sys.stderr
+    if handlers is None:
+        handlers = _default_handlers(stream, logging_level)
+
+    logger.setLevel(logging_level)
+
+    for handler in handlers:
+        logger.addHandler(handler)
+
+    _log.debug("Debug logging enabled.")
+
+    return handlers
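+
+
+# The block below is an illustrative sketch only, not part of the module's
+# API: it shows one plausible way configure_logging() and get_logger() can
+# be combined from a command-line entry point and then undone, the way a
+# unit test's tearDown() would undo it.  The names example_handlers,
+# example_log, and example_handler are assumptions made for this sketch.
+if __name__ == '__main__':
+    example_handlers = configure_logging(logging_level=logging.DEBUG)
+    example_log = get_logger(__file__)
+    example_log.debug("configure_logging() example message.")
+
+    # Remove the handlers we added so the root logger is left untouched.
+    for example_handler in example_handlers:
+        logging.getLogger().removeHandler(example_handler)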
diff --git a/Tools/Scripts/webkitpy/common/system/logutils_unittest.py b/Tools/Scripts/webkitpy/common/system/logutils_unittest.py
new file mode 100644
index 0000000..72789eb
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/logutils_unittest.py
@@ -0,0 +1,158 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for logutils.py."""
+
+import logging
+import os
+import unittest
+
+from webkitpy.common.system.logtesting import LogTesting
+from webkitpy.common.system.logtesting import TestLogStream
+from webkitpy.common.system import logutils
+
+
+class GetLoggerTest(unittest.TestCase):
+
+    """Tests get_logger()."""
+
+    def test_get_logger_in_webkitpy(self):
+        logger = logutils.get_logger(__file__)
+        self.assertEquals(logger.name, "webkitpy.common.system.logutils_unittest")
+
+    def test_get_logger_not_in_webkitpy(self):
+        # Temporarily change the working directory so that we
+        # can test get_logger() for a path outside of webkitpy.
+        working_directory = os.getcwd()
+        root_dir = "/"
+        os.chdir(root_dir)
+
+        logger = logutils.get_logger("/Tools/Scripts/test-webkitpy")
+        self.assertEquals(logger.name, "test-webkitpy")
+
+        logger = logutils.get_logger("/Tools/Scripts/test-webkitpy.py")
+        self.assertEquals(logger.name, "test-webkitpy")
+
+        os.chdir(working_directory)
+
+
+class ConfigureLoggingTestBase(unittest.TestCase):
+
+    """Base class for configure_logging() unit tests."""
+
+    def _logging_level(self):
+        raise Exception("Not implemented.")
+
+    def setUp(self):
+        log_stream = TestLogStream(self)
+
+        # Use a logger other than the root logger or one prefixed with
+        # "webkitpy." so as not to conflict with test-webkitpy logging.
+        logger = logging.getLogger("unittest")
+
+        # Configure the test logger not to pass messages along to the
+        # root logger.  This prevents test messages from being
+        # propagated to loggers used by test-webkitpy logging (e.g.
+        # the root logger).
+        logger.propagate = False
+
+        logging_level = self._logging_level()
+        self._handlers = logutils.configure_logging(logging_level=logging_level,
+                                                    logger=logger,
+                                                    stream=log_stream)
+        self._log = logger
+        self._log_stream = log_stream
+
+    def tearDown(self):
+        """Reset logging to its original state.
+
+        This method ensures that the logging configuration set up
+        for a unit test does not affect logging in other unit tests.
+
+        """
+        logger = self._log
+        for handler in self._handlers:
+            logger.removeHandler(handler)
+
+    def _assert_log_messages(self, messages):
+        """Assert that the logged messages equal the given messages."""
+        self._log_stream.assertMessages(messages)
+
+
+class ConfigureLoggingTest(ConfigureLoggingTestBase):
+
+    """Tests configure_logging() with the default logging level."""
+
+    def _logging_level(self):
+        return None
+
+    def test_info_message(self):
+        self._log.info("test message")
+        self._assert_log_messages(["test message\n"])
+
+    def test_debug_message(self):
+        self._log.debug("test message")
+        self._assert_log_messages([])
+
+    def test_below_threshold_message(self):
+        # We test the boundary case of a logging level equal to 19.
+        # In practice, we will probably only be calling log.debug(),
+        # which corresponds to a logging level of 10.
+        level = logging.INFO - 1  # Equals 19.
+        self._log.log(level, "test message")
+        self._assert_log_messages([])
+
+    def test_two_messages(self):
+        self._log.info("message1")
+        self._log.info("message2")
+        self._assert_log_messages(["message1\n",
+                                   "message2\n"])
+
+
+class ConfigureLoggingVerboseTest(ConfigureLoggingTestBase):
+    def _logging_level(self):
+        return logging.DEBUG
+
+    def test_info_message(self):
+        self._log.info("test message")
+        self._assert_log_messages(["unittest: [INFO] test message\n"])
+
+    def test_debug_message(self):
+        self._log.debug("test message")
+        self._assert_log_messages(["unittest: [DEBUG] test message\n"])
+
+
+class ConfigureLoggingCustomLevelTest(ConfigureLoggingTestBase):
+
+    """Tests configure_logging() with a custom logging level."""
+
+    _level = 36
+
+    def _logging_level(self):
+        return self._level
+
+    def test_logged_message(self):
+        self._log.log(self._level, "test message")
+        self._assert_log_messages(["test message\n"])
+
+    def test_below_threshold_message(self):
+        self._log.log(self._level - 1, "test message")
+        self._assert_log_messages([])
diff --git a/Tools/Scripts/webkitpy/common/system/outputcapture.py b/Tools/Scripts/webkitpy/common/system/outputcapture.py
new file mode 100644
index 0000000..78a12f0
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/outputcapture.py
@@ -0,0 +1,121 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Class for unittest support.  Used for capturing stderr/stdout.
+
+import logging
+import sys
+import unittest
+from StringIO import StringIO
+
+
+class OutputCapture(object):
+    # By default we capture the output to a stream. Other modules may override
+    # this function in order to do things like pass through the output. See
+    # webkitpy.test.main for an example.
+    @staticmethod
+    def stream_wrapper(stream):
+        return StringIO()
+
+    def __init__(self):
+        self.saved_outputs = dict()
+        self._log_level = logging.INFO
+
+    def set_log_level(self, log_level):
+        self._log_level = log_level
+        if hasattr(self, '_logs_handler'):
+            self._logs_handler.setLevel(self._log_level)
+
+    def _capture_output_with_name(self, output_name):
+        stream = getattr(sys, output_name)
+        captured_output = self.stream_wrapper(stream)
+        self.saved_outputs[output_name] = stream
+        setattr(sys, output_name, captured_output)
+        return captured_output
+
+    def _restore_output_with_name(self, output_name):
+        captured_output = getattr(sys, output_name).getvalue()
+        setattr(sys, output_name, self.saved_outputs[output_name])
+        del self.saved_outputs[output_name]
+        return captured_output
+
+    def capture_output(self):
+        self._logs = StringIO()
+        self._logs_handler = logging.StreamHandler(self._logs)
+        self._logs_handler.setLevel(self._log_level)
+        self._logger = logging.getLogger()
+        self._orig_log_level = self._logger.level
+        self._logger.addHandler(self._logs_handler)
+        self._logger.setLevel(min(self._log_level, self._orig_log_level))
+        return (self._capture_output_with_name("stdout"), self._capture_output_with_name("stderr"))
+
+    def restore_output(self):
+        self._logger.removeHandler(self._logs_handler)
+        self._logger.setLevel(self._orig_log_level)
+        self._logs_handler.flush()
+        self._logs.flush()
+        logs_string = self._logs.getvalue()
+        delattr(self, '_logs_handler')
+        delattr(self, '_logs')
+        return (self._restore_output_with_name("stdout"), self._restore_output_with_name("stderr"), logs_string)
+
+    def assert_outputs(self, testcase, function, args=[], kwargs={}, expected_stdout="", expected_stderr="", expected_exception=None, expected_logs=None):
+        self.capture_output()
+        try:
+            if expected_exception:
+                return_value = testcase.assertRaises(expected_exception, function, *args, **kwargs)
+            else:
+                return_value = function(*args, **kwargs)
+        finally:
+            (stdout_string, stderr_string, logs_string) = self.restore_output()
+
+        testcase.assertEqual(stdout_string, expected_stdout)
+        testcase.assertEqual(stderr_string, expected_stderr)
+        if expected_logs is not None:
+            testcase.assertEqual(logs_string, expected_logs)
+        # This is a little strange, but I don't know where else to return this information.
+        return return_value
+
+
+class OutputCaptureTestCaseBase(unittest.TestCase):
+    def setUp(self):
+        unittest.TestCase.setUp(self)
+        self.output_capture = OutputCapture()
+        (self.__captured_stdout, self.__captured_stderr) = self.output_capture.capture_output()
+
+    def tearDown(self):
+        del self.__captured_stdout
+        del self.__captured_stderr
+        self.output_capture.restore_output()
+        unittest.TestCase.tearDown(self)
+
+    def assertStdout(self, expected_stdout):
+        self.assertEquals(expected_stdout, self.__captured_stdout.getvalue())
+
+    def assertStderr(self, expected_stderr):
+        self.assertEquals(expected_stderr, self.__captured_stderr.getvalue())
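+
+
+# Illustrative sketch only (an assumption about typical usage, not part of
+# the class contract above): capture stdout and log output around a call,
+# then restore the real streams and inspect what was emitted.  The helper
+# _example() and the logger name "outputcapture.example" are invented for
+# this demonstration.
+if __name__ == '__main__':
+    def _example():
+        sys.stdout.write("to stdout\n")  # Captured via the stdout StringIO.
+        logging.getLogger("outputcapture.example").info("to the log")  # Captured via the log handler.
+
+    capturer = OutputCapture()
+    capturer.capture_output()
+    try:
+        _example()
+    finally:
+        stdout_text, stderr_text, logs_text = capturer.restore_output()
+    assert stdout_text == "to stdout\n"
+    assert stderr_text == ""
+    assert logs_text == "to the log\n"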
diff --git a/Tools/Scripts/webkitpy/common/system/outputcapture_unittest.py b/Tools/Scripts/webkitpy/common/system/outputcapture_unittest.py
new file mode 100644
index 0000000..da4347c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/outputcapture_unittest.py
@@ -0,0 +1,59 @@
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+
+
+_log = logging.getLogger(__name__)
+
+
+class OutputCaptureTest(unittest.TestCase):
+    def setUp(self):
+        self.output = OutputCapture()
+
+    def log_all_levels(self):
+        _log.info('INFO')
+        _log.warning('WARN')
+        _log.error('ERROR')
+        _log.critical('CRITICAL')
+
+    def assertLogged(self, expected_logs):
+        actual_stdout, actual_stderr, actual_logs = self.output.restore_output()
+        self.assertEqual('', actual_stdout)
+        self.assertEqual('', actual_stderr)
+        self.assertEqual(expected_logs, actual_logs)
+
+    def test_initial_log_level(self):
+        self.output.capture_output()
+        self.log_all_levels()
+        self.assertLogged('INFO\nWARN\nERROR\nCRITICAL\n')
+
+    def test_set_log_level(self):
+        self.output.set_log_level(logging.ERROR)
+        self.output.capture_output()
+        self.log_all_levels()
+        self.output.set_log_level(logging.WARN)
+        self.log_all_levels()
+        self.assertLogged('ERROR\nCRITICAL\nWARN\nERROR\nCRITICAL\n')
diff --git a/Tools/Scripts/webkitpy/common/system/path.py b/Tools/Scripts/webkitpy/common/system/path.py
new file mode 100644
index 0000000..e5a66bf
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/path.py
@@ -0,0 +1,135 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""generic routines to convert platform-specific paths to URIs."""
+
+import atexit
+import subprocess
+import sys
+import threading
+import urllib
+
+
+def abspath_to_uri(platform, path):
+    """Converts a platform-specific absolute path to a file: URL."""
+    return "file:" + _escape(_convert_path(platform, path))
+
+
+def cygpath(path):
+    """Converts an absolute cygwin path to an absolute Windows path."""
+    return _CygPath.convert_using_singleton(path)
+
+
+# Note that this object is not thread-safe; calls from multiple threads
+# must be protected by a lock (as is done in cygpath()).
+class _CygPath(object):
+    """Manages a long-running 'cygpath' process for file conversion."""
+    _lock = None
+    _singleton = None
+
+    @staticmethod
+    def stop_cygpath_subprocess():
+        if not _CygPath._lock:
+            return
+
+        with _CygPath._lock:
+            if _CygPath._singleton:
+                _CygPath._singleton.stop()
+
+    @staticmethod
+    def convert_using_singleton(path):
+        if not _CygPath._lock:
+            _CygPath._lock = threading.Lock()
+
+        with _CygPath._lock:
+            if not _CygPath._singleton:
+                _CygPath._singleton = _CygPath()
+                # Make sure the cygpath subprocess always gets shutdown cleanly.
+                atexit.register(_CygPath.stop_cygpath_subprocess)
+
+            return _CygPath._singleton.convert(path)
+
+    def __init__(self):
+        self._child_process = None
+
+    def start(self):
+        assert(self._child_process is None)
+        args = ['cygpath', '-f', '-', '-wa']
+        self._child_process = subprocess.Popen(args,
+                                               stdin=subprocess.PIPE,
+                                               stdout=subprocess.PIPE)
+
+    def is_running(self):
+        if not self._child_process:
+            return False
+        return self._child_process.returncode is None
+
+    def stop(self):
+        if self._child_process:
+            self._child_process.stdin.close()
+            self._child_process.wait()
+        self._child_process = None
+
+    def convert(self, path):
+        if not self.is_running():
+            self.start()
+        self._child_process.stdin.write("%s\r\n" % path)
+        self._child_process.stdin.flush()
+        windows_path = self._child_process.stdout.readline().rstrip()
+        # Some versions of cygpath use lowercase drive letters while others
+        # use uppercase. We always convert to uppercase for consistency.
+        windows_path = '%s%s' % (windows_path[0].upper(), windows_path[1:])
+        return windows_path
+
+
+def _escape(path):
+    """Handle any characters in the path that should be escaped."""
+    # FIXME: Web browsers don't appear to blindly quote every character
+    # when converting filenames to file: URLs. Instead of using urllib's default
+    # rules, we allow a small list of other characters through un-escaped.
+    # It's unclear if this is the best possible solution.
+    return urllib.quote(path, safe='/+:')
+
+
+def _convert_path(platform, path):
+    """Handles any os-specific path separators, mappings, etc."""
+    if platform.is_cygwin():
+        return _winpath_to_uri(cygpath(path))
+    if platform.is_win():
+        return _winpath_to_uri(path)
+    return _unixypath_to_uri(path)
+
+
+def _winpath_to_uri(path):
+    """Converts a window absolute path to a file: URL."""
+    return "///" + path.replace("\\", "/")
+
+
+def _unixypath_to_uri(path):
+    """Converts a unix-style path to a file: URL."""
+    return "//" + path
diff --git a/Tools/Scripts/webkitpy/common/system/path_unittest.py b/Tools/Scripts/webkitpy/common/system/path_unittest.py
new file mode 100644
index 0000000..954d32d
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/path_unittest.py
@@ -0,0 +1,80 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+import sys
+
+from webkitpy.common.system.systemhost import SystemHost
+from webkitpy.common.system.platforminfo import PlatformInfo
+from webkitpy.common.system.platforminfo_mock import MockPlatformInfo
+from webkitpy.common.system import path
+
+class AbspathTest(unittest.TestCase):
+    def platforminfo(self):
+        return SystemHost().platform
+
+    def test_abspath_to_uri_cygwin(self):
+        if sys.platform != 'cygwin':
+            return
+        self.assertEquals(path.abspath_to_uri(self.platforminfo(), '/cygdrive/c/foo/bar.html'),
+                          'file:///C:/foo/bar.html')
+
+    def test_abspath_to_uri_unixy(self):
+        self.assertEquals(path.abspath_to_uri(MockPlatformInfo(), "/foo/bar.html"),
+                          'file:///foo/bar.html')
+
+    def test_abspath_to_uri_win(self):
+        if sys.platform != 'win32':
+            return
+        self.assertEquals(path.abspath_to_uri(self.platforminfo(), 'c:\\foo\\bar.html'),
+                         'file:///c:/foo/bar.html')
+
+    def test_abspath_to_uri_escaping_unixy(self):
+        self.assertEquals(path.abspath_to_uri(MockPlatformInfo(), '/foo/bar + baz%?.html'),
+                         'file:///foo/bar%20+%20baz%25%3F.html')
+
+    # Note that you can't have '?' in a filename on Windows.
+    def test_abspath_to_uri_escaping_cygwin(self):
+        if sys.platform != 'cygwin':
+            return
+        self.assertEquals(path.abspath_to_uri(self.platforminfo(), '/cygdrive/c/foo/bar + baz%.html'),
+                          'file:///C:/foo/bar%20+%20baz%25.html')
+
+    def test_stop_cygpath_subprocess(self):
+        if sys.platform != 'cygwin':
+            return
+
+        # Call cygpath to ensure the subprocess is running.
+        path.cygpath("/cygdrive/c/foo.txt")
+        self.assertTrue(path._CygPath._singleton.is_running())
+
+        # Stop it.
+        path._CygPath.stop_cygpath_subprocess()
+
+        # Ensure that it is stopped.
+        self.assertFalse(path._CygPath._singleton.is_running())
diff --git a/Tools/Scripts/webkitpy/common/system/platforminfo.py b/Tools/Scripts/webkitpy/common/system/platforminfo.py
new file mode 100644
index 0000000..b2451f5
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/platforminfo.py
@@ -0,0 +1,161 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import re
+import sys
+
+
+class PlatformInfo(object):
+    """This class provides a consistent (and mockable) interpretation of
+    system-specific values (like sys.platform and platform.mac_ver())
+    to be used by the rest of the webkitpy code base.
+
+    Public (static) properties:
+    -- os_name
+    -- os_version
+
+    Note that 'future' is returned for os_version if the operating system is
+    newer than any version known to the code.
+    """
+
+    def __init__(self, sys_module, platform_module, executive):
+        self._executive = executive
+        self._platform_module = platform_module
+        self.os_name = self._determine_os_name(sys_module.platform)
+        if self.os_name == 'linux':
+            self.os_version = self._determine_linux_version()
+        if self.os_name == 'freebsd':
+            self.os_version = platform_module.release()
+        if self.os_name.startswith('mac'):
+            self.os_version = self._determine_mac_version(platform_module.mac_ver()[0])
+        if self.os_name.startswith('win'):
+            self.os_version = self._determine_win_version(self._win_version_tuple(sys_module))
+        self._is_cygwin = sys_module.platform == 'cygwin'
+
+    def is_mac(self):
+        return self.os_name == 'mac'
+
+    def is_win(self):
+        return self.os_name == 'win'
+
+    def is_cygwin(self):
+        return self._is_cygwin
+
+    def is_linux(self):
+        return self.os_name == 'linux'
+
+    def is_freebsd(self):
+        return self.os_name == 'freebsd'
+
+    def display_name(self):
+        # platform.platform() returns Darwin information for Mac, which is just confusing.
+        if self.is_mac():
+            return "Mac OS X %s" % self._platform_module.mac_ver()[0]
+
+        # Returns strings like:
+        # Linux-2.6.18-194.3.1.el5-i686-with-redhat-5.5-Final
+        # Windows-2008ServerR2-6.1.7600
+        return self._platform_module.platform()
+
+    def total_bytes_memory(self):
+        if self.is_mac():
+            return long(self._executive.run_command(["sysctl", "-n", "hw.memsize"]))
+        return None
+
+    def terminal_width(self):
+        """Returns sys.maxint if the width cannot be determined."""
+        try:
+            if self.is_win():
+                # From http://code.activestate.com/recipes/440694-determine-size-of-console-window-on-windows/
+                from ctypes import windll, create_string_buffer
+                handle = windll.kernel32.GetStdHandle(-12)  # -12 == stderr
+                console_screen_buffer_info = create_string_buffer(22)  # 22 == sizeof(console_screen_buffer_info)
+                if windll.kernel32.GetConsoleScreenBufferInfo(handle, console_screen_buffer_info):
+                    import struct
+                    _, _, _, _, _, left, _, right, _, _, _ = struct.unpack("hhhhHhhhhhh", console_screen_buffer_info.raw)
+                    # Note that we return 1 less than the width since writing into the rightmost column
+                    # automatically performs a line feed.
+                    return right - left
+                return sys.maxint
+            else:
+                import fcntl
+                import struct
+                import termios
+                packed = fcntl.ioctl(sys.stderr.fileno(), termios.TIOCGWINSZ, '\0' * 8)
+                _, columns, _, _ = struct.unpack('HHHH', packed)
+                return columns
+        except:
+            return sys.maxint
+
+    def _determine_os_name(self, sys_platform):
+        if sys_platform == 'darwin':
+            return 'mac'
+        if sys_platform.startswith('linux'):
+            return 'linux'
+        if sys_platform in ('win32', 'cygwin'):
+            return 'win'
+        if sys_platform.startswith('freebsd'):
+            return 'freebsd'
+        raise AssertionError('unrecognized platform string "%s"' % sys_platform)
+
+    def _determine_mac_version(self, mac_version_string):
+        release_version = mac_version_string.split('.')[1]
+        version_strings = {
+            '5': 'leopard',
+            '6': 'snowleopard',
+            '7': 'lion',
+            '8': 'mountainlion',
+        }
+        # Compare numerically so that a two-digit release version (e.g. '10')
+        # is not treated as older than '5' by string comparison.
+        assert int(release_version) >= min(int(version) for version in version_strings)
+        return version_strings.get(release_version, 'future')
+
+    def _determine_linux_version(self):
+        # FIXME: we ignore whatever the real version is and pretend it's lucid for now.
+        return 'lucid'
+
+    def _determine_win_version(self, win_version_tuple):
+        if win_version_tuple[:3] == (6, 1, 7600):
+            return '7sp0'
+        if win_version_tuple[:2] == (6, 0):
+            return 'vista'
+        if win_version_tuple[:2] == (5, 1):
+            return 'xp'
+        assert win_version_tuple[0] > 6 or win_version_tuple[1] >= 1, 'Unrecognized Windows version tuple: "%s"' % (win_version_tuple,)
+        return 'future'
+
+    def _win_version_tuple(self, sys_module):
+        if hasattr(sys_module, 'getwindowsversion'):
+            return sys_module.getwindowsversion()
+        return self._win_version_tuple_from_cmd()
+
+    def _win_version_tuple_from_cmd(self):
+        # Note that this should only ever be called on Windows, so this should always work.
+        ver_output = self._executive.run_command(['cmd', '/c', 'ver'])
+        match_object = re.search(r'(?P<major>\d)\.(?P<minor>\d)\.(?P<build>\d+)', ver_output)
+        assert match_object, 'cmd returned an unexpected version string: ' + ver_output
+        return tuple(map(int, match_object.groups()))
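+
+
+# Illustrative sketch only (mirrors what callers such as SystemHost are
+# expected to do): build a PlatformInfo from the real sys and platform
+# modules plus an Executive for the few values that require shelling out.
+if __name__ == '__main__':
+    import platform
+    from webkitpy.common.system.executive import Executive
+
+    info = PlatformInfo(sys, platform, Executive())
+    sys.stdout.write("%s %s (%s)\n" % (info.os_name, info.os_version, info.display_name()))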
diff --git a/Tools/Scripts/webkitpy/common/system/platforminfo_mock.py b/Tools/Scripts/webkitpy/common/system/platforminfo_mock.py
new file mode 100644
index 0000000..bc72810
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/platforminfo_mock.py
@@ -0,0 +1,57 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class MockPlatformInfo(object):
+    def __init__(self, os_name='mac', os_version='snowleopard'):
+        self.os_name = os_name
+        self.os_version = os_version
+
+    def is_mac(self):
+        return self.os_name == 'mac'
+
+    def is_linux(self):
+        return self.os_name == 'linux'
+
+    def is_win(self):
+        return self.os_name == 'win'
+
+    def is_cygwin(self):
+        return self.os_name == 'cygwin'
+
+    def is_freebsd(self):
+        return self.os_name == 'freebsd'
+
+    def display_name(self):
+        return "MockPlatform 1.0"
+
+    def total_bytes_memory(self):
+        return 3 * 1024 * 1024 * 1024  # 3 GB is a reasonable amount of RAM to mock.
+
+    def terminal_width(self):
+        return 80
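+
+
+# Illustrative sketch only: in unit tests MockPlatformInfo stands in for
+# PlatformInfo wherever code branches on is_mac()/is_win()/etc., as
+# path_unittest.py does when calling path.abspath_to_uri().
+if __name__ == '__main__':
+    mock_info = MockPlatformInfo(os_name='win', os_version='7sp0')
+    assert mock_info.is_win()
+    assert not mock_info.is_mac()
+    assert mock_info.display_name() == "MockPlatform 1.0"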
diff --git a/Tools/Scripts/webkitpy/common/system/platforminfo_unittest.py b/Tools/Scripts/webkitpy/common/system/platforminfo_unittest.py
new file mode 100644
index 0000000..a2b4255
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/platforminfo_unittest.py
@@ -0,0 +1,185 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import platform
+import sys
+import unittest
+
+from webkitpy.common.system.executive import Executive
+from webkitpy.common.system.executive_mock import MockExecutive, MockExecutive2
+from webkitpy.common.system.platforminfo import PlatformInfo
+
+
+def fake_sys(platform_str='darwin', windows_version_tuple=None):
+
+    class FakeSysModule(object):
+        platform = platform_str
+        if windows_version_tuple:
+            getwindowsversion = lambda x: windows_version_tuple
+
+    return FakeSysModule()
+
+
+def fake_platform(mac_version_string='10.6.3', release_string='bar'):
+
+    class FakePlatformModule(object):
+        def mac_ver(self):
+            return tuple([mac_version_string, tuple(['', '', '']), 'i386'])
+
+        def platform(self):
+            return 'foo'
+
+        def release(self):
+            return release_string
+
+    return FakePlatformModule()
+
+
+def fake_executive(output=None):
+    if output:
+        return MockExecutive2(output=output)
+    return MockExecutive2(exception=SystemError)
+
+
+class TestPlatformInfo(unittest.TestCase):
+    def make_info(self, sys_module=None, platform_module=None, executive=None):
+        return PlatformInfo(sys_module or fake_sys(), platform_module or fake_platform(), executive or fake_executive())
+
+    # FIXME: This should be called integration_test_real_code(), but integration tests aren't
+    # yet run by default and there's no reason not to run this everywhere by default.
+    def test_real_code(self):
+        # This test makes sure the real (unmocked) code actually works.
+        info = PlatformInfo(sys, platform, Executive())
+        self.assertNotEquals(info.os_name, '')
+        self.assertNotEquals(info.os_version, '')
+        self.assertNotEquals(info.display_name(), '')
+        self.assertTrue(info.is_mac() or info.is_win() or info.is_linux() or info.is_freebsd())
+        self.assertNotEquals(info.terminal_width(), None)
+
+        if info.is_mac():
+            self.assertTrue(info.total_bytes_memory() > 0)
+        else:
+            self.assertEquals(info.total_bytes_memory(), None)
+
+    def test_os_name_and_wrappers(self):
+        info = self.make_info(fake_sys('linux2'))
+        self.assertTrue(info.is_linux())
+        self.assertFalse(info.is_mac())
+        self.assertFalse(info.is_win())
+        self.assertFalse(info.is_freebsd())
+
+        info = self.make_info(fake_sys('linux3'))
+        self.assertTrue(info.is_linux())
+        self.assertFalse(info.is_mac())
+        self.assertFalse(info.is_win())
+        self.assertFalse(info.is_freebsd())
+
+        info = self.make_info(fake_sys('darwin'), fake_platform('10.6.3'))
+        self.assertEquals(info.os_name, 'mac')
+        self.assertFalse(info.is_linux())
+        self.assertTrue(info.is_mac())
+        self.assertFalse(info.is_win())
+        self.assertFalse(info.is_freebsd())
+
+        info = self.make_info(fake_sys('win32', tuple([6, 1, 7600])))
+        self.assertEquals(info.os_name, 'win')
+        self.assertFalse(info.is_linux())
+        self.assertFalse(info.is_mac())
+        self.assertTrue(info.is_win())
+        self.assertFalse(info.is_freebsd())
+
+        info = self.make_info(fake_sys('cygwin'), executive=fake_executive('6.1.7600'))
+        self.assertEquals(info.os_name, 'win')
+        self.assertFalse(info.is_linux())
+        self.assertFalse(info.is_mac())
+        self.assertTrue(info.is_win())
+        self.assertFalse(info.is_freebsd())
+
+        info = self.make_info(fake_sys('freebsd8'))
+        self.assertEquals(info.os_name, 'freebsd')
+        self.assertFalse(info.is_linux())
+        self.assertFalse(info.is_mac())
+        self.assertFalse(info.is_win())
+        self.assertTrue(info.is_freebsd())
+
+        self.assertRaises(AssertionError, self.make_info, fake_sys('vms'))
+
+    def test_os_version(self):
+        self.assertRaises(AssertionError, self.make_info, fake_sys('darwin'), fake_platform('10.4.3'))
+        self.assertEquals(self.make_info(fake_sys('darwin'), fake_platform('10.5.1')).os_version, 'leopard')
+        self.assertEquals(self.make_info(fake_sys('darwin'), fake_platform('10.6.1')).os_version, 'snowleopard')
+        self.assertEquals(self.make_info(fake_sys('darwin'), fake_platform('10.7.1')).os_version, 'lion')
+        self.assertEquals(self.make_info(fake_sys('darwin'), fake_platform('10.8.1')).os_version, 'mountainlion')
+        self.assertEquals(self.make_info(fake_sys('darwin'), fake_platform('10.9.0')).os_version, 'future')
+
+        self.assertEquals(self.make_info(fake_sys('linux2')).os_version, 'lucid')
+
+        self.assertEquals(self.make_info(fake_sys('freebsd8'), fake_platform('', '8.3-PRERELEASE')).os_version, '8.3-PRERELEASE')
+        self.assertEquals(self.make_info(fake_sys('freebsd9'), fake_platform('', '9.0-RELEASE')).os_version, '9.0-RELEASE')
+
+        self.assertRaises(AssertionError, self.make_info, fake_sys('win32', tuple([5, 0, 1234])))
+        self.assertEquals(self.make_info(fake_sys('win32', tuple([6, 2, 1234]))).os_version, 'future')
+        self.assertEquals(self.make_info(fake_sys('win32', tuple([6, 1, 7600]))).os_version, '7sp0')
+        self.assertEquals(self.make_info(fake_sys('win32', tuple([6, 0, 1234]))).os_version, 'vista')
+        self.assertEquals(self.make_info(fake_sys('win32', tuple([5, 1, 1234]))).os_version, 'xp')
+
+        self.assertRaises(AssertionError, self.make_info, fake_sys('win32'), executive=fake_executive('5.0.1234'))
+        self.assertEquals(self.make_info(fake_sys('cygwin'), executive=fake_executive('6.2.1234')).os_version, 'future')
+        self.assertEquals(self.make_info(fake_sys('cygwin'), executive=fake_executive('6.1.7600')).os_version, '7sp0')
+        self.assertEquals(self.make_info(fake_sys('cygwin'), executive=fake_executive('6.0.1234')).os_version, 'vista')
+        self.assertEquals(self.make_info(fake_sys('cygwin'), executive=fake_executive('5.1.1234')).os_version, 'xp')
+
+    def test_display_name(self):
+        info = self.make_info(fake_sys('darwin'))
+        self.assertNotEquals(info.display_name(), '')
+
+        info = self.make_info(fake_sys('win32', tuple([6, 1, 7600])))
+        self.assertNotEquals(info.display_name(), '')
+
+        info = self.make_info(fake_sys('linux2'))
+        self.assertNotEquals(info.display_name(), '')
+
+        info = self.make_info(fake_sys('freebsd9'))
+        self.assertNotEquals(info.display_name(), '')
+
+    def test_total_bytes_memory(self):
+        info = self.make_info(fake_sys('darwin'), fake_platform('10.6.3'), fake_executive('1234'))
+        self.assertEquals(info.total_bytes_memory(), 1234)
+
+        info = self.make_info(fake_sys('win32', tuple([6, 1, 7600])))
+        self.assertEquals(info.total_bytes_memory(), None)
+
+        info = self.make_info(fake_sys('linux2'))
+        self.assertEquals(info.total_bytes_memory(), None)
+
+        info = self.make_info(fake_sys('freebsd9'))
+        self.assertEquals(info.total_bytes_memory(), None)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/system/stack_utils.py b/Tools/Scripts/webkitpy/common/system/stack_utils.py
new file mode 100644
index 0000000..a343807
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/stack_utils.py
@@ -0,0 +1,67 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Simple routines for logging, obtaining thread stack information."""
+
+import sys
+import traceback
+
+
+def log_thread_state(logger, name, thread_id, msg=''):
+    """Log information about the given thread state."""
+    stack = _find_thread_stack(thread_id)
+    assert stack is not None
+    logger("")
+    logger("%s (tid %d) %s" % (name, thread_id, msg))
+    _log_stack(logger, stack)
+    logger("")
+
+
+def _find_thread_stack(thread_id):
+    """Returns a stack object that can be used to dump a stack trace for
+    the given thread id (or None if the id is not found)."""
+    for tid, stack in sys._current_frames().items():
+        if tid == thread_id:
+            return stack
+    return None
+
+
+def _log_stack(logger, stack):
+    """Log a stack trace to the logger callback."""
+    for filename, lineno, name, line in traceback.extract_stack(stack):
+        logger('File: "%s", line %d, in %s' % (filename, lineno, name))
+        if line:
+            logger('  %s' % line.strip())
+
+
+def log_traceback(logger, tb):
+    stack = traceback.extract_tb(tb)
+    for frame_str in traceback.format_list(stack):
+        for line in frame_str.split('\n'):
+            if line:
+                logger("  %s" % line)
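+
+
+# A minimal illustrative sketch of how these helpers fit together: dump the
+# current thread's stack through an arbitrary logging callback. The logger name
+# and message below are placeholders; the thread id comes from the standard
+# threading module.
+#
+#   import logging
+#   import threading
+#
+#   log = logging.getLogger(__name__)
+#   log_thread_state(log.info, "main", threading.current_thread().ident, "checkpoint")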
diff --git a/Tools/Scripts/webkitpy/common/system/stack_utils_unittest.py b/Tools/Scripts/webkitpy/common/system/stack_utils_unittest.py
new file mode 100644
index 0000000..625acf2
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/stack_utils_unittest.py
@@ -0,0 +1,72 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+import unittest
+
+from webkitpy.common.system import outputcapture
+from webkitpy.common.system import stack_utils
+
+
+def current_thread_id():
+    thread_id, _ = sys._current_frames().items()[0]
+    return thread_id
+
+
+class StackUtilsTest(unittest.TestCase):
+    def test_find_thread_stack_found(self):
+        thread_id = current_thread_id()
+        found_stack = stack_utils._find_thread_stack(thread_id)
+        self.assertNotEqual(found_stack, None)
+
+    def test_find_thread_stack_not_found(self):
+        found_stack = stack_utils._find_thread_stack(0)
+        self.assertEqual(found_stack, None)
+
+    def test_log_thread_state(self):
+        msgs = []
+
+        def logger(msg):
+            msgs.append(msg)
+
+        thread_id = current_thread_id()
+        stack_utils.log_thread_state(logger, "test-thread", thread_id,
+                                     "is tested")
+        self.assertTrue(msgs)
+
+    def test_log_traceback(self):
+        msgs = []
+
+        def logger(msg):
+            msgs.append(msg)
+
+        try:
+            raise ValueError
+        except:
+            stack_utils.log_traceback(logger, sys.exc_info()[2])
+        self.assertTrue(msgs)
diff --git a/Tools/Scripts/webkitpy/common/system/systemhost.py b/Tools/Scripts/webkitpy/common/system/systemhost.py
new file mode 100644
index 0000000..dfec68b
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/systemhost.py
@@ -0,0 +1,48 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import platform
+import sys
+
+from webkitpy.common.system import environment, executive, file_lock, filesystem, platforminfo, user, workspace
+
+
+class SystemHost(object):
+    def __init__(self):
+        self.executive = executive.Executive()
+        self.filesystem = filesystem.FileSystem()
+        self.user = user.User()
+        self.platform = platforminfo.PlatformInfo(sys, platform, self.executive)
+        self.workspace = workspace.Workspace(self.filesystem, self.executive)
+
+    def copy_current_environment(self):
+        return environment.Environment(os.environ.copy())
+
+    def make_file_lock(self, path):
+        return file_lock.FileLock(path)
diff --git a/Tools/Scripts/webkitpy/common/system/systemhost_mock.py b/Tools/Scripts/webkitpy/common/system/systemhost_mock.py
new file mode 100644
index 0000000..a529f34
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/systemhost_mock.py
@@ -0,0 +1,56 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.system.environment import Environment
+from webkitpy.common.system.executive_mock import MockExecutive
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.file_lock_mock import MockFileLock
+from webkitpy.common.system.platforminfo_mock import MockPlatformInfo
+from webkitpy.common.system.user_mock import MockUser
+from webkitpy.common.system.workspace_mock import MockWorkspace
+
+
+class MockSystemHost(object):
+    def __init__(self, log_executive=False, executive_throws_when_run=None, os_name=None, os_version=None, executive=None, filesystem=None):
+        self.executive = executive or MockExecutive(should_log=log_executive, should_throw_when_run=executive_throws_when_run)
+        self.filesystem = filesystem or MockFileSystem()
+        self.user = MockUser()
+        self.platform = MockPlatformInfo()
+        if os_name:
+            self.platform.os_name = os_name
+        if os_version:
+            self.platform.os_version = os_version
+
+        # FIXME: Should this take pointers to the filesystem and the executive?
+        self.workspace = MockWorkspace()
+
+    def copy_current_environment(self):
+        return Environment({"MOCK_ENVIRON_COPY": '1'})
+
+    def make_file_lock(self, path):
+        return MockFileLock(path)
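+
+
+# A minimal illustrative sketch of typical test usage: construct a host with
+# only the pieces a test cares about and let the rest default to mocks. The
+# os_name/os_version values and the path below are placeholders.
+#
+#   host = MockSystemHost(os_name='linux', os_version='lucid')
+#   host.filesystem.write_text_file('/tmp/example.txt', 'contents')
+#   assert host.platform.os_name == 'linux'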
diff --git a/Tools/Scripts/webkitpy/common/system/urlfetcher.py b/Tools/Scripts/webkitpy/common/system/urlfetcher.py
new file mode 100644
index 0000000..2d9e5ec
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/urlfetcher.py
@@ -0,0 +1,55 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Wrapper module for fetching URLs."""
+
+import urllib
+
+
+class UrlFetcher(object):
+    """Class with restricted interface to fetch URLs (makes testing easier)"""
+    def __init__(self, filesystem):
+        self._filesystem = filesystem
+
+    def fetch(self, url):
+        """Fetches the contents of the URL as a string."""
+        file_object = urllib.urlopen(url)
+        content = file_object.read()
+        file_object.close()
+        return content
+
+    def fetch_into_file(self, url):
+        """Fetches the contents of the URL into a temporary file and return the filename.
+
+        This is the equivalent of urllib.retrieve() except that we don't return any headers.
+        """
+        file_object, filename = self._filesystem.open_binary_tempfile('-fetched')
+        contents = self.fetch(url)
+        file_object.write(contents)
+        file_object.close()
+        return filename
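+
+
+# A minimal illustrative sketch of the two entry points above; the URLs below
+# are placeholders.
+#
+#   from webkitpy.common.system.filesystem import FileSystem
+#
+#   fetcher = UrlFetcher(FileSystem())
+#   contents = fetcher.fetch('http://example.com/results.json')
+#   zip_path = fetcher.fetch_into_file('http://example.com/results.zip')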
diff --git a/Tools/Scripts/webkitpy/common/system/urlfetcher_mock.py b/Tools/Scripts/webkitpy/common/system/urlfetcher_mock.py
new file mode 100644
index 0000000..e8a7532
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/urlfetcher_mock.py
@@ -0,0 +1,46 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+def make_fetcher_cls(urls):
+    """UrlFetcher factory routine that simulates network access
+    using a dict of URLs -> contents."""
+    class MockFetcher(object):
+        def __init__(self, filesystem):
+            self._filesystem = filesystem
+
+        def fetch(self, url):
+            return urls[url]
+
+        def fetch_into_file(self, url):
+            f, fn = self._filesystem.open_binary_tempfile('mockfetcher')
+            f.write(self.fetch(url))
+            f.close()
+            return fn
+
+    return MockFetcher
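+
+
+# A minimal illustrative sketch of how a test can swap in the factory; the URL
+# and contents below are placeholders.
+#
+#   from webkitpy.common.system.filesystem_mock import MockFileSystem
+#
+#   MockFetcher = make_fetcher_cls({'http://example.com/foo.txt': 'foo contents'})
+#   fetcher = MockFetcher(MockFileSystem())
+#   assert fetcher.fetch('http://example.com/foo.txt') == 'foo contents'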
diff --git a/Tools/Scripts/webkitpy/common/system/user.py b/Tools/Scripts/webkitpy/common/system/user.py
new file mode 100644
index 0000000..c49429c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/user.py
@@ -0,0 +1,175 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import getpass
+import logging
+import os
+import platform
+import re
+import shlex
+import subprocess
+import sys
+import webbrowser
+
+from webkitpy.common.system.executive import Executive
+from webkitpy.common.system.platforminfo import PlatformInfo
+
+
+_log = logging.getLogger(__name__)
+
+
+try:
+    import readline
+except ImportError:
+    if sys.platform != "win32":
+        # There is no readline module for win32, not much to do except cry.
+        _log.warn("Unable to import readline.")
+
+
+class User(object):
+    DEFAULT_NO = 'n'
+    DEFAULT_YES = 'y'
+
+    def __init__(self, platforminfo=None):
+        # We cannot get the PlatformInfo object from a SystemHost because
+        # User is part of SystemHost itself.
+        self._platforminfo = platforminfo or PlatformInfo(sys, platform, Executive())
+
+    # FIXME: These are @classmethods because bugzilla.py doesn't have a Tool object (thus no User instance).
+    @classmethod
+    def prompt(cls, message, repeat=1, raw_input=raw_input):
+        response = None
+        while (repeat and not response):
+            repeat -= 1
+            response = raw_input(message)
+        return response
+
+    @classmethod
+    def prompt_password(cls, message, repeat=1):
+        return cls.prompt(message, repeat=repeat, raw_input=getpass.getpass)
+
+    @classmethod
+    def prompt_with_multiple_lists(cls, list_title, subtitles, lists, can_choose_multiple=False, raw_input=raw_input):
+        item_index = 0
+        cumulated_list = []
+        print list_title
+        for i in range(len(subtitles)):
+            print "\n" + subtitles[i]
+            for item in lists[i]:
+                item_index += 1
+                print "%2d. %s" % (item_index, item)
+            cumulated_list += lists[i]
+        return cls._wait_on_list_response(cumulated_list, can_choose_multiple, raw_input)
+
+    @classmethod
+    def _wait_on_list_response(cls, list_items, can_choose_multiple, raw_input):
+        while True:
+            if can_choose_multiple:
+                response = cls.prompt("Enter one or more numbers (comma-separated) or ranges (e.g. 3-7), or \"all\": ", raw_input=raw_input)
+                if not response.strip() or response == "all":
+                    return list_items
+
+                try:
+                    indices = []
+                    for value in re.split("\s*,\s*", response):
+                        parts = value.split('-')
+                        if len(parts) == 2:
+                            indices += range(int(parts[0]) - 1, int(parts[1]))
+                        else:
+                            indices.append(int(value) - 1)
+                except ValueError, err:
+                    continue
+
+                return [list_items[i] for i in indices]
+            else:
+                try:
+                    result = int(cls.prompt("Enter a number: ", raw_input=raw_input)) - 1
+                except ValueError, err:
+                    continue
+                return list_items[result]
+
+    @classmethod
+    def prompt_with_list(cls, list_title, list_items, can_choose_multiple=False, raw_input=raw_input):
+        print list_title
+        i = 0
+        for item in list_items:
+            i += 1
+            print "%2d. %s" % (i, item)
+        return cls._wait_on_list_response(list_items, can_choose_multiple, raw_input)
+
+    def edit(self, files):
+        editor = os.environ.get("EDITOR") or "vi"
+        args = shlex.split(editor)
+        # Note: Not thread safe: http://bugs.python.org/issue2320
+        subprocess.call(args + files)
+
+    def _warn_if_application_is_xcode(self, edit_application):
+        if "Xcode" in edit_application:
+            print "Instead of using Xcode.app, consider using EDITOR=\"xed --wait\"."
+
+    def edit_changelog(self, files):
+        edit_application = os.environ.get("CHANGE_LOG_EDIT_APPLICATION")
+        if edit_application and self._platforminfo.is_mac():
+            # On Mac we support editing ChangeLogs using an application.
+            args = shlex.split(edit_application)
+            print "Using editor in the CHANGE_LOG_EDIT_APPLICATION environment variable."
+            print "Please quit the editor application when done editing."
+            self._warn_if_application_is_xcode(edit_application)
+            subprocess.call(["open", "-W", "-n", "-a"] + args + files)
+            return
+        self.edit(files)
+
+    def page(self, message):
+        pager = os.environ.get("PAGER") or "less"
+        try:
+            # Note: Not thread safe: http://bugs.python.org/issue2320
+            child_process = subprocess.Popen([pager], stdin=subprocess.PIPE)
+            child_process.communicate(input=message)
+        except IOError, e:
+            pass
+
+    def confirm(self, message=None, default=DEFAULT_YES, raw_input=raw_input):
+        if not message:
+            message = "Continue?"
+        choice = {'y': 'Y/n', 'n': 'y/N'}[default]
+        response = raw_input("%s [%s]: " % (message, choice))
+        if not response:
+            response = default
+        return response.lower() == 'y'
+
+    def can_open_url(self):
+        try:
+            webbrowser.get()
+            return True
+        except webbrowser.Error, e:
+            return False
+
+    def open_url(self, url):
+        if not self.can_open_url():
+            _log.warn("Failed to open %s" % url)
+        webbrowser.open(url)
diff --git a/Tools/Scripts/webkitpy/common/system/user_mock.py b/Tools/Scripts/webkitpy/common/system/user_mock.py
new file mode 100644
index 0000000..16f79a0
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/user_mock.py
@@ -0,0 +1,66 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.system.deprecated_logging import log
+
+
+class MockUser(object):
+
+    @classmethod
+    def prompt(cls, message, repeat=1, raw_input=raw_input):
+        return "Mock user response"
+
+    @classmethod
+    def prompt_with_list(cls, list_title, list_items, can_choose_multiple=False, raw_input=raw_input):
+        pass
+
+    def __init__(self):
+        self.opened_urls = []
+
+    def edit(self, files):
+        pass
+
+    def edit_changelog(self, files):
+        pass
+
+    def page(self, message):
+        pass
+
+    def confirm(self, message=None, default='y'):
+        log(message)
+        return default == 'y'
+
+    def can_open_url(self):
+        return True
+
+    def open_url(self, url):
+        self.opened_urls.append(url)
+        if url.startswith("file://"):
+            log("MOCK: user.open_url: file://...")
+            return
+        log("MOCK: user.open_url: %s" % url)
diff --git a/Tools/Scripts/webkitpy/common/system/user_unittest.py b/Tools/Scripts/webkitpy/common/system/user_unittest.py
new file mode 100644
index 0000000..86b9db7
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/user_unittest.py
@@ -0,0 +1,139 @@
+# Copyright (C) 2010 Research in Motion Ltd. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Research in Motion Ltd. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.system.user import User
+
+class UserTest(unittest.TestCase):
+
+    example_user_response = "example user response"
+
+    def test_prompt_repeat(self):
+        self.repeatsRemaining = 2
+        def mock_raw_input(message):
+            self.repeatsRemaining -= 1
+            if not self.repeatsRemaining:
+                return UserTest.example_user_response
+            return None
+        self.assertEqual(User.prompt("input", repeat=self.repeatsRemaining, raw_input=mock_raw_input), UserTest.example_user_response)
+
+    def test_prompt_when_exceeded_repeats(self):
+        self.repeatsRemaining = 2
+        def mock_raw_input(message):
+            self.repeatsRemaining -= 1
+            return None
+        self.assertEqual(User.prompt("input", repeat=self.repeatsRemaining, raw_input=mock_raw_input), None)
+
+    def test_prompt_with_multiple_lists(self):
+        def run_prompt_test(inputs, expected_result, can_choose_multiple=False):
+            def mock_raw_input(message):
+                return inputs.pop(0)
+            output_capture = OutputCapture()
+            actual_result = output_capture.assert_outputs(
+                self,
+                User.prompt_with_multiple_lists,
+                args=["title", ["subtitle1", "subtitle2"], [["foo", "bar"], ["foobar", "barbaz", "foobaz"]]],
+                kwargs={"can_choose_multiple": can_choose_multiple, "raw_input": mock_raw_input},
+                expected_stdout="title\n\nsubtitle1\n 1. foo\n 2. bar\n\nsubtitle2\n 3. foobar\n 4. barbaz\n 5. foobaz\n")
+            self.assertEqual(actual_result, expected_result)
+            self.assertEqual(len(inputs), 0)
+
+        run_prompt_test(["1"], "foo")
+        run_prompt_test(["badinput", "2"], "bar")
+        run_prompt_test(["3"], "foobar")
+        run_prompt_test(["4"], "barbaz")
+        run_prompt_test(["5"], "foobaz")
+
+        run_prompt_test(["1,2"], ["foo", "bar"], can_choose_multiple=True)
+        run_prompt_test(["1-3"], ["foo", "bar", "foobar"], can_choose_multiple=True)
+        run_prompt_test(["1-2,3"], ["foo", "bar", "foobar"], can_choose_multiple=True)
+        run_prompt_test(["2-1,3"], ["foobar"], can_choose_multiple=True)
+        run_prompt_test(["  1,  2   "], ["foo", "bar"], can_choose_multiple=True)
+        run_prompt_test(["all"], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True)
+        run_prompt_test([""], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True)
+        run_prompt_test(["  "], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True)
+        run_prompt_test(["badinput", "all"], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True)
+
+    def test_prompt_with_list(self):
+        def run_prompt_test(inputs, expected_result, can_choose_multiple=False):
+            def mock_raw_input(message):
+                return inputs.pop(0)
+            output_capture = OutputCapture()
+            actual_result = output_capture.assert_outputs(
+                self,
+                User.prompt_with_list,
+                args=["title", ["foo", "bar"]],
+                kwargs={"can_choose_multiple": can_choose_multiple, "raw_input": mock_raw_input},
+                expected_stdout="title\n 1. foo\n 2. bar\n")
+            self.assertEqual(actual_result, expected_result)
+            self.assertEqual(len(inputs), 0)
+
+        run_prompt_test(["1"], "foo")
+        run_prompt_test(["badinput", "2"], "bar")
+
+        run_prompt_test(["1,2"], ["foo", "bar"], can_choose_multiple=True)
+        run_prompt_test(["  1,  2   "], ["foo", "bar"], can_choose_multiple=True)
+        run_prompt_test(["all"], ["foo", "bar"], can_choose_multiple=True)
+        run_prompt_test([""], ["foo", "bar"], can_choose_multiple=True)
+        run_prompt_test(["  "], ["foo", "bar"], can_choose_multiple=True)
+        run_prompt_test(["badinput", "all"], ["foo", "bar"], can_choose_multiple=True)
+
+    def test_confirm(self):
+        test_cases = (
+            (("Continue? [Y/n]: ", True), (User.DEFAULT_YES, 'y')),
+            (("Continue? [Y/n]: ", False), (User.DEFAULT_YES, 'n')),
+            (("Continue? [Y/n]: ", True), (User.DEFAULT_YES, '')),
+            (("Continue? [Y/n]: ", False), (User.DEFAULT_YES, 'q')),
+            (("Continue? [y/N]: ", True), (User.DEFAULT_NO, 'y')),
+            (("Continue? [y/N]: ", False), (User.DEFAULT_NO, 'n')),
+            (("Continue? [y/N]: ", False), (User.DEFAULT_NO, '')),
+            (("Continue? [y/N]: ", False), (User.DEFAULT_NO, 'q')),
+        )
+        for test_case in test_cases:
+            expected, inputs = test_case
+
+            def mock_raw_input(message):
+                self.assertEquals(expected[0], message)
+                return inputs[1]
+
+            result = User().confirm(default=inputs[0],
+                                    raw_input=mock_raw_input)
+            self.assertEquals(expected[1], result)
+
+    def test_warn_if_application_is_xcode(self):
+        output = OutputCapture()
+        user = User()
+        output.assert_outputs(self, user._warn_if_application_is_xcode, ["TextMate"])
+        output.assert_outputs(self, user._warn_if_application_is_xcode, ["/Applications/TextMate.app"])
+        output.assert_outputs(self, user._warn_if_application_is_xcode, ["XCode"])  # case sensitive matching
+
+        xcode_warning = "Instead of using Xcode.app, consider using EDITOR=\"xed --wait\".\n"
+        output.assert_outputs(self, user._warn_if_application_is_xcode, ["Xcode"], expected_stdout=xcode_warning)
+        output.assert_outputs(self, user._warn_if_application_is_xcode, ["/Developer/Applications/Xcode.app"], expected_stdout=xcode_warning)
diff --git a/Tools/Scripts/webkitpy/common/system/workspace.py b/Tools/Scripts/webkitpy/common/system/workspace.py
new file mode 100644
index 0000000..6868376
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/workspace.py
@@ -0,0 +1,73 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# A home for file logic which should sit above FileSystem, but
+# below more complicated objects.
+
+import logging
+import zipfile
+
+from webkitpy.common.system.executive import ScriptError
+
+
+_log = logging.getLogger(__name__)
+
+
+class Workspace(object):
+    def __init__(self, filesystem, executive):
+        self._filesystem = filesystem
+        self._executive = executive  # FIXME: Remove if create_zip is moved to python.
+
+    def find_unused_filename(self, directory, name, extension, search_limit=100):
+        for count in range(search_limit):
+            if count:
+                target_name = "%s-%s.%s" % (name, count, extension)
+            else:
+                target_name = "%s.%s" % (name, extension)
+            target_path = self._filesystem.join(directory, target_name)
+            if not self._filesystem.exists(target_path):
+                return target_path
+        # If we can't find an unused name in search_limit tries, just give up.
+        return None
+
+    def create_zip(self, zip_path, source_path, zip_class=zipfile.ZipFile):
+        # It's possible to create zips with Python:
+        # zip_file = ZipFile(zip_path, 'w')
+        # for root, dirs, files in os.walk(source_path):
+        #     for path in files:
+        #         absolute_path = os.path.join(root, path)
+        #         zip_file.write(absolute_path, os.path.relpath(absolute_path, source_path))
+        # However, getting the paths, encoding and compression correct could be non-trivial.
+        # So, for now we depend on the environment having "zip" installed (likely fails on Win32).
+        try:
+            self._executive.run_command(['zip', '-9', '-r', zip_path, '.'], cwd=source_path)
+        except ScriptError, e:
+            _log.error("Workspace.create_zip failed:\n%s" % e.message_with_output())
+            return None
+
+        return zip_class(zip_path)
diff --git a/Tools/Scripts/webkitpy/common/system/workspace_mock.py b/Tools/Scripts/webkitpy/common/system/workspace_mock.py
new file mode 100644
index 0000000..005f86c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/workspace_mock.py
@@ -0,0 +1,35 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class MockWorkspace(object):
+    def find_unused_filename(self, directory, name, extension, search_limit=10):
+        return "%s/%s.%s" % (directory, name, extension)
+
+    def create_zip(self, zip_path, source_path):
+        return object()  # Something that is not None
diff --git a/Tools/Scripts/webkitpy/common/system/workspace_unittest.py b/Tools/Scripts/webkitpy/common/system/workspace_unittest.py
new file mode 100644
index 0000000..49094ac
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/workspace_unittest.py
@@ -0,0 +1,67 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.system.workspace import Workspace
+from webkitpy.common.system.executive_mock import MockExecutive
+
+
+class WorkspaceTest(unittest.TestCase):
+
+    def test_find_unused_filename(self):
+        filesystem = MockFileSystem({
+            "dir/foo.jpg": "",
+            "dir/foo-1.jpg": "",
+            "dir/foo-2.jpg": "",
+        })
+        workspace = Workspace(filesystem, None)
+        self.assertEqual(workspace.find_unused_filename("bar", "bar", "bar"), "bar/bar.bar")
+        self.assertEqual(workspace.find_unused_filename("dir", "foo", "jpg", search_limit=1), None)
+        self.assertEqual(workspace.find_unused_filename("dir", "foo", "jpg", search_limit=2), None)
+        self.assertEqual(workspace.find_unused_filename("dir", "foo", "jpg"), "dir/foo-3.jpg")
+
+    def test_create_zip(self):
+        workspace = Workspace(None, MockExecutive(should_log=True))
+        expected_stderr = "MOCK run_command: ['zip', '-9', '-r', '/zip/path', '.'], cwd=/source/path\n"
+        class MockZipFile(object):
+            def __init__(self, path):
+                self.filename = path
+        archive = OutputCapture().assert_outputs(self, workspace.create_zip, ["/zip/path", "/source/path", MockZipFile], expected_stderr=expected_stderr)
+        self.assertEqual(archive.filename, "/zip/path")
+
+    def test_create_zip_exception(self):
+        workspace = Workspace(None, MockExecutive(should_log=True, should_throw=True))
+        expected_stderr = "MOCK run_command: ['zip', '-9', '-r', '/zip/path', '.'], cwd=/source/path\n"
+        class MockZipFile(object):
+            def __init__(self, path):
+                self.filename = path
+        archive = OutputCapture().assert_outputs(self, workspace.create_zip, ["/zip/path", "/source/path", MockZipFile], expected_stderr=expected_stderr)
+        self.assertEqual(archive, None)
diff --git a/Tools/Scripts/webkitpy/common/system/zip_mock.py b/Tools/Scripts/webkitpy/common/system/zip_mock.py
new file mode 100644
index 0000000..dcfaba7
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/zip_mock.py
@@ -0,0 +1,55 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.system.fileset import FileSetFileHandle
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+
+
+class MockZip(object):
+    """A mock zip file that can have new files inserted into it."""
+    def __init__(self, filesystem=None):
+        self._filesystem = filesystem or MockFileSystem()
+        self._files = {}
+
+    def __str__(self):
+        return "MockZip"
+
+    def insert(self, filename, content):
+        self._files[filename] = content
+
+    def namelist(self):
+        return self._files.keys()
+
+    def open(self, filename):
+        return FileSetFileHandle(self, filename)
+
+    def read(self, filename):
+        return self._files[filename]
+
+    def extract(self, filename, path):
+        full_path = self._filesystem.join(path, filename)
+        contents = self.open(filename).contents()
+        self._filesystem.write_text_file(full_path, contents)
+
+    def delete(self, filename):
+        self._files[filename] = None
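+
+
+# A minimal illustrative sketch of populating and reading the mock; the file
+# name and contents below are placeholders.
+#
+#   zip_file = MockZip()
+#   zip_file.insert('results.html', '<html></html>')
+#   assert zip_file.read('results.html') == '<html></html>'
+#   assert 'results.html' in zip_file.namelist()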
diff --git a/Tools/Scripts/webkitpy/common/system/zipfileset.py b/Tools/Scripts/webkitpy/common/system/zipfileset.py
new file mode 100644
index 0000000..5cf3616
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/zipfileset.py
@@ -0,0 +1,71 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import urllib
+import zipfile
+
+from webkitpy.common.net.networktransaction import NetworkTransaction
+from webkitpy.common.system.fileset import FileSetFileHandle
+from webkitpy.common.system.filesystem import FileSystem
+
+
+class ZipFileSet(object):
+    """The set of files in a zip file that resides at a URL (local or remote)"""
+    def __init__(self, zip_url, filesystem=None, zip_factory=None):
+        self._zip_url = zip_url
+        self._temp_file = None
+        self._zip_file = None
+        self._filesystem = filesystem or FileSystem()
+        self._zip_factory = zip_factory or self._retrieve_zip_file
+
+    def _retrieve_zip_file(self, zip_url):
+        temp_file = NetworkTransaction().run(lambda: urllib.urlretrieve(zip_url)[0])
+        return (temp_file, zipfile.ZipFile(temp_file))
+
+    def _load(self):
+        if self._zip_file is None:
+            self._temp_file, self._zip_file = self._zip_factory(self._zip_url)
+
+    def open(self, filename):
+        self._load()
+        return FileSetFileHandle(self, filename, self._filesystem)
+
+    def close(self):
+        if self._temp_file:
+            self._filesystem.remove(self._temp_file)
+            self._temp_file = None
+
+    def namelist(self):
+        self._load()
+        return self._zip_file.namelist()
+
+    def read(self, filename):
+        self._load()
+        return self._zip_file.read(filename)
+
+    def extract(self, filename, path):
+        self._load()
+        self._zip_file.extract(filename, path)
+
+    def delete(self, filename):
+        raise Exception("Can't delete from a ZipFileSet.")
diff --git a/Tools/Scripts/webkitpy/common/system/zipfileset_mock.py b/Tools/Scripts/webkitpy/common/system/zipfileset_mock.py
new file mode 100644
index 0000000..24ac8cb
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/zipfileset_mock.py
@@ -0,0 +1,51 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+def make_factory(ziphashes):
+    """ZipFileSet factory routine that looks up zipfiles in a dict;
+    each zipfile should also be a dict of member names -> contents."""
+    class MockZipFileSet(object):
+        def __init__(self, url):
+            self._url = url
+            self._ziphash = ziphashes[url]
+
+        def namelist(self):
+            return self._ziphash.keys()
+
+        def read(self, member):
+            return self._ziphash[member]
+
+        def close(self):
+            pass
+
+    def maker(url):
+        # We return None because there's no tempfile to delete.
+        return (None, MockZipFileSet(url))
+
+    return maker
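+
+
+# A minimal illustrative sketch of using the factory with ZipFileSet; the URL
+# and member contents below are placeholders.
+#
+#   from webkitpy.common.system.zipfileset import ZipFileSet
+#
+#   factory = make_factory({'http://example.com/results.zip': {'results.html': '<html></html>'}})
+#   zip_set = ZipFileSet('http://example.com/results.zip', zip_factory=factory)
+#   assert zip_set.read('results.html') == '<html></html>'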
diff --git a/Tools/Scripts/webkitpy/common/system/zipfileset_unittest.py b/Tools/Scripts/webkitpy/common/system/zipfileset_unittest.py
new file mode 100644
index 0000000..16a74cb
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/system/zipfileset_unittest.py
@@ -0,0 +1,98 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import shutil
+import tempfile
+import unittest
+import zipfile
+
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.zipfileset import ZipFileSet
+
+
+class FakeZip(object):
+    def __init__(self, filesystem):
+        self._filesystem = filesystem
+        self._files = {}
+
+    def add_file(self, filename, contents):
+        self._files[filename] = contents
+
+    def open(self, filename):
+        return FileSetFileHandle(self, filename, self._filesystem)
+
+    def namelist(self):
+        return self._files.keys()
+
+    def read(self, filename):
+        return self._files[filename]
+
+    def extract(self, filename, path):
+        self._filesystem.write_text_file(self._filesystem.join(path, filename), self.read(filename))
+
+    def delete(self, filename):
+        raise Exception("Can't delete from a ZipFileSet.")
+
+
+class ZipFileSetTest(unittest.TestCase):
+    def setUp(self):
+        self._filesystem = MockFileSystem()
+        self._zip = ZipFileSet('blah', self._filesystem, self.make_fake_zip)
+
+    def make_fake_zip(self, zip_url):
+        result = FakeZip(self._filesystem)
+        result.add_file('some-file', 'contents')
+        result.add_file('a/b/some-other-file', 'other contents')
+        return (None, result)
+
+    def test_open(self):
+        file = self._zip.open('a/b/some-other-file')
+        self.assertEquals('a/b/some-other-file', file.name())
+        self.assertEquals('other contents', file.contents())
+
+    def test_close(self):
+        zipfileset = ZipFileSet('blah', self._filesystem, self.make_fake_zip)
+        zipfileset.close()
+
+    def test_read(self):
+        self.assertEquals('contents', self._zip.read('some-file'))
+
+    def test_extract(self):
+        self._filesystem.maybe_make_directory('/some-dir')
+        self._zip.extract('some-file', '/some-dir')
+        self.assertTrue(self._filesystem.isfile('/some-dir/some-file'))
+
+    def test_deep_extract(self):
+        self._filesystem.maybe_make_directory('/some-dir')
+        self._zip.extract('a/b/some-other-file', '/some-dir')
+        self.assertTrue(self._filesystem.isfile('/some-dir/a/b/some-other-file'))
+
+    def test_cant_delete(self):
+        self.assertRaises(Exception, self._zip.delete, 'some-file')
+
+    def test_namelist(self):
+        self.assertTrue('some-file' in self._zip.namelist())
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/thread/__init__.py b/Tools/Scripts/webkitpy/common/thread/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/thread/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/Tools/Scripts/webkitpy/common/thread/messagepump.py b/Tools/Scripts/webkitpy/common/thread/messagepump.py
new file mode 100644
index 0000000..0e39285
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/thread/messagepump.py
@@ -0,0 +1,59 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class MessagePumpDelegate(object):
+    def schedule(self, interval, callback):
+        raise NotImplementedError, "subclasses must implement"
+
+    def message_available(self, message):
+        raise NotImplementedError, "subclasses must implement"
+
+    def final_message_delivered(self):
+        raise NotImplementedError, "subclasses must implement"
+
+
+class MessagePump(object):
+    interval = 10 # seconds
+
+    def __init__(self, delegate, message_queue):
+        self._delegate = delegate
+        self._message_queue = message_queue
+        self._schedule()
+
+    def _schedule(self):
+        self._delegate.schedule(self.interval, self._callback)
+
+    def _callback(self):
+        (messages, is_running) = self._message_queue.take_all()
+        for message in messages:
+            self._delegate.message_available(message)
+        if not is_running:
+            self._delegate.final_message_delivered()
+            return
+        self._schedule()
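
A sketch of the intended control flow, as exercised by messagepump_unittest.py below: the delegate owns the actual timer, and MessagePump asks it to re-arm every `interval` seconds, draining the queue on each callback until stop() has been called. The delegate class here is hypothetical.

    from webkitpy.common.thread.messagepump import MessagePump, MessagePumpDelegate
    from webkitpy.common.thread.threadedmessagequeue import ThreadedMessageQueue

    class PrintingDelegate(MessagePumpDelegate):
        def schedule(self, interval, callback):
            # A real delegate would arm a timer here; the sketch just records the callback.
            self.callback = callback

        def message_available(self, message):
            print message

        def final_message_delivered(self):
            print "all messages delivered"

    queue = ThreadedMessageQueue()
    delegate = PrintingDelegate()
    pump = MessagePump(delegate, queue)  # immediately schedules the first callback
    queue.post("hello")
    queue.stop()
    delegate.callback()  # prints "hello", then "all messages delivered"
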
diff --git a/Tools/Scripts/webkitpy/common/thread/messagepump_unittest.py b/Tools/Scripts/webkitpy/common/thread/messagepump_unittest.py
new file mode 100644
index 0000000..f731db2
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/thread/messagepump_unittest.py
@@ -0,0 +1,83 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.thread.messagepump import MessagePump, MessagePumpDelegate
+from webkitpy.common.thread.threadedmessagequeue import ThreadedMessageQueue
+
+
+class TestDelegate(MessagePumpDelegate):
+    def __init__(self):
+        self.log = []
+
+    def schedule(self, interval, callback):
+        self.callback = callback
+        self.log.append("schedule")
+
+    def message_available(self, message):
+        self.log.append("message_available: %s" % message)
+
+    def final_message_delivered(self):
+        self.log.append("final_message_delivered")
+
+
+class MessagePumpTest(unittest.TestCase):
+
+    def test_basic(self):
+        queue = ThreadedMessageQueue()
+        delegate = TestDelegate()
+        pump = MessagePump(delegate, queue)
+        self.assertEqual(delegate.log, [
+            'schedule'
+        ])
+        delegate.callback()
+        queue.post("Hello")
+        queue.post("There")
+        delegate.callback()
+        self.assertEqual(delegate.log, [
+            'schedule',
+            'schedule',
+            'message_available: Hello',
+            'message_available: There',
+            'schedule'
+        ])
+        queue.post("More")
+        queue.post("Messages")
+        queue.stop()
+        delegate.callback()
+        self.assertEqual(delegate.log, [
+            'schedule',
+            'schedule',
+            'message_available: Hello',
+            'message_available: There',
+            'schedule',
+            'message_available: More',
+            'message_available: Messages',
+            'final_message_delivered'
+        ])
diff --git a/Tools/Scripts/webkitpy/common/thread/threadedmessagequeue.py b/Tools/Scripts/webkitpy/common/thread/threadedmessagequeue.py
new file mode 100644
index 0000000..e434767
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/thread/threadedmessagequeue.py
@@ -0,0 +1,52 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import threading
+
+
+class ThreadedMessageQueue(object):
+    def __init__(self):
+        self._messages = []
+        self._is_running = True
+        self._lock = threading.Lock()
+
+    def post(self, message):
+        with self._lock:
+            self._messages.append(message)
+
+    def stop(self):
+        with self._lock:
+            self._is_running = False
+
+    def take_all(self):
+        with self._lock:
+            messages = self._messages
+            is_running = self._is_running
+            self._messages = []
+        return (messages, is_running)
+
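
A sketch of the producer/consumer usage this class is built for: worker threads post() results while another thread periodically drains them with take_all(). The worker is joined before draining only to keep the example's output deterministic.

    import threading
    from webkitpy.common.thread.threadedmessagequeue import ThreadedMessageQueue

    queue = ThreadedMessageQueue()

    def worker():
        queue.post("result 1")
        queue.post("result 2")
        queue.stop()

    producer = threading.Thread(target=worker)
    producer.start()
    producer.join()

    messages, is_running = queue.take_all()
    print messages    # ['result 1', 'result 2']
    print is_running  # False
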
diff --git a/Tools/Scripts/webkitpy/common/thread/threadedmessagequeue_unittest.py b/Tools/Scripts/webkitpy/common/thread/threadedmessagequeue_unittest.py
new file mode 100644
index 0000000..cb67c1e
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/thread/threadedmessagequeue_unittest.py
@@ -0,0 +1,53 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.thread.threadedmessagequeue import ThreadedMessageQueue
+
+class ThreadedMessageQueueTest(unittest.TestCase):
+
+    def test_basic(self):
+        queue = ThreadedMessageQueue()
+        queue.post("Hello")
+        queue.post("There")
+        (messages, is_running) = queue.take_all()
+        self.assertEqual(messages, ["Hello", "There"])
+        self.assertTrue(is_running)
+        (messages, is_running) = queue.take_all()
+        self.assertEqual(messages, [])
+        self.assertTrue(is_running)
+        queue.post("More")
+        queue.stop()
+        queue.post("Messages")
+        (messages, is_running) = queue.take_all()
+        self.assertEqual(messages, ["More", "Messages"])
+        self.assertFalse(is_running)
+        (messages, is_running) = queue.take_all()
+        self.assertEqual(messages, [])
+        self.assertFalse(is_running)
diff --git a/Tools/Scripts/webkitpy/common/version_check.py b/Tools/Scripts/webkitpy/common/version_check.py
new file mode 100644
index 0000000..6acc9b4
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/version_check.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+
+if sys.version < '2.6' or sys.version >= '2.8':
+    print >> sys.stderr, "Unsupported Python version: WebKit only supports 2.6.x - 2.7.x, and you're running %s." % sys.version.split()[0]
+    sys.exit(1)
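
Since the check runs at import time, a caller opts in simply by importing this module before anything that depends on a supported interpreter, roughly like:

    # Illustrative; exits with status 1 if the running Python is not 2.6.x or 2.7.x.
    from webkitpy.common import version_check
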
diff --git a/Tools/Scripts/webkitpy/common/watchlist/__init__.py b/Tools/Scripts/webkitpy/common/watchlist/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/watchlist/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/Tools/Scripts/webkitpy/common/watchlist/amountchangedpattern.py b/Tools/Scripts/webkitpy/common/watchlist/amountchangedpattern.py
new file mode 100644
index 0000000..fc8adc9
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/watchlist/amountchangedpattern.py
@@ -0,0 +1,70 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class AmountChangedPattern:
+    def __init__(self, compile_regex, index_for_zero_value):
+        self._regex = compile_regex
+        self._index_for_zero_value = index_for_zero_value
+
+    def match(self, path, diff_file):
+        examined_strings = set()
+        for diff_line in diff_file:
+            if diff_line[self._index_for_zero_value]:
+                continue
+            match = self._regex.search(diff_line[2])
+            if not match:
+                continue
+            matching_string = match.group(0)
+            if matching_string in examined_strings:
+                continue
+            if self._instance_difference(diff_file, matching_string) > 0:
+                return True
+            # Avoid reprocessing this same string.
+            examined_strings.add(matching_string)
+        return False
+
+    def _instance_difference(self, diff_file, matching_string):
+        '''Returns the difference between the number of occurrences of the
+        string in the added lines and in the deleted lines (which one is
+        subtracted from the other depends on _index_for_zero_value).'''
+        count = 0
+        for diff_line in diff_file:
+            # If the line is unchanged, then don't examine it.
+            if diff_line[self._index_for_zero_value] and diff_line[1 - self._index_for_zero_value]:
+                continue
+            location_found = -len(matching_string)
+            while True:
+                location_found = diff_line[2].find(matching_string, location_found + len(matching_string))
+                if location_found == -1:
+                    break
+                if not diff_line[self._index_for_zero_value]:
+                    count += 1
+                else:
+                    count -= 1
+        return count
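
A sketch of how this pattern is evaluated. Each diff line handed to match() is an (old_line_number, new_line_number, text) tuple where 0 marks a line that does not exist on that side (the same layout used in the unit test below); an index_for_zero_value of 0 corresponds to the "more" keyword in watchlistparser.py and 1 to "less". The diff contents here are invented.

    import re
    from webkitpy.common.watchlist.amountchangedpattern import AmountChangedPattern

    diff_file = [
        (0, 1, 'refcount++;'),     # added
        (0, 2, 'refcount++;'),     # added
        (1, 0, 'refcount++;'),     # deleted
        (2, 2, 'untouched line'),  # unchanged, ignored by _instance_difference
    ]

    more_refcounts = AmountChangedPattern(re.compile('refcount'), 0)
    less_refcounts = AmountChangedPattern(re.compile('refcount'), 1)
    print more_refcounts.match(None, diff_file)  # True: two additions vs. one deletion
    print less_refcounts.match(None, diff_file)  # False
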
diff --git a/Tools/Scripts/webkitpy/common/watchlist/amountchangedpattern_unittest.py b/Tools/Scripts/webkitpy/common/watchlist/amountchangedpattern_unittest.py
new file mode 100644
index 0000000..7ae45fa
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/watchlist/amountchangedpattern_unittest.py
@@ -0,0 +1,70 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+'''Unit tests for amountchangedpattern.py.'''
+
+
+import re
+import unittest
+
+
+from webkitpy.common.watchlist.amountchangedpattern import AmountChangedPattern
+
+
+class AmountChangedPatternTest(unittest.TestCase):
+
+    # A quick note about the diff file structure.
+    # The first column indicates the old line number.
+    # The second column indicates the new line number.
+    # A 0 in either column means the line has no old or new line number.
+    _DIFF_FILE = ((0, 1, 'hi hi'),
+                  (1, 0, 'bye hi'),
+                  (2, 2, 'other hi'),
+                  (3, 0, 'both'),
+                  (0, 3, 'both'),
+                  )
+
+    def run_amount_changed_pattern_match(self, pattern, index_for_zero_value):
+        return AmountChangedPattern(re.compile(pattern), index_for_zero_value).match(None, self._DIFF_FILE)
+
+    def test_added_lines(self):
+        self.assertTrue(self.run_amount_changed_pattern_match('hi', 0))
+        self.assertTrue(self.run_amount_changed_pattern_match('hi hi', 0))
+        self.assertFalse(self.run_amount_changed_pattern_match('other', 0))
+        self.assertFalse(self.run_amount_changed_pattern_match('both', 0))
+        self.assertFalse(self.run_amount_changed_pattern_match('bye', 0))
+        self.assertFalse(self.run_amount_changed_pattern_match('MatchesNothing', 0))
+
+    def test_removed_lines(self):
+        self.assertFalse(self.run_amount_changed_pattern_match('hi', 1))
+        self.assertFalse(self.run_amount_changed_pattern_match('hi hi', 1))
+        self.assertFalse(self.run_amount_changed_pattern_match('other', 1))
+        self.assertFalse(self.run_amount_changed_pattern_match('both', 1))
+        self.assertTrue(self.run_amount_changed_pattern_match('bye', 1))
+        self.assertFalse(self.run_amount_changed_pattern_match('MatchesNothing', 1))
diff --git a/Tools/Scripts/webkitpy/common/watchlist/changedlinepattern.py b/Tools/Scripts/webkitpy/common/watchlist/changedlinepattern.py
new file mode 100644
index 0000000..61fac9a
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/watchlist/changedlinepattern.py
@@ -0,0 +1,41 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class ChangedLinePattern:
+    def __init__(self, compile_regex, index_for_zero_value):
+        self._regex = compile_regex
+        self._index_for_zero_value = index_for_zero_value
+
+    def match(self, path, diff_file):
+        for diff_line in diff_file:
+            if diff_line[self._index_for_zero_value]:
+                continue
+            if self._regex.search(diff_line[2]):
+                return True
+        return False
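
The same (old_line_number, new_line_number, text) convention applies here; an index of 0 restricts the search to added lines ("in_added_lines" in watchlistparser.py) and 1 to deleted lines ("in_deleted_lines"). A brief sketch with an invented diff:

    import re
    from webkitpy.common.watchlist.changedlinepattern import ChangedLinePattern

    diff_file = [(0, 1, 'newly added line'), (1, 0, 'line that was removed')]
    in_added = ChangedLinePattern(re.compile('added'), 0)
    in_deleted = ChangedLinePattern(re.compile('added'), 1)
    print in_added.match(None, diff_file)    # True
    print in_deleted.match(None, diff_file)  # False
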
diff --git a/Tools/Scripts/webkitpy/common/watchlist/changedlinepattern_unittest.py b/Tools/Scripts/webkitpy/common/watchlist/changedlinepattern_unittest.py
new file mode 100644
index 0000000..1f2aeda
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/watchlist/changedlinepattern_unittest.py
@@ -0,0 +1,68 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+'''Unit tests for changedlinepattern.py.'''
+
+import re
+import unittest
+
+
+from webkitpy.common.watchlist.changedlinepattern import ChangedLinePattern
+
+
+class ChangedLinePatternTest(unittest.TestCase):
+
+    # A quick note about the diff file structure.
+    # The first column indicates the old line number.
+    # The second column indicates the new line number.
+    # A 0 in either column means the line has no old or new line number.
+    _DIFF_FILE = ((0, 1, 'hi'),
+                  (1, 0, 'bye'),
+                  (2, 2, 'other'),
+                  (3, 0, 'both'),
+                  (0, 3, 'both'),
+                  )
+
+    def run_changed_line_pattern_match(self, pattern, index_for_zero_value):
+        return ChangedLinePattern(re.compile(pattern), index_for_zero_value).match(None, self._DIFF_FILE)
+
+    def test_added_lines(self):
+        self.assertTrue(self.run_changed_line_pattern_match('hi', 0))
+        self.assertTrue(self.run_changed_line_pattern_match('h.', 0))
+        self.assertTrue(self.run_changed_line_pattern_match('both', 0))
+        self.assertFalse(self.run_changed_line_pattern_match('bye', 0))
+        self.assertFalse(self.run_changed_line_pattern_match('y', 0))
+        self.assertFalse(self.run_changed_line_pattern_match('other', 0))
+
+    def test_removed_lines(self):
+        self.assertFalse(self.run_changed_line_pattern_match('hi', 1))
+        self.assertFalse(self.run_changed_line_pattern_match('h.', 1))
+        self.assertTrue(self.run_changed_line_pattern_match('both', 1))
+        self.assertTrue(self.run_changed_line_pattern_match('bye', 1))
+        self.assertTrue(self.run_changed_line_pattern_match('y', 1))
+        self.assertFalse(self.run_changed_line_pattern_match('other', 1))
diff --git a/Tools/Scripts/webkitpy/common/watchlist/filenamepattern.py b/Tools/Scripts/webkitpy/common/watchlist/filenamepattern.py
new file mode 100644
index 0000000..799eeb4
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/watchlist/filenamepattern.py
@@ -0,0 +1,35 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class FilenamePattern:
+    def __init__(self, compiled_regex):
+        self._regex = compiled_regex
+
+    def match(self, path, diff_file):
+        return self._regex.match(path)
diff --git a/Tools/Scripts/webkitpy/common/watchlist/filenamepattern_unittest.py b/Tools/Scripts/webkitpy/common/watchlist/filenamepattern_unittest.py
new file mode 100644
index 0000000..0afdf30
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/watchlist/filenamepattern_unittest.py
@@ -0,0 +1,52 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import re
+import unittest
+
+
+from webkitpy.common.watchlist.filenamepattern import FilenamePattern
+
+
+class FileNamePatternTest(unittest.TestCase):
+    def test_filename_pattern_literal(self):
+        filename_pattern = FilenamePattern(re.compile(r'MyFileName\.cpp'))
+
+        # Note: the following filenames are literal paths, not regexes.
+        self.assertTrue(filename_pattern.match('MyFileName.cpp', None))
+        self.assertTrue(filename_pattern.match('MyFileName.cppa', None))
+        self.assertFalse(filename_pattern.match('aMyFileName.cpp', None))
+        self.assertFalse(filename_pattern.match('MyFileNamebcpp', None))
+
+    def test_filename_pattern_substring(self):
+        filename_pattern = FilenamePattern(re.compile(r'.*\\MyFileName\..*'))
+
+        # Note: the following filenames are literal paths, not regexes.
+        self.assertTrue(filename_pattern.match(r'\\MyFileName.cpp', None))
+        self.assertTrue(filename_pattern.match(r'a\\MyFileName.h', None))
+        self.assertFalse(filename_pattern.match(r'\\aMyFileName.cpp', None))
diff --git a/Tools/Scripts/webkitpy/common/watchlist/watchlist.py b/Tools/Scripts/webkitpy/common/watchlist/watchlist.py
new file mode 100644
index 0000000..4a81039
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/watchlist/watchlist.py
@@ -0,0 +1,75 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.checkout.diff_parser import DiffParser
+
+
+class WatchList(object):
+    def __init__(self):
+        self.definitions = {}
+        self.cc_rules = set()
+        self.message_rules = set()
+
+    def find_matching_definitions(self, diff):
+        matching_definitions = set()
+        patch_files = DiffParser(diff.splitlines()).files
+
+        for path, diff_file in patch_files.iteritems():
+            for definition in self.definitions:
+                # If a definition has already matched, there is no need to process it.
+                if definition in matching_definitions:
+                    continue
+
+                # See if the definition matches within one file.
+                for pattern in self.definitions[definition]:
+                    if not pattern.match(path, diff_file.lines):
+                        break
+                else:
+                    matching_definitions.add(definition)
+        return matching_definitions
+
+    def _determine_instructions(self, matching_definitions, rules):
+        instructions = set()
+        for rule in rules:
+            if rule.match(matching_definitions):
+                instructions.update(rule.instructions())
+        # Sort the results to make the order deterministic (for consistency and easier testing).
+        return sorted(instructions)
+
+    def determine_cc_list(self, matching_definitions):
+        return self._determine_instructions(matching_definitions, self.cc_rules)
+
+    def determine_messages(self, matching_definitions):
+        return self._determine_instructions(matching_definitions, self.message_rules)
+
+    def determine_cc_and_messages(self, diff):
+        definitions = self.find_matching_definitions(diff)
+        return {
+            'cc_list': self.determine_cc_list(definitions),
+            'messages': self.determine_messages(definitions),
+        }
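
In practice WatchList instances are populated by WatchListParser (see watchlist_unittest.py below), but the rule protocol that the cc/message methods rely on is small: anything exposing match(definitions) and instructions() works. A sketch with a hypothetical stub rule:

    from webkitpy.common.watchlist.watchlist import WatchList

    class StubRule(object):
        """Hypothetical stand-in for the rule objects built by WatchListParser."""
        def __init__(self, definition_name, emails):
            self._definition_name = definition_name
            self._emails = emails

        def match(self, matching_definitions):
            return self._definition_name in matching_definitions

        def instructions(self):
            return self._emails

    watch_list = WatchList()
    watch_list.cc_rules = set([StubRule('WatchList1', ['levin@chromium.org'])])
    print watch_list.determine_cc_list(set(['WatchList1']))  # ['levin@chromium.org']
    print watch_list.determine_cc_list(set())                # []
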
diff --git a/Tools/Scripts/webkitpy/common/watchlist/watchlist_mock.py b/Tools/Scripts/webkitpy/common/watchlist/watchlist_mock.py
new file mode 100644
index 0000000..2fd2f88
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/watchlist/watchlist_mock.py
@@ -0,0 +1,35 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.system.deprecated_logging import log
+
+
+class MockWatchList(object):
+    def determine_cc_and_messages(self, diff):
+        log("MockWatchList: determine_cc_and_messages")
+        return {'cc_list': ['abarth@webkit.org', 'eric@webkit.org', 'levin@chromium.org'], 'messages': ['Message1.', 'Message2.'], }
diff --git a/Tools/Scripts/webkitpy/common/watchlist/watchlist_unittest.py b/Tools/Scripts/webkitpy/common/watchlist/watchlist_unittest.py
new file mode 100644
index 0000000..09010b2
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/watchlist/watchlist_unittest.py
@@ -0,0 +1,277 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+'''Unit tests for watchlist.py.'''
+
+import unittest
+
+from webkitpy.common.checkout.diff_test_data import DIFF_TEST_DATA
+from webkitpy.common.watchlist.watchlistparser import WatchListParser
+
+
+class WatchListTest(unittest.TestCase):
+    def setUp(self):
+        self._watch_list_parser = WatchListParser()
+
+    def test_filename_definition_no_matches(self):
+        watch_list = self._watch_list_parser.parse(
+            '{'
+            '    "DEFINITIONS": {'
+            '        "WatchList1": {'
+            '            "filename": r".*\\MyFileName\\.cpp",'
+            '        },'
+            '     },'
+            '    "CC_RULES": {'
+            '        "WatchList1": ['
+            '            "levin@chromium.org",'
+            '        ],'
+            '    },'
+            '}')
+        self.assertEquals(set([]), watch_list.find_matching_definitions(DIFF_TEST_DATA))
+
+    def test_filename_definition(self):
+        watch_list = self._watch_list_parser.parse(
+            '{'
+            '    "DEFINITIONS": {'
+            '        "WatchList1": {'
+            '            "filename": r"WebCore/rendering/style/StyleFlexibleBoxData\.h",'
+            '        },'
+            '     },'
+            '    "CC_RULES": {'
+            '        "WatchList1": ['
+            '            "levin@chromium.org",'
+            '        ],'
+            '    },'
+            '}')
+        self.assertEquals(set(['WatchList1']), watch_list.find_matching_definitions(DIFF_TEST_DATA))
+
+    def test_cc_rules_simple(self):
+        watch_list = self._watch_list_parser.parse(
+            '{'
+            '    "DEFINITIONS": {'
+            '        "WatchList1": {'
+            '            "filename": r"WebCore/rendering/style/StyleFlexibleBoxData\.h",'
+            '        },'
+            '     },'
+            '    "CC_RULES": {'
+            '        "WatchList1": ['
+            '            "levin@chromium.org",'
+            '        ],'
+            '    },'
+            '}')
+        cc_and_messages = watch_list.determine_cc_and_messages(DIFF_TEST_DATA)
+        self.assertEquals({
+                'cc_list': ['levin@chromium.org'],
+                'messages': [],
+                }, cc_and_messages)
+
+    def test_cc_rules_complex(self):
+        watch_list = self._watch_list_parser.parse(
+            '{'
+            '    "DEFINITIONS": {'
+            '        "WatchList1": {'
+            '            "filename": r"WebCore/rendering/style/StyleFlexibleBoxData\.h",'
+            '        },'
+            '        "WatchList2": {'
+            '            "filename": r"WillNotMatch",'
+            '        },'
+            '        "WatchList3": {'
+            '            "filename": r"WillNotMatch",'
+            '        },'
+            '     },'
+            '    "CC_RULES": {'
+            '        "WatchList2|WatchList1|WatchList3": [ "levin@chromium.org", ],'
+            '    },'
+            '}')
+        cc_and_messages = watch_list.determine_cc_and_messages(DIFF_TEST_DATA)
+        self.assertEquals({
+                'cc_list': ['levin@chromium.org'],
+                'messages': [],
+                }, cc_and_messages)
+
+    def test_cc_and_message_rules_complex(self):
+        watch_list = self._watch_list_parser.parse(
+            '{'
+            '    "DEFINITIONS": {'
+            '        "WatchList1": {'
+            '            "filename": r"WebCore/rendering/style/StyleFlexibleBoxData\.h",'
+            '        },'
+            '        "WatchList2": {'
+            '            "filename": r"WillNotMatch",'
+            '        },'
+            '        "WatchList3": {'
+            '            "filename": r"WillNotMatch",'
+            '        },'
+            '     },'
+            '    "CC_RULES": {'
+            '        "WatchList2|WatchList1|WatchList3": [ "levin@chromium.org", ],'
+            '    },'
+            '    "MESSAGE_RULES": {'
+            '        "WatchList2|WatchList1|WatchList3": [ "msg1", "msg2", ],'
+            '    },'
+            '}')
+        cc_and_messages = watch_list.determine_cc_and_messages(DIFF_TEST_DATA)
+        self.assertEquals({
+                'cc_list': ['levin@chromium.org'],
+                'messages': ['msg1', 'msg2'],
+                }, cc_and_messages)
+
+    def test_cc_and_message_rules_no_matches(self):
+        watch_list = self._watch_list_parser.parse(
+            '{'
+            '    "DEFINITIONS": {'
+            '        "WatchList1": {'
+            '            "filename": r"WebCore/rendering/style/ThisFileDoesNotExist\.h",'
+            '        },'
+            '        "WatchList2": {'
+            '            "filename": r"WillNotMatch",'
+            '        },'
+            '        "WatchList3": {'
+            '            "filename": r"WillNotMatch",'
+            '        },'
+            '     },'
+            '    "CC_RULES": {'
+            '        "WatchList2|WatchList1|WatchList3": [ "levin@chromium.org", ],'
+            '    },'
+            '    "MESSAGE_RULES": {'
+            '        "WatchList2|WatchList1|WatchList3": [ "msg1", "msg2", ],'
+            '    },'
+            '}')
+        cc_and_messages = watch_list.determine_cc_and_messages(DIFF_TEST_DATA)
+        self.assertEquals({
+                'cc_list': [],
+                'messages': [],
+                }, cc_and_messages)
+
+    def test_added_match(self):
+        watch_list = self._watch_list_parser.parse(
+            '{'
+            '    "DEFINITIONS": {'
+            '        "WatchList1": {'
+            '            "in_added_lines": r"RenderStyle::initialBoxOrient",'
+            '        },'
+            '        "WatchList2": {'
+            '            "in_deleted_lines": r"RenderStyle::initialBoxOrient",'
+            '        },'
+            '     },'
+            '    "CC_RULES": {'
+            '        "WatchList1": [ "eric@webkit.org", ],'
+            '        "WatchList2": [ "abarth@webkit.org", ],'
+            '    },'
+            '}')
+        cc_and_messages = watch_list.determine_cc_and_messages(DIFF_TEST_DATA)
+        self.assertEquals({
+                'cc_list': ['eric@webkit.org'],
+                'messages': [],
+                }, cc_and_messages)
+
+    def test_deleted_match(self):
+        watch_list = self._watch_list_parser.parse(
+            '{'
+            '    "DEFINITIONS": {'
+            '        "WatchList1": {'
+            '            "in_added_lines": r"unsigned orient: 1;",'
+            '        },'
+            '        "WatchList2": {'
+            '            "in_deleted_lines": r"unsigned orient: 1;",'
+            '        },'
+            '     },'
+            '    "CC_RULES": {'
+            '        "WatchList1": [ "eric@webkit.org", ],'
+            '        "WatchList2": [ "abarth@webkit.org", ],'
+            '    },'
+            '}')
+        cc_and_messages = watch_list.determine_cc_and_messages(DIFF_TEST_DATA)
+        self.assertEquals({
+                'cc_list': ['abarth@webkit.org'],
+                'messages': [],
+                }, cc_and_messages)
+
+    def test_more_and_less_match(self):
+        watch_list = self._watch_list_parser.parse(
+            '{'
+            '    "DEFINITIONS": {'
+            '        "WatchList1": {'
+            # This pattern is in both added and deleted lines, so no match.
+            '            "more": r"userSelect == o\.userSelect",'
+            '        },'
+            '        "WatchList2": {'
+            '            "more": r"boxOrient\(o\.boxOrient\)",'
+            '        },'
+            '        "WatchList3": {'
+            '            "less": r"unsigned orient"'
+            '        },'
+            '     },'
+            '    "CC_RULES": {'
+            '        "WatchList1": [ "eric@webkit.org", ],'
+            '        "WatchList2": [ "levin@chromium.org", ],'
+            '    },'
+            '    "MESSAGE_RULES": {'
+            '        "WatchList3": ["Test message."],'
+            '    },'
+            '}')
+        cc_and_messages = watch_list.determine_cc_and_messages(DIFF_TEST_DATA)
+        self.assertEquals({
+                'cc_list': ['levin@chromium.org'],
+                'messages': ["Test message."],
+                }, cc_and_messages)
+
+    def test_complex_match(self):
+        watch_list = self._watch_list_parser.parse(
+            '{'
+            '    "DEFINITIONS": {'
+            '        "WatchList1": {'
+            '            "filename": r"WebCore/rendering/style/StyleRareInheritedData\.cpp",'
+            '            "in_added_lines": r"\&\& boxOrient == o\.boxOrient;",'
+            '            "in_deleted_lines": r"\&\& userSelect == o\.userSelect;",'
+            '            "more": r"boxOrient\(o\.boxOrient\)",'
+            '        },'
+            '        "WatchList2": {'
+            '            "filename": r"WebCore/rendering/style/StyleRareInheritedData\.cpp",'
+            '            "in_added_lines": r"RenderStyle::initialBoxOrient",'
+            '            "less": r"userSelect;"'
+            '        },'
+            # WatchList3 won't match because these two patterns aren't in the same file.
+            '        "WatchList3": {'
+            '            "in_added_lines": r"RenderStyle::initialBoxOrient",'
+            '            "in_deleted_lines": r"unsigned orient: 1;",'
+            '        },'
+            '     },'
+            '    "CC_RULES": {'
+            '        "WatchList1": [ "eric@webkit.org", ],'
+            '        "WatchList3": [ "abarth@webkit.org", ],'
+            '    },'
+            '    "MESSAGE_RULES": {'
+            '        "WatchList2": ["This is a test message."],'
+            '    },'
+            '}')
+        cc_and_messages = watch_list.determine_cc_and_messages(DIFF_TEST_DATA)
+        self.assertEquals({
+                'cc_list': ['eric@webkit.org'],
+                'messages': ["This is a test message."],
+                }, cc_and_messages)
diff --git a/Tools/Scripts/webkitpy/common/watchlist/watchlistloader.py b/Tools/Scripts/webkitpy/common/watchlist/watchlistloader.py
new file mode 100644
index 0000000..aa816e3
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/watchlist/watchlistloader.py
@@ -0,0 +1,43 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.watchlist.watchlistparser import WatchListParser
+
+
+class WatchListLoader(object):
+    def __init__(self, filesystem):
+        self._filesystem = filesystem
+
+    def load(self):
+        config_path = self._filesystem.dirname(self._filesystem.path_to_module('webkitpy.common.config'))
+        watch_list_full_path = self._filesystem.join(config_path, 'watchlist')
+        if not self._filesystem.exists(watch_list_full_path):
+            raise Exception('Watch list file (%s) not found.' % watch_list_full_path)
+
+        watch_list_contents = self._filesystem.read_text_file(watch_list_full_path)
+        return WatchListParser().parse(watch_list_contents)
diff --git a/Tools/Scripts/webkitpy/common/watchlist/watchlistloader_unittest.py b/Tools/Scripts/webkitpy/common/watchlist/watchlistloader_unittest.py
new file mode 100644
index 0000000..8d3fa98
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/watchlist/watchlistloader_unittest.py
@@ -0,0 +1,45 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+'''Unit tests for watchlistloader.py.'''
+
+from webkitpy.common import webkitunittest
+from webkitpy.common.system import filesystem_mock
+from webkitpy.common.system import filesystem
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.watchlist.watchlistloader import WatchListLoader
+
+
+class WatchListLoaderTest(webkitunittest.TestCase):
+    def test_watch_list_not_found(self):
+        loader = WatchListLoader(filesystem_mock.MockFileSystem())
+        self.assertRaisesRegexp(Exception, r'Watch list file \(.*/watchlist\) not found\.', loader.load)
+
+    def test_watch_list_load(self):
+        # Test parsing of the checked-in watch list.
+        OutputCapture().assert_outputs(self, WatchListLoader(filesystem.FileSystem()).load, expected_logs="")
diff --git a/Tools/Scripts/webkitpy/common/watchlist/watchlistparser.py b/Tools/Scripts/webkitpy/common/watchlist/watchlistparser.py
new file mode 100644
index 0000000..c72eab3
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/watchlist/watchlistparser.py
@@ -0,0 +1,181 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+import difflib
+import logging
+import re
+
+from webkitpy.common.watchlist.amountchangedpattern import AmountChangedPattern
+from webkitpy.common.watchlist.changedlinepattern import ChangedLinePattern
+from webkitpy.common.watchlist.filenamepattern import FilenamePattern
+from webkitpy.common.watchlist.watchlist import WatchList
+from webkitpy.common.watchlist.watchlistrule import WatchListRule
+from webkitpy.common.config.committers import CommitterList
+
+
+_log = logging.getLogger(__name__)
+
+
+class WatchListParser(object):
+    _DEFINITIONS = 'DEFINITIONS'
+    _CC_RULES = 'CC_RULES'
+    _MESSAGE_RULES = 'MESSAGE_RULES'
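+    # '|' is reserved for combining definition names in rule keys (see WatchListRule),
+    # so it may not appear inside a definition name itself.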
+    _INVALID_DEFINITION_NAME_REGEX = r'\|'
+
+    def __init__(self, log_error=None):
+        self._log_error = log_error or _log.error
+        self._section_parsers = {
+            self._DEFINITIONS: self._parse_definition_section,
+            self._CC_RULES: self._parse_cc_rules,
+            self._MESSAGE_RULES: self._parse_message_rules,
+        }
+        self._definition_pattern_parsers = {
+            'filename': FilenamePattern,
+            'in_added_lines': (lambda compiled_regex: ChangedLinePattern(compiled_regex, 0)),
+            'in_deleted_lines': (lambda compiled_regex: ChangedLinePattern(compiled_regex, 1)),
+            'less': (lambda compiled_regex: AmountChangedPattern(compiled_regex, 1)),
+            'more': (lambda compiled_regex: AmountChangedPattern(compiled_regex, 0)),
+        }
+
+    def parse(self, watch_list_contents):
+        watch_list = WatchList()
+
+        # Change the watch list text into a dictionary.
+        dictionary = self._eval_watch_list(watch_list_contents)
+
+        # Parse the top level sections in the watch list.
+        for section in dictionary:
+            parser = self._section_parsers.get(section)
+            if not parser:
+                self._log_error(('Unknown section "%s" in watch list.'
+                                + self._suggest_words(section, self._section_parsers.keys()))
+                               % section)
+                continue
+            parser(dictionary[section], watch_list)
+
+        self._validate(watch_list)
+        return watch_list
+
+    def _eval_watch_list(self, watch_list_contents):
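+        # The watchlist file is a Python dictionary literal; it is evaluated
+        # with builtins disabled as a basic safeguard.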
+        return eval(watch_list_contents, {'__builtins__': None}, None)
+
+    def _suggest_words(self, invalid_word, valid_words):
+        close_matches = difflib.get_close_matches(invalid_word, valid_words)
+        if not close_matches:
+            return ''
+        return '\n\nPerhaps it should be %s.' % (' or '.join(close_matches))
+
+    def _parse_definition_section(self, definition_section, watch_list):
+        definitions = {}
+        for name in definition_section:
+            invalid_character = re.search(self._INVALID_DEFINITION_NAME_REGEX, name)
+            if invalid_character:
+                self._log_error('Invalid character "%s" in definition "%s".' % (invalid_character.group(0), name))
+                continue
+
+            definition = definition_section[name]
+            definitions[name] = []
+            for pattern_type in definition:
+                pattern_parser = self._definition_pattern_parsers.get(pattern_type)
+                if not pattern_parser:
+                    self._log_error(('Unknown pattern type "%s" in definition "%s".'
+                                     + self._suggest_words(pattern_type, self._definition_pattern_parsers.keys()))
+                                    % (pattern_type, name))
+                    continue
+
+                try:
+                    compiled_regex = re.compile(definition[pattern_type])
+                except Exception, e:
+                    self._log_error('The regex "%s" is invalid due to "%s".' % (definition[pattern_type], str(e)))
+                    continue
+
+                pattern = pattern_parser(compiled_regex)
+                definitions[name].append(pattern)
+            if not definitions[name]:
+                self._log_error('The definition "%s" has no patterns, so it should be deleted.' % name)
+                continue
+        watch_list.definitions = definitions
+
+    def _parse_rules(self, rules_section):
+        rules = []
+        for complex_definition in rules_section:
+            instructions = rules_section[complex_definition]
+            if not instructions:
+                self._log_error('A rule for definition "%s" is empty, so it should be deleted.' % complex_definition)
+                continue
+            rules.append(WatchListRule(complex_definition, instructions))
+        return rules
+
+    def _parse_cc_rules(self, cc_section, watch_list):
+        watch_list.cc_rules = self._parse_rules(cc_section)
+
+    def _parse_message_rules(self, message_section, watch_list):
+        watch_list.message_rules = self._parse_rules(message_section)
+
+    def _validate(self, watch_list):
+        cc_definitions_set = self._rule_definitions_as_set(watch_list.cc_rules)
+        messages_definitions_set = self._rule_definitions_as_set(watch_list.message_rules)
+        self._verify_all_definitions_are_used(watch_list, cc_definitions_set.union(messages_definitions_set))
+
+        self._validate_definitions(cc_definitions_set, self._CC_RULES, watch_list)
+        self._validate_definitions(messages_definitions_set, self._MESSAGE_RULES, watch_list)
+
+        accounts = CommitterList()
+        for cc_rule in watch_list.cc_rules:
+            # Copy the instructions since we'll be removing items from the original list,
+            # and modifying a list while iterating over it leads to undefined behavior.
+            instructions_copy = cc_rule.instructions()[:]
+            for email in instructions_copy:
+                if not accounts.account_by_login(email):
+                    cc_rule.remove_instruction(email)
+                    self._log_error("The email alias %s which is in the watchlist is not listed as a contributor in committers.py" % email)
+                    continue
+
+    def _verify_all_definitions_are_used(self, watch_list, used_definitions):
+        definitions_not_used = set(watch_list.definitions.keys())
+        definitions_not_used.difference_update(used_definitions)
+        if definitions_not_used:
+            self._log_error('The following definitions are not used and should be removed: %s' % (', '.join(definitions_not_used)))
+
+    def _validate_definitions(self, definitions, rules_section_name, watch_list):
+        declared_definitions = watch_list.definitions.keys()
+        definition_set = set(definitions)
+        definition_set.difference_update(declared_definitions)
+
+        if definition_set:
+            suggestions = ''
+            if len(definition_set) == 1:
+                suggestions = self._suggest_words(set().union(definition_set).pop(), declared_definitions)
+            self._log_error('In section "%s", the following definitions are not used and should be removed: %s%s' % (rules_section_name, ', '.join(definition_set), suggestions))
+
+    def _rule_definitions_as_set(self, rules):
+        definition_set = set()
+        for rule in rules:
+            definition_set = definition_set.union(rule.definitions_to_match)
+        return definition_set
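For reference, a minimal sketch of the dictionary format the parser expects, in the same shape the unit tests below exercise (the definition name, filename regex and message are hypothetical placeholders; the CC address is the one the tests use and is assumed to be listed in committers.py):

    from webkitpy.common.watchlist.watchlistparser import WatchListParser

    # Hypothetical watchlist contents: one definition, used by a CC rule and a message rule.
    example_watch_list = (
        '{'
        '    "DEFINITIONS": {'
        '        "ExampleWatchList": {'
        '            "filename": r"Source/ExampleFile\\.cpp",'
        '            "more": r"RefCounted",'
        '        },'
        '     },'
        '    "CC_RULES": {'
        '        "ExampleWatchList": ["levin@chromium.org"],'
        '     },'
        '    "MESSAGE_RULES": {'
        '        "ExampleWatchList": ["Please double-check the ref-counting changes."],'
        '     },'
        '}')

    # Parses into a WatchList; nothing is logged because the definition is
    # declared and used, and the CC address matches a listed contributor.
    watch_list = WatchListParser().parse(example_watch_list)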
diff --git a/Tools/Scripts/webkitpy/common/watchlist/watchlistparser_unittest.py b/Tools/Scripts/webkitpy/common/watchlist/watchlistparser_unittest.py
new file mode 100644
index 0000000..3bd4dc2
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/watchlist/watchlistparser_unittest.py
@@ -0,0 +1,259 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+'''Unit tests for watchlistparser.py.'''
+
+
+import logging
+import sys
+
+
+from webkitpy.common import webkitunittest
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.watchlist.watchlistparser import WatchListParser
+
+
+class WatchListParserTest(webkitunittest.TestCase):
+    def setUp(self):
+        webkitunittest.TestCase.setUp(self)
+        self._watch_list_parser = WatchListParser()
+
+    def test_bad_section(self):
+        watch_list = ('{"FOO": {}}')
+        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
+                                       expected_logs='Unknown section "FOO" in watch list.\n')
+
+    def test_section_typo(self):
+        watch_list = ('{"DEFINTIONS": {}}')
+        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
+                                       expected_logs='Unknown section "DEFINTIONS" in watch list.'
+                                       + '\n\nPerhaps it should be DEFINITIONS.\n')
+
+    def test_bad_definition(self):
+        watch_list = (
+            '{'
+            '    "DEFINITIONS": {'
+            '        "WatchList1|A": {'
+            '            "filename": r".*\\MyFileName\\.cpp",'
+            '        },'
+            '     },'
+            '}')
+
+        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
+                                       expected_logs='Invalid character "|" in definition "WatchList1|A".\n')
+
+    def test_bad_filename_regex(self):
+        watch_list = (
+            '{'
+            '    "DEFINITIONS": {'
+            '        "WatchList1": {'
+            '            "filename": r"*",'
+            '            "more": r"RefCounted",'
+            '        },'
+            '     },'
+            '    "CC_RULES": {'
+            '        "WatchList1": ["levin@chromium.org"],'
+            '     },'
+            '}')
+
+        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
+                                       expected_logs='The regex "*" is invalid due to "nothing to repeat".\n')
+
+    def test_bad_more_regex(self):
+        watch_list = (
+            '{'
+            '    "DEFINITIONS": {'
+            '        "WatchList1": {'
+            '            "filename": r"aFileName\\.cpp",'
+            '            "more": r"*",'
+            '        },'
+            '     },'
+            '    "CC_RULES": {'
+            '        "WatchList1": ["levin@chromium.org"],'
+            '     },'
+            '}')
+
+        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
+                                       expected_logs='The regex "*" is invalid due to "nothing to repeat".\n')
+
+    def test_bad_match_type(self):
+        watch_list = (
+            '{'
+            '    "DEFINITIONS": {'
+            '        "WatchList1": {'
+            '            "nothing_matches_this": r".*\\MyFileName\\.cpp",'
+            '            "filename": r".*\\MyFileName\\.cpp",'
+            '        },'
+            '     },'
+            '    "CC_RULES": {'
+            '        "WatchList1": ["levin@chromium.org"],'
+            '     },'
+            '}')
+
+        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
+                                       expected_logs='Unknown pattern type "nothing_matches_this" in definition "WatchList1".\n')
+
+    def test_match_type_typo(self):
+        watch_list = (
+            '{'
+            '    "DEFINITIONS": {'
+            '        "WatchList1": {'
+            '            "iflename": r".*\\MyFileName\\.cpp",'
+            '            "more": r"RefCounted",'
+            '        },'
+            '     },'
+            '    "CC_RULES": {'
+            '        "WatchList1": ["levin@chromium.org"],'
+            '     },'
+            '}')
+
+        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
+                                       expected_logs='Unknown pattern type "iflename" in definition "WatchList1".'
+                                       + '\n\nPerhaps it should be filename.\n')
+
+    def test_empty_definition(self):
+        watch_list = (
+            '{'
+            '    "DEFINITIONS": {'
+            '        "WatchList1": {'
+            '        },'
+            '     },'
+            '    "CC_RULES": {'
+            '        "WatchList1": ["levin@chromium.org"],'
+            '     },'
+            '}')
+
+        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
+                                       expected_logs='The definition "WatchList1" has no patterns, so it should be deleted.\n')
+
+    def test_empty_cc_rule(self):
+        watch_list = (
+            '{'
+            '    "DEFINITIONS": {'
+            '        "WatchList1": {'
+            '            "filename": r".*\\MyFileName\\.cpp",'
+            '        },'
+            '     },'
+            '    "CC_RULES": {'
+            '        "WatchList1": [],'
+            '     },'
+            '}')
+
+        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
+                                       expected_logs='A rule for definition "WatchList1" is empty, so it should be deleted.\n'
+                                       + 'The following definitions are not used and should be removed: WatchList1\n')
+
+    def test_cc_rule_with_invalid_email(self):
+        watch_list = (
+            '{'
+            '    "DEFINITIONS": {'
+            '        "WatchList1": {'
+            '            "filename": r".*\\MyFileName\\.cpp",'
+            '        },'
+            '     },'
+            '    "CC_RULES": {'
+            '        "WatchList1": ["levin+bad+email@chromium.org"],'
+            '     },'
+            '}')
+
+        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
+                                       expected_logs='The email alias levin+bad+email@chromium.org which is'
+                                       + ' in the watchlist is not listed as a contributor in committers.py\n')
+
+    def test_empty_message_rule(self):
+        watch_list = (
+            '{'
+            '    "DEFINITIONS": {'
+            '        "WatchList1": {'
+            '            "filename": r".*\\MyFileName\\.cpp",'
+            '        },'
+            '     },'
+            '    "MESSAGE_RULES": {'
+            '        "WatchList1": ['
+            '        ],'
+            '     },'
+            '}')
+
+        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
+                                       expected_logs='A rule for definition "WatchList1" is empty, so it should be deleted.\n'
+                                       + 'The following definitions are not used and should be removed: WatchList1\n')
+
+    def test_unused_definition(self):
+        watch_list = (
+            '{'
+            '    "DEFINITIONS": {'
+            '        "WatchList1": {'
+            '            "filename": r".*\\MyFileName\\.cpp",'
+            '        },'
+            '     },'
+            '}')
+
+        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
+                                       expected_logs='The following definitions are not used and should be removed: WatchList1\n')
+
+    def test_cc_rule_with_undefined_definition(self):
+        watch_list = (
+            '{'
+            '    "CC_RULES": {'
+            '        "WatchList1": ["levin@chromium.org"]'
+            '     },'
+            '}')
+
+        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
+                                       expected_logs='In section "CC_RULES", the following definitions are not used and should be removed: WatchList1\n')
+
+    def test_message_rule_with_undefined_definition(self):
+        watch_list = (
+            '{'
+            '    "MESSAGE_RULES": {'
+            '        "WatchList1": ["The message."]'
+            '     },'
+            '}')
+
+        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
+                                       expected_logs='In section "MESSAGE_RULES", the following definitions are not used and should be removed: WatchList1\n')
+
+    def test_cc_rule_with_undefined_definition_with_suggestion(self):
+        watch_list = (
+            '{'
+            '    "DEFINITIONS": {'
+            '        "WatchList1": {'
+            '            "filename": r".*\\MyFileName\\.cpp",'
+            '        },'
+            '     },'
+            '    "CC_RULES": {'
+            '        "WatchList": ["levin@chromium.org"]'
+            '     },'
+            '    "MESSAGE_RULES": {'
+            '        "WatchList1": ["levin@chromium.org"]'
+            '     },'
+            '}')
+
+        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
+                                       expected_logs='In section "CC_RULES", the following definitions are not used and should be removed: WatchList'
+                                       + '\n\nPerhaps it should be WatchList1.\n')
diff --git a/Tools/Scripts/webkitpy/common/watchlist/watchlistrule.py b/Tools/Scripts/webkitpy/common/watchlist/watchlistrule.py
new file mode 100644
index 0000000..6987508
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/watchlist/watchlistrule.py
@@ -0,0 +1,46 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class WatchListRule:
+    '''A rule with instructions to follow when the rule is satisfied.'''
+    def __init__(self, complex_definition, instructions):
+        self.definitions_to_match = complex_definition.split('|')
+        self._instructions = instructions
+
+    def match(self, matching_definitions):
+        for test_definition in self.definitions_to_match:
+            if test_definition in matching_definitions:
+                return True
+        return False
+
+    def instructions(self):
+        return self._instructions
+
+    def remove_instruction(self, instruction):
+        self._instructions.remove(instruction)
diff --git a/Tools/Scripts/webkitpy/common/watchlist/watchlistrule_unittest.py b/Tools/Scripts/webkitpy/common/watchlist/watchlistrule_unittest.py
new file mode 100644
index 0000000..92aaf34
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/watchlist/watchlistrule_unittest.py
@@ -0,0 +1,62 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+import unittest
+from webkitpy.common.watchlist.watchlistrule import WatchListRule
+
+
+class WatchListRuleTest(unittest.TestCase):
+    def test_instruction_list(self):
+        instructions = ['a', 'b']
+        rule = WatchListRule('definition1', instructions[:])
+        self.assertEqual(instructions, rule.instructions())
+
+    def test_remove_instruction(self):
+        instructions = ['a', 'b']
+        rule = WatchListRule('definition1', instructions[:])
+        rule.remove_instruction('b')
+        self.assertEqual(['a'], rule.instructions())
+
+    def test_simple_definition(self):
+        definition_name = 'definition1'
+        rule = WatchListRule(definition_name, [])
+        self.assertTrue(rule.match([definition_name]))
+        self.assertFalse(rule.match([definition_name + '1']))
+
+    def test_complex_definition(self):
+        definition_name1 = 'definition1'
+        definition_name2 = 'definition2'
+        definition_name3 = 'definition3'
+        rule = WatchListRule(definition_name1 + '|' + definition_name2 + '|' + definition_name3, [])
+        self.assertTrue(rule.match([definition_name1]))
+        self.assertTrue(rule.match([definition_name2]))
+        self.assertTrue(rule.match([definition_name3]))
+        self.assertFalse(rule.match([definition_name1 + '1']))
+        self.assertFalse(rule.match([definition_name2 + '1']))
+        self.assertFalse(rule.match([definition_name3 + '1']))
diff --git a/Tools/Scripts/webkitpy/common/webkitunittest.py b/Tools/Scripts/webkitpy/common/webkitunittest.py
new file mode 100644
index 0000000..7b650a1
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/webkitunittest.py
@@ -0,0 +1,49 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+'''Basic unit test functionality.'''
+
+import re
+import unittest
+
+
+class TestCase(unittest.TestCase):
+    def setUp(self):
+        # For versions of Python before 2.7.
+        if 'assertRaisesRegexp' not in dir(self):
+            self.assertRaisesRegexp = self._assertRaisesRegexp
+
+    def _assertRaisesRegexp(self, expected_exception, regex_message, callable, *args):
+        try:
+            callable(*args)
+        except Exception, exception:
+            self.assertTrue(issubclass(exception.__class__, expected_exception),
+                            'Exception type was unexpected.')
+            self.assertTrue(re.match(regex_message, str(exception)),
+                            'Expected regex "%s"\nGot "%s"' % (regex_message, str(exception)))
+        else:
+            # The else clause only runs when callable() did not raise.
+            self.fail('No exception was raised.')
diff --git a/Tools/Scripts/webkitpy/layout_tests/__init__.py b/Tools/Scripts/webkitpy/layout_tests/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/__init__.py b/Tools/Scripts/webkitpy/layout_tests/controllers/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_finder.py b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_finder.py
new file mode 100644
index 0000000..6447c8f
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_finder.py
@@ -0,0 +1,176 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import errno
+import logging
+import re
+
+from webkitpy.layout_tests.models import test_expectations
+
+
+_log = logging.getLogger(__name__)
+
+
+class LayoutTestFinder(object):
+    def __init__(self, port, options):
+        self._port = port
+        self._options = options
+        self._filesystem = self._port.host.filesystem
+        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
+
+    def find_tests(self, options, args):
+        paths = self._strip_test_dir_prefixes(args)
+        if options.test_list:
+            paths += self._strip_test_dir_prefixes(self._read_test_names_from_file(options.test_list, self._port.TEST_PATH_SEPARATOR))
+        paths = set(paths)
+        test_files = self._port.tests(paths)
+        return (paths, test_files)
+
+    def _strip_test_dir_prefixes(self, paths):
+        return [self._strip_test_dir_prefix(path) for path in paths if path]
+
+    def _strip_test_dir_prefix(self, path):
+        # Handle both "LayoutTests/foo/bar.html" and "LayoutTests\foo\bar.html" if
+        # the filesystem uses '\\' as a directory separator.
+        if path.startswith(self.LAYOUT_TESTS_DIRECTORY + self._port.TEST_PATH_SEPARATOR):
+            return path[len(self.LAYOUT_TESTS_DIRECTORY + self._port.TEST_PATH_SEPARATOR):]
+        if path.startswith(self.LAYOUT_TESTS_DIRECTORY + self._filesystem.sep):
+            return path[len(self.LAYOUT_TESTS_DIRECTORY + self._filesystem.sep):]
+        return path
+
+    def _read_test_names_from_file(self, filenames, test_path_separator):
+        fs = self._filesystem
+        tests = []
+        for filename in filenames:
+            try:
+                if test_path_separator != fs.sep:
+                    filename = filename.replace(test_path_separator, fs.sep)
+                file_contents = fs.read_text_file(filename).split('\n')
+                for line in file_contents:
+                    line = self._strip_comments(line)
+                    if line:
+                        tests.append(line)
+            except IOError, e:
+                if e.errno == errno.ENOENT:
+                    _log.critical('')
+                    _log.critical('--test-list file "%s" not found' % filename)
+                raise
+        return tests
+
+    @staticmethod
+    def _strip_comments(line):
+        commentIndex = line.find('//')
+        if commentIndex == -1:
+            commentIndex = len(line)
+
+        line = re.sub(r'\s+', ' ', line[:commentIndex].strip())
+        if line == '':
+            return None
+        else:
+            return line
+
+    def skip_tests(self, paths, all_tests_list, expectations, http_tests):
+        all_tests = set(all_tests_list)
+
+        tests_to_skip = expectations.get_tests_with_result_type(test_expectations.SKIP)
+        if self._options.skip_failing_tests:
+            tests_to_skip.update(expectations.get_tests_with_result_type(test_expectations.FAIL))
+            tests_to_skip.update(expectations.get_tests_with_result_type(test_expectations.FLAKY))
+
+        if self._options.skipped == 'only':
+            tests_to_skip = all_tests - tests_to_skip
+        elif self._options.skipped == 'ignore':
+            tests_to_skip = set()
+        elif self._options.skipped != 'always':
+            # make sure we're explicitly running any tests passed on the command line; equivalent to 'default'.
+            tests_to_skip -= paths
+
+        # unless of course we don't want to run the HTTP tests :)
+        if not self._options.http:
+            tests_to_skip.update(set(http_tests))
+
+        return tests_to_skip
+
+    def split_into_chunks(self, test_names):
+        """split into a list to run and a set to skip, based on --run-chunk and --run-part."""
+        if not self._options.run_chunk and not self._options.run_part:
+            return test_names, set()
+
+        # If the user specifies they just want to run a subset of the tests,
+        # just grab a subset of the non-skipped tests.
+        chunk_value = self._options.run_chunk or self._options.run_part
+        try:
+            (chunk_num, chunk_len) = chunk_value.split(":")
+            chunk_num = int(chunk_num)
+            assert(chunk_num >= 0)
+            test_size = int(chunk_len)
+            assert(test_size > 0)
+        except (AssertionError, ValueError):
+            _log.critical("invalid chunk '%s'" % chunk_value)
+            return (None, None)
+
+        # Get the number of tests
+        num_tests = len(test_names)
+
+        # Get the start offset of the slice.
+        if self._options.run_chunk:
+            chunk_len = test_size
+            # In this case chunk_num can be really large. We need to make
+            # the slice fit within the current number of tests.
+            slice_start = (chunk_num * chunk_len) % num_tests
+        else:
+            # Validate the data.
+            assert(test_size <= num_tests)
+            assert(chunk_num <= test_size)
+
+            # To compute chunk_len without skipping any tests, round the test
+            # count up to the next value that divides evenly among all the parts.
+            rounded_tests = num_tests
+            if rounded_tests % test_size != 0:
+                rounded_tests = (num_tests + test_size - (num_tests % test_size))
+
+            chunk_len = rounded_tests / test_size
+            slice_start = chunk_len * (chunk_num - 1)
+            # It is fine if this slice extends past num_tests; slice_end is clamped below.
+
+        # Get the end offset of the slice.
+        slice_end = min(num_tests, slice_start + chunk_len)
+
+        tests_to_run = test_names[slice_start:slice_end]
+
+        _log.debug('chunk slice [%d:%d] of %d is %d tests' % (slice_start, slice_end, num_tests, (slice_end - slice_start)))
+
+        # If we reached the end and we don't have enough tests, we run some
+        # from the beginning.
+        if slice_end - slice_start < chunk_len:
+            extra = chunk_len - (slice_end - slice_start)
+            _log.debug('   last chunk is partial, appending [0:%d]' % extra)
+            tests_to_run.extend(test_names[0:extra])
+
+        return (tests_to_run, set(test_names) - set(tests_to_run))
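To make the --run-part rounding above concrete, here is a standalone sketch of the same arithmetic (illustrative only; part_slice is a hypothetical helper, not part of webkitpy):

    def part_slice(num_tests, chunk_num, num_parts):
        # Round the test count up to a multiple of num_parts so no test is skipped,
        # then take the 1-based chunk_num-th slice of that rounded length.
        rounded_tests = num_tests
        if rounded_tests % num_parts != 0:
            rounded_tests = num_tests + num_parts - (num_tests % num_parts)
        chunk_len = rounded_tests // num_parts
        slice_start = chunk_len * (chunk_num - 1)
        slice_end = min(num_tests, slice_start + chunk_len)
        return slice_start, slice_end

    # With 10 tests split into 3 parts, the parts cover [0:4], [4:8] and [8:10].
    assert part_slice(10, 1, 3) == (0, 4)
    assert part_slice(10, 2, 3) == (4, 8)
    assert part_slice(10, 3, 3) == (8, 10)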
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py
new file mode 100644
index 0000000..17cbe31
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py
@@ -0,0 +1,631 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import math
+import re
+import threading
+import time
+
+from webkitpy.common import message_pool
+from webkitpy.layout_tests.controllers import single_test_runner
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.layout_tests.models import test_results
+from webkitpy.tool import grammar
+
+
+_log = logging.getLogger(__name__)
+
+
+TestExpectations = test_expectations.TestExpectations
+
+# Export this so callers don't need to know about message pools.
+WorkerException = message_pool.WorkerException
+
+
+class TestRunInterruptedException(Exception):
+    """Raised when a test run should be stopped immediately."""
+    def __init__(self, reason):
+        Exception.__init__(self)
+        self.reason = reason
+        self.msg = reason
+
+    def __reduce__(self):
+        return self.__class__, (self.reason,)
+
+
+class LayoutTestRunner(object):
+    def __init__(self, options, port, printer, results_directory, expectations, test_is_slow_fn):
+        self._options = options
+        self._port = port
+        self._printer = printer
+        self._results_directory = results_directory
+        self._expectations = None
+        self._test_is_slow = test_is_slow_fn
+        self._sharder = Sharder(self._port.split_test, self._port.TEST_PATH_SEPARATOR, self._options.max_locked_shards)
+
+        self._current_result_summary = None
+        self._needs_http = None
+        self._needs_websockets = None
+        self._retrying = False
+        self._test_files_list = []
+        self._all_results = []
+        self._group_stats = {}
+        self._worker_stats = {}
+        self._filesystem = self._port.host.filesystem
+
+    def test_key(self, test_name):
+        return self._sharder.test_key(test_name)
+
+    def run_tests(self, test_inputs, expectations, result_summary, num_workers, needs_http, needs_websockets, retrying):
+        """Returns a tuple of (interrupted, keyboard_interrupted, thread_timings, test_timings, individual_test_timings):
+            interrupted is whether the run was interrupted
+            keyboard_interrupted is whether the interruption was because someone typed Ctrl^C
+            thread_timings is a list of dicts with the total runtime
+                of each thread with 'name', 'num_tests', 'total_time' properties
+            test_timings is a list of timings for each sharded subdirectory
+                of the form [time, directory_name, num_tests]
+            individual_test_timings is a list of run times for each test
+                in the form {filename:filename, test_run_time:test_run_time}
+            result_summary: summary object to populate with the results
+        """
+        self._current_result_summary = result_summary
+        self._expectations = expectations
+        self._needs_http = needs_http
+        self._needs_websockets = needs_websockets
+        self._retrying = retrying
+        self._test_files_list = [test_input.test_name for test_input in test_inputs]
+        self._printer.num_tests = len(self._test_files_list)
+        self._printer.num_completed = 0
+
+        self._all_results = []
+        self._group_stats = {}
+        self._worker_stats = {}
+        self._has_http_lock = False
+        self._remaining_locked_shards = []
+
+        keyboard_interrupted = False
+        interrupted = False
+
+        self._printer.write_update('Sharding tests ...')
+        locked_shards, unlocked_shards = self._sharder.shard_tests(test_inputs, int(self._options.child_processes), self._options.fully_parallel)
+
+        # FIXME: We don't have a good way to coordinate the workers so that
+        # they don't try to run the shards that need a lock if we don't actually
+        # have the lock. The easiest solution at the moment is to grab the
+        # lock at the beginning of the run, and then run all of the locked
+        # shards first. This minimizes the time spent holding the lock, but
+        # means that we won't be running tests while we're waiting for the lock.
+        # If this becomes a problem in practice we'll need to change this.
+
+        all_shards = locked_shards + unlocked_shards
+        self._remaining_locked_shards = locked_shards
+        if self._port.requires_http_server() or (locked_shards and self._options.http):
+            self.start_servers_with_lock(2 * min(num_workers, len(locked_shards)))
+
+        num_workers = min(num_workers, len(all_shards))
+        self._printer.print_workers_and_shards(num_workers, len(all_shards), len(locked_shards))
+
+        if self._options.dry_run:
+            return (interrupted, keyboard_interrupted, self._worker_stats.values(), self._group_stats, self._all_results)
+
+        self._printer.write_update('Starting %s ...' % grammar.pluralize('worker', num_workers))
+
+        try:
+            with message_pool.get(self, self._worker_factory, num_workers, self._port.worker_startup_delay_secs(), self._port.host) as pool:
+                pool.run(('test_list', shard.name, shard.test_inputs) for shard in all_shards)
+        except KeyboardInterrupt:
+            self._printer.flush()
+            self._printer.writeln('Interrupted, exiting ...')
+            keyboard_interrupted = True
+        except TestRunInterruptedException, e:
+            _log.warning(e.reason)
+            interrupted = True
+        except Exception, e:
+            _log.debug('%s("%s") raised, exiting' % (e.__class__.__name__, str(e)))
+            raise
+        finally:
+            self.stop_servers_with_lock()
+
+        # FIXME: should this be a class instead of a tuple?
+        return (interrupted, keyboard_interrupted, self._worker_stats.values(), self._group_stats, self._all_results)
+
+    def _worker_factory(self, worker_connection):
+        results_directory = self._results_directory
+        if self._retrying:
+            self._filesystem.maybe_make_directory(self._filesystem.join(self._results_directory, 'retries'))
+            results_directory = self._filesystem.join(self._results_directory, 'retries')
+        return Worker(worker_connection, results_directory, self._options)
+
+    def _mark_interrupted_tests_as_skipped(self, result_summary):
+        for test_name in self._test_files_list:
+            if test_name not in result_summary.results:
+                result = test_results.TestResult(test_name, [test_failures.FailureEarlyExit()])
+                # FIXME: We probably need to loop here if there are multiple iterations.
+                # FIXME: Also, these results are really neither expected nor unexpected. We probably
+                # need a third type of result.
+                result_summary.add(result, expected=False, test_is_slow=self._test_is_slow(test_name))
+
+    def _interrupt_if_at_failure_limits(self, result_summary):
+        # Note: The messages in this method are constructed to match old-run-webkit-tests
+        # so that existing buildbot grep rules work.
+        def interrupt_if_at_failure_limit(limit, failure_count, result_summary, message):
+            if limit and failure_count >= limit:
+                message += " %d tests run." % (result_summary.expected + result_summary.unexpected)
+                self._mark_interrupted_tests_as_skipped(result_summary)
+                raise TestRunInterruptedException(message)
+
+        interrupt_if_at_failure_limit(
+            self._options.exit_after_n_failures,
+            result_summary.unexpected_failures,
+            result_summary,
+            "Exiting early after %d failures." % result_summary.unexpected_failures)
+        interrupt_if_at_failure_limit(
+            self._options.exit_after_n_crashes_or_timeouts,
+            result_summary.unexpected_crashes + result_summary.unexpected_timeouts,
+            result_summary,
+            # This differs from ORWT because it does not include WebProcess crashes.
+            "Exiting early after %d crashes and %d timeouts." % (result_summary.unexpected_crashes, result_summary.unexpected_timeouts))
+
+    def _update_summary_with_result(self, result_summary, result):
+        if result.type == test_expectations.SKIP:
+            exp_str = got_str = 'SKIP'
+            expected = True
+        else:
+            expected = self._expectations.matches_an_expected_result(result.test_name, result.type, self._options.pixel_tests or result.reftest_type)
+            exp_str = self._expectations.get_expectations_string(result.test_name)
+            got_str = self._expectations.expectation_to_string(result.type)
+
+        result_summary.add(result, expected, self._test_is_slow(result.test_name))
+
+        self._printer.print_finished_test(result, expected, exp_str, got_str)
+
+        self._interrupt_if_at_failure_limits(result_summary)
+
+    def start_servers_with_lock(self, number_of_servers):
+        self._printer.write_update('Acquiring http lock ...')
+        self._port.acquire_http_lock()
+        if self._needs_http:
+            self._printer.write_update('Starting HTTP server ...')
+            self._port.start_http_server(number_of_servers=number_of_servers)
+        if self._needs_websockets:
+            self._printer.write_update('Starting WebSocket server ...')
+            self._port.start_websocket_server()
+        self._has_http_lock = True
+
+    def stop_servers_with_lock(self):
+        if self._has_http_lock:
+            if self._needs_http:
+                self._printer.write_update('Stopping HTTP server ...')
+                self._port.stop_http_server()
+            if self._needs_websockets:
+                self._printer.write_update('Stopping WebSocket server ...')
+                self._port.stop_websocket_server()
+            self._printer.write_update('Releasing server lock ...')
+            self._port.release_http_lock()
+            self._has_http_lock = False
+
+    def handle(self, name, source, *args):
+        method = getattr(self, '_handle_' + name, None)
+        if method:
+            return method(source, *args)
+        raise AssertionError('unknown message %s received from %s, args=%s' % (name, source, repr(args)))
+
+    def _handle_started_test(self, worker_name, test_input, test_timeout_sec):
+        self._printer.print_started_test(test_input.test_name)
+
+    def _handle_finished_test_list(self, worker_name, list_name, num_tests, elapsed_time):
+        self._group_stats[list_name] = (num_tests, elapsed_time)
+
+        def find(name, test_lists):
+            for i in range(len(test_lists)):
+                if test_lists[i].name == name:
+                    return i
+            return -1
+
+        index = find(list_name, self._remaining_locked_shards)
+        if index >= 0:
+            self._remaining_locked_shards.pop(index)
+            if not self._remaining_locked_shards and not self._port.requires_http_server():
+                self.stop_servers_with_lock()
+
+    def _handle_finished_test(self, worker_name, result, elapsed_time, log_messages=[]):
+        self._worker_stats.setdefault(worker_name, {'name': worker_name, 'num_tests': 0, 'total_time': 0})
+        self._worker_stats[worker_name]['total_time'] += elapsed_time
+        self._worker_stats[worker_name]['num_tests'] += 1
+        self._all_results.append(result)
+        self._update_summary_with_result(self._current_result_summary, result)
+
+
+class Worker(object):
+    def __init__(self, caller, results_directory, options):
+        self._caller = caller
+        self._worker_number = caller.worker_number
+        self._name = caller.name
+        self._results_directory = results_directory
+        self._options = options
+
+        # The remaining fields are initialized in start()
+        self._host = None
+        self._port = None
+        self._batch_size = None
+        self._batch_count = None
+        self._filesystem = None
+        self._driver = None
+        self._tests_run_file = None
+        self._tests_run_filename = None
+
+    def __del__(self):
+        self.stop()
+
+    def start(self):
+        """This method is called when the object is starting to be used and it is safe
+        for the object to create state that does not need to be pickled (usually this means
+        it is called in a child process)."""
+        self._host = self._caller.host
+        self._filesystem = self._host.filesystem
+        self._port = self._host.port_factory.get(self._options.platform, self._options)
+
+        self._batch_count = 0
+        self._batch_size = self._options.batch_size or 0
+        tests_run_filename = self._filesystem.join(self._results_directory, "tests_run%d.txt" % self._worker_number)
+        self._tests_run_file = self._filesystem.open_text_file_for_writing(tests_run_filename)
+
+    def handle(self, name, source, test_list_name, test_inputs):
+        assert name == 'test_list'
+        start_time = time.time()
+        for test_input in test_inputs:
+            self._run_test(test_input)
+        elapsed_time = time.time() - start_time
+        self._caller.post('finished_test_list', test_list_name, len(test_inputs), elapsed_time)
+
+    def _update_test_input(self, test_input):
+        if test_input.reference_files is None:
+            # Lazy initialization.
+            test_input.reference_files = self._port.reference_files(test_input.test_name)
+        if test_input.reference_files:
+            test_input.should_run_pixel_test = True
+        else:
+            test_input.should_run_pixel_test = self._port.should_run_as_pixel_test(test_input)
+
+    def _run_test(self, test_input):
+        self._batch_count += 1
+
+        stop_when_done = False
+        if self._batch_size > 0 and self._batch_count >= self._batch_size:
+            self._batch_count = 0
+            stop_when_done = True
+
+        self._update_test_input(test_input)
+        test_timeout_sec = self._timeout(test_input)
+        start = time.time()
+        self._caller.post('started_test', test_input, test_timeout_sec)
+
+        result = self._run_test_with_timeout(test_input, test_timeout_sec, stop_when_done)
+
+        elapsed_time = time.time() - start
+        self._caller.post('finished_test', result, elapsed_time)
+
+        self._clean_up_after_test(test_input, result)
+
+    def stop(self):
+        _log.debug("%s cleaning up" % self._name)
+        self._kill_driver()
+        if self._tests_run_file:
+            self._tests_run_file.close()
+            self._tests_run_file = None
+
+    def _timeout(self, test_input):
+        """Compute the appropriate timeout value for a test."""
+        # The DumpRenderTree watchdog uses 2.5x the timeout; we want to be
+        # larger than that. We also add a little more padding if we're
+        # running tests in a separate thread.
+        #
+        # Note that we need to convert the test timeout from a
+        # string value in milliseconds to a float for Python.
+        driver_timeout_sec = 3.0 * float(test_input.timeout) / 1000.0
+        if not self._options.run_singly:
+            return driver_timeout_sec
+
+        thread_padding_sec = 1.0
+        thread_timeout_sec = driver_timeout_sec + thread_padding_sec
+        return thread_timeout_sec
+
+    def _kill_driver(self):
+        # Be careful about how and when we kill the driver; if driver.stop()
+        # raises an exception, this routine may get re-entered via __del__.
+        driver = self._driver
+        self._driver = None
+        if driver:
+            _log.debug("%s killing driver" % self._name)
+            driver.stop()
+
+    def _run_test_with_timeout(self, test_input, timeout, stop_when_done):
+        if self._options.run_singly:
+            return self._run_test_in_another_thread(test_input, timeout, stop_when_done)
+        return self._run_test_in_this_thread(test_input, stop_when_done)
+
+    def _clean_up_after_test(self, test_input, result):
+        test_name = test_input.test_name
+        self._tests_run_file.write(test_name + "\n")
+
+        if result.failures:
+            # Check and kill DumpRenderTree if we need to.
+            if any([f.driver_needs_restart() for f in result.failures]):
+                self._kill_driver()
+                # Reset the batch count since the shell just bounced.
+                self._batch_count = 0
+
+            # Print the error message(s).
+            _log.debug("%s %s failed:" % (self._name, test_name))
+            for f in result.failures:
+                _log.debug("%s  %s" % (self._name, f.message()))
+        elif result.type == test_expectations.SKIP:
+            _log.debug("%s %s skipped" % (self._name, test_name))
+        else:
+            _log.debug("%s %s passed" % (self._name, test_name))
+
+    def _run_test_in_another_thread(self, test_input, thread_timeout_sec, stop_when_done):
+        """Run a test in a separate thread, enforcing a hard time limit.
+
+        Since we can only detect the termination of a thread, not any internal
+        state or progress, we can only run per-test timeouts when running test
+        files singly.
+
+        Args:
+          test_input: Object containing the test filename and timeout
+          thread_timeout_sec: time to wait before killing the driver process.
+        Returns:
+          A TestResult
+        """
+        worker = self
+
+        driver = self._port.create_driver(self._worker_number)
+
+        class SingleTestThread(threading.Thread):
+            def __init__(self):
+                threading.Thread.__init__(self)
+                self.result = None
+
+            def run(self):
+                self.result = worker._run_single_test(driver, test_input, stop_when_done)
+
+        thread = SingleTestThread()
+        thread.start()
+        thread.join(thread_timeout_sec)
+        result = thread.result
+        failures = []
+        if thread.isAlive():
+            # If join() returned with the thread still running, the
+            # DumpRenderTree is completely hung and there's nothing
+            # more we can do with it.  We have to kill all the
+            # DumpRenderTrees to free it up. If we're running more than
+            # one DumpRenderTree thread, we'll end up killing the other
+            # DumpRenderTrees too, introducing spurious crashes. We accept
+            # that tradeoff in order to avoid losing the rest of this
+            # thread's results.
+            _log.error('Test thread hung: killing all DumpRenderTrees')
+            failures = [test_failures.FailureTimeout()]
+
+        driver.stop()
+
+        if not result:
+            result = test_results.TestResult(test_input.test_name, failures=failures, test_run_time=0)
+        return result
+
+    def _run_test_in_this_thread(self, test_input, stop_when_done):
+        """Run a single test file using a shared DumpRenderTree process.
+
+        Args:
+          test_input: Object containing the test filename, uri and timeout
+
+        Returns: a TestResult object.
+        """
+        if self._driver and self._driver.has_crashed():
+            self._kill_driver()
+        if not self._driver:
+            self._driver = self._port.create_driver(self._worker_number)
+        return self._run_single_test(self._driver, test_input, stop_when_done)
+
+    def _run_single_test(self, driver, test_input, stop_when_done):
+        return single_test_runner.run_single_test(self._port, self._options,
+            test_input, driver, self._name, stop_when_done)
+
+
+class TestShard(object):
+    """A test shard is a named list of TestInputs."""
+
+    def __init__(self, name, test_inputs):
+        self.name = name
+        self.test_inputs = test_inputs
+        self.requires_lock = test_inputs[0].requires_lock
+
+    def __repr__(self):
+        return "TestShard(name='%s', test_inputs=%s, requires_lock=%s'" % (self.name, self.test_inputs, self.requires_lock)
+
+    def __eq__(self, other):
+        return self.name == other.name and self.test_inputs == other.test_inputs
+
+
+class Sharder(object):
+    def __init__(self, test_split_fn, test_path_separator, max_locked_shards):
+        self._split = test_split_fn
+        self._sep = test_path_separator
+        self._max_locked_shards = max_locked_shards
+
+    def shard_tests(self, test_inputs, num_workers, fully_parallel):
+        """Groups tests into batches.
+        This helps ensure that tests that depend on each other (aka bad tests!)
+        continue to run together, as most cross-test dependencies tend to
+        occur within the same directory.
+        Returns:
+            Two lists of TestShards. The first contains tests that must only be
+            run under the server lock; the second can be run whenever.
+        """
+
+        # FIXME: Move all of the sharding logic out of manager into its
+        # own class or module. Consider grouping it with the chunking logic
+        # in prepare_lists as well.
+        if num_workers == 1:
+            return self._shard_in_two(test_inputs)
+        elif fully_parallel:
+            return self._shard_every_file(test_inputs)
+        return self._shard_by_directory(test_inputs, num_workers)
+
+    def _shard_in_two(self, test_inputs):
+        """Returns two lists of shards, one with all the tests requiring a lock and one with the rest.
+
+        This is used when there's only one worker, to minimize the per-shard overhead."""
+        locked_inputs = []
+        unlocked_inputs = []
+        for test_input in test_inputs:
+            if test_input.requires_lock:
+                locked_inputs.append(test_input)
+            else:
+                unlocked_inputs.append(test_input)
+
+        locked_shards = []
+        unlocked_shards = []
+        if locked_inputs:
+            locked_shards = [TestShard('locked_tests', locked_inputs)]
+        if unlocked_inputs:
+            unlocked_shards = [TestShard('unlocked_tests', unlocked_inputs)]
+
+        return locked_shards, unlocked_shards
+
+    def _shard_every_file(self, test_inputs):
+        """Returns two lists of shards, each shard containing a single test file.
+
+        This mode gets maximal parallelism at the cost of much higher flakiness."""
+        locked_shards = []
+        unlocked_shards = []
+        for test_input in test_inputs:
+            # Note that we use a '.' for the shard name; the name doesn't really
+            # matter, and the only other meaningful value would be the filename,
+            # which would be really redundant.
+            if test_input.requires_lock:
+                locked_shards.append(TestShard('.', [test_input]))
+            else:
+                unlocked_shards.append(TestShard('.', [test_input]))
+
+        return locked_shards, unlocked_shards
+
+    def _shard_by_directory(self, test_inputs, num_workers):
+        """Returns two lists of shards, each shard containing all the files in a directory.
+
+        This is the default mode, and gets as much parallelism as we can while
+        minimizing flakiness caused by inter-test dependencies."""
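+        # For example (mirroring the unit tests in this patch), both
+        # 'dom/html/level2/html/HTMLAnchorElement03.html' and
+        # 'dom/html/level2/html/HTMLAnchorElement06.html' end up in a single
+        # 'dom/html/level2/html' shard.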
+        locked_shards = []
+        unlocked_shards = []
+        tests_by_dir = {}
+        # FIXME: Given that the tests are already sorted by directory,
+        # we can probably rewrite this to be clearer and faster.
+        for test_input in test_inputs:
+            directory = self._split(test_input.test_name)[0]
+            tests_by_dir.setdefault(directory, [])
+            tests_by_dir[directory].append(test_input)
+
+        for directory, test_inputs in tests_by_dir.iteritems():
+            shard = TestShard(directory, test_inputs)
+            if test_inputs[0].requires_lock:
+                locked_shards.append(shard)
+            else:
+                unlocked_shards.append(shard)
+
+        # Sort the shards by directory name.
+        locked_shards.sort(key=lambda shard: shard.name)
+        unlocked_shards.sort(key=lambda shard: shard.name)
+
+        # Put a ceiling on the number of locked shards, so that we
+        # don't hammer the servers too badly.
+
+        # FIXME: For now, limit to one shard unless it is overridden
+        # with --max-locked-shards. After testing to make sure we
+        # can handle multiple shards, we should probably do something like
+        # limit this to no more than a quarter of all workers, e.g.:
+        # return max(math.ceil(num_workers / 4.0), 1)
+        return (self._resize_shards(locked_shards, self._max_locked_shards, 'locked_shard'),
+                unlocked_shards)
+
+    def _resize_shards(self, old_shards, max_new_shards, shard_name_prefix):
+        """Takes a list of shards and redistributes the tests into no more
+        than |max_new_shards| new shards."""
+
+        # This implementation assumes that each input shard only contains tests from a
+        # single directory, and that tests in each shard must remain together; as a
+        # result, a given input shard is never split between output shards.
+        #
+        # Each output shard contains the tests from one or more input shards and
+        # hence may contain tests from multiple directories.
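+        #
+        # For example, resizing 5 locked directory shards with
+        # max_new_shards == 2 gives num_old_per_new = ceil(5 / 2) = 3, so the
+        # first new shard absorbs 3 old shards and the second absorbs 2.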
+
+        def divide_and_round_up(numerator, divisor):
+            return int(math.ceil(float(numerator) / divisor))
+
+        def extract_and_flatten(shards):
+            test_inputs = []
+            for shard in shards:
+                test_inputs.extend(shard.test_inputs)
+            return test_inputs
+
+        def split_at(seq, index):
+            return (seq[:index], seq[index:])
+
+        num_old_per_new = divide_and_round_up(len(old_shards), max_new_shards)
+        new_shards = []
+        remaining_shards = old_shards
+        while remaining_shards:
+            some_shards, remaining_shards = split_at(remaining_shards, num_old_per_new)
+            new_shards.append(TestShard('%s_%d' % (shard_name_prefix, len(new_shards) + 1), extract_and_flatten(some_shards)))
+        return new_shards
+
+    def test_key(self, test_name):
+        """Turns a test name into a list with two sublists, the natural key of the
+        dirname, and the natural key of the basename.
+
+        This can be used when sorting paths so that files in a directory.
+        directory are kept together rather than being mixed in with files in
+        subdirectories."""
+        dirname, basename = self._split(test_name)
+        return (self.natural_sort_key(dirname + self._sep), self.natural_sort_key(basename))
+
+    @staticmethod
+    def natural_sort_key(string_to_split):
+        """ Turns a string into a list of string and number chunks, i.e. "z23a" -> ["z", 23, "a"]
+
+        This can be used to implement "natural sort" order. See:
+        http://www.codinghorror.com/blog/2007/12/sorting-for-humans-natural-sort-order.html
+        http://nedbatchelder.com/blog/200712.html#e20071211T054956
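+
+        For example, "foo_2.html" becomes ["foo_", 2, ".html"], which sorts
+        before "foo_10.html" (["foo_", 10, ".html"]) because 2 < 10.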
+        """
+        def tryint(val):
+            try:
+                return int(val)
+            except ValueError:
+                return val
+
+        return [tryint(chunk) for chunk in re.split(r'(\d+)', string_to_split)]
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py
new file mode 100644
index 0000000..4efa4e0
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py
@@ -0,0 +1,382 @@
+#!/usr/bin/python
+# Copyright (C) 2012 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.layout_tests import run_webkit_tests
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.layout_tests.models.result_summary import ResultSummary
+from webkitpy.layout_tests.models.test_input import TestInput
+from webkitpy.layout_tests.models.test_results import TestResult
+from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner, Sharder, TestRunInterruptedException
+
+
+TestExpectations = test_expectations.TestExpectations
+
+
+class FakePrinter(object):
+    num_completed = 0
+    num_tests = 0
+
+    def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
+        pass
+
+    def print_started_test(self, test_name):
+        pass
+
+    def print_finished_test(self, result, expected, exp_str, got_str):
+        pass
+
+    def write(self, msg):
+        pass
+
+    def write_update(self, msg):
+        pass
+
+    def flush(self):
+        pass
+
+
+class LockCheckingRunner(LayoutTestRunner):
+    def __init__(self, port, options, printer, tester, http_lock):
+        super(LockCheckingRunner, self).__init__(options, port, printer, port.results_directory(), TestExpectations(port, []), lambda test_name: False)
+        self._finished_list_called = False
+        self._tester = tester
+        self._should_have_http_lock = http_lock
+
+    def handle_finished_list(self, source, list_name, num_tests, elapsed_time):
+        if not self._finished_list_called:
+            self._tester.assertEquals(list_name, 'locked_tests')
+            self._tester.assertTrue(self._remaining_locked_shards)
+            self._tester.assertTrue(self._has_http_lock is self._should_have_http_lock)
+
+        super(LockCheckingRunner, self).handle_finished_list(source, list_name, num_tests, elapsed_time)
+
+        if not self._finished_list_called:
+            self._tester.assertEquals(self._remaining_locked_shards, [])
+            self._tester.assertFalse(self._has_http_lock)
+            self._finished_list_called = True
+
+
+class LayoutTestRunnerTests(unittest.TestCase):
+    def _runner(self, port=None):
+        # FIXME: we shouldn't have to use run_webkit_tests.py to get the options we need.
+        options = run_webkit_tests.parse_args(['--platform', 'test-mac-snowleopard'])[0]
+        options.child_processes = '1'
+
+        host = MockHost()
+        port = port or host.port_factory.get(options.platform, options=options)
+        return LockCheckingRunner(port, options, FakePrinter(), self, True)
+
+    def _result_summary(self, runner, tests):
+        return ResultSummary(TestExpectations(runner._port, tests), tests, 1, set())
+
+    def _run_tests(self, runner, tests):
+        test_inputs = [TestInput(test, 6000) for test in tests]
+        expectations = TestExpectations(runner._port, tests)
+        runner.run_tests(test_inputs, expectations, self._result_summary(runner, tests),
+            num_workers=1, needs_http=any('http' in test for test in tests), needs_websockets=any(['websocket' in test for test in tests]), retrying=False)
+
+    def test_http_locking(self):
+        runner = self._runner()
+        self._run_tests(runner, ['http/tests/passes/text.html', 'passes/text.html'])
+
+    def test_perf_locking(self):
+        runner = self._runner()
+        self._run_tests(runner, ['http/tests/passes/text.html', 'perf/foo/test.html'])
+
+    def test_interrupt_if_at_failure_limits(self):
+        runner = self._runner()
+        runner._options.exit_after_n_failures = None
+        runner._options.exit_after_n_crashes_or_timeouts = None
+        test_names = ['passes/text.html', 'passes/image.html']
+        runner._test_files_list = test_names
+
+        result_summary = self._result_summary(runner, test_names)
+        result_summary.unexpected_failures = 100
+        result_summary.unexpected_crashes = 50
+        result_summary.unexpected_timeouts = 50
+        # No exception when the exit_after* options are None.
+        runner._interrupt_if_at_failure_limits(result_summary)
+
+        # No exception when we haven't hit the limit yet.
+        runner._options.exit_after_n_failures = 101
+        runner._options.exit_after_n_crashes_or_timeouts = 101
+        runner._interrupt_if_at_failure_limits(result_summary)
+
+        # Interrupt if we've exceeded either limit:
+        runner._options.exit_after_n_crashes_or_timeouts = 10
+        self.assertRaises(TestRunInterruptedException, runner._interrupt_if_at_failure_limits, result_summary)
+        self.assertEquals(result_summary.results['passes/text.html'].type, test_expectations.SKIP)
+        self.assertEquals(result_summary.results['passes/image.html'].type, test_expectations.SKIP)
+
+        runner._options.exit_after_n_crashes_or_timeouts = None
+        runner._options.exit_after_n_failures = 10
+        self.assertRaises(TestRunInterruptedException, runner._interrupt_if_at_failure_limits, result_summary)
+
+    def test_update_summary_with_result(self):
+        # Reftests expected to be image mismatch should be respected when pixel_tests=False.
+        runner = self._runner()
+        runner._options.pixel_tests = False
+        test = 'failures/expected/reftest.html'
+        expectations = TestExpectations(runner._port, tests=[test])
+        runner._expectations = expectations
+
+        result_summary = ResultSummary(expectations, [test], 1, set())
+        result = TestResult(test_name=test, failures=[test_failures.FailureReftestMismatchDidNotOccur()], reftest_type=['!='])
+        runner._update_summary_with_result(result_summary, result)
+        self.assertEquals(1, result_summary.expected)
+        self.assertEquals(0, result_summary.unexpected)
+
+        result_summary = ResultSummary(expectations, [test], 1, set())
+        result = TestResult(test_name=test, failures=[], reftest_type=['=='])
+        runner._update_summary_with_result(result_summary, result)
+        self.assertEquals(0, result_summary.expected)
+        self.assertEquals(1, result_summary.unexpected)
+
+    def test_servers_started(self):
+
+        def start_http_server(number_of_servers=None):
+            self.http_started = True
+
+        def start_websocket_server():
+            self.websocket_started = True
+
+        def stop_http_server():
+            self.http_stopped = True
+
+        def stop_websocket_server():
+            self.websocket_stopped = True
+
+        host = MockHost()
+        port = host.port_factory.get('test-mac-leopard')
+        port.start_http_server = start_http_server
+        port.start_websocket_server = start_websocket_server
+        port.stop_http_server = stop_http_server
+        port.stop_websocket_server = stop_websocket_server
+
+        self.http_started = self.http_stopped = self.websocket_started = self.websocket_stopped = False
+        runner = self._runner(port=port)
+        runner._needs_http = True
+        runner._needs_websockets = False
+        runner.start_servers_with_lock(number_of_servers=4)
+        self.assertEquals(self.http_started, True)
+        self.assertEquals(self.websocket_started, False)
+        runner.stop_servers_with_lock()
+        self.assertEquals(self.http_stopped, True)
+        self.assertEquals(self.websocket_stopped, False)
+
+        self.http_started = self.http_stopped = self.websocket_started = self.websocket_stopped = False
+        runner._needs_http = True
+        runner._needs_websockets = True
+        runner.start_servers_with_lock(number_of_servers=4)
+        self.assertEquals(self.http_started, True)
+        self.assertEquals(self.websocket_started, True)
+        runner.stop_servers_with_lock()
+        self.assertEquals(self.http_stopped, True)
+        self.assertEquals(self.websocket_stopped, True)
+
+        self.http_started = self.http_stopped = self.websocket_started = self.websocket_stopped = False
+        runner._needs_http = False
+        runner._needs_websockets = False
+        runner.start_servers_with_lock(number_of_servers=4)
+        self.assertEquals(self.http_started, False)
+        self.assertEquals(self.websocket_started, False)
+        runner.stop_servers_with_lock()
+        self.assertEquals(self.http_stopped, False)
+        self.assertEquals(self.websocket_stopped, False)
+
+
+class SharderTests(unittest.TestCase):
+
+    test_list = [
+        "http/tests/websocket/tests/unicode.htm",
+        "animations/keyframes.html",
+        "http/tests/security/view-source-no-refresh.html",
+        "http/tests/websocket/tests/websocket-protocol-ignored.html",
+        "fast/css/display-none-inline-style-change-crash.html",
+        "http/tests/xmlhttprequest/supported-xml-content-types.html",
+        "dom/html/level2/html/HTMLAnchorElement03.html",
+        "ietestcenter/Javascript/11.1.5_4-4-c-1.html",
+        "dom/html/level2/html/HTMLAnchorElement06.html",
+        "perf/object-keys.html",
+    ]
+
+    def get_test_input(self, test_file):
+        return TestInput(test_file, requires_lock=(test_file.startswith('http') or test_file.startswith('perf')))
+
+    def get_shards(self, num_workers, fully_parallel, test_list=None, max_locked_shards=1):
+        def split(test_name):
+            idx = test_name.rfind('/')
+            if idx != -1:
+                return (test_name[0:idx], test_name[idx + 1:])
+
+        self.sharder = Sharder(split, '/', max_locked_shards)
+        test_list = test_list or self.test_list
+        return self.sharder.shard_tests([self.get_test_input(test) for test in test_list], num_workers, fully_parallel)
+
+    def assert_shards(self, actual_shards, expected_shard_names):
+        self.assertEquals(len(actual_shards), len(expected_shard_names))
+        for i, shard in enumerate(actual_shards):
+            expected_shard_name, expected_test_names = expected_shard_names[i]
+            self.assertEquals(shard.name, expected_shard_name)
+            self.assertEquals([test_input.test_name for test_input in shard.test_inputs],
+                              expected_test_names)
+
+    def test_shard_by_dir(self):
+        locked, unlocked = self.get_shards(num_workers=2, fully_parallel=False)
+
+        # Note that although there are tests in multiple dirs that need locks,
+        # they are crammed into a single shard in order to reduce the # of
+        # workers hitting the server at once.
+        self.assert_shards(locked,
+             [('locked_shard_1',
+               ['http/tests/security/view-source-no-refresh.html',
+                'http/tests/websocket/tests/unicode.htm',
+                'http/tests/websocket/tests/websocket-protocol-ignored.html',
+                'http/tests/xmlhttprequest/supported-xml-content-types.html',
+                'perf/object-keys.html'])])
+        self.assert_shards(unlocked,
+            [('animations', ['animations/keyframes.html']),
+             ('dom/html/level2/html', ['dom/html/level2/html/HTMLAnchorElement03.html',
+                                      'dom/html/level2/html/HTMLAnchorElement06.html']),
+             ('fast/css', ['fast/css/display-none-inline-style-change-crash.html']),
+             ('ietestcenter/Javascript', ['ietestcenter/Javascript/11.1.5_4-4-c-1.html'])])
+
+    def test_shard_every_file(self):
+        locked, unlocked = self.get_shards(num_workers=2, fully_parallel=True)
+        self.assert_shards(locked,
+            [('.', ['http/tests/websocket/tests/unicode.htm']),
+             ('.', ['http/tests/security/view-source-no-refresh.html']),
+             ('.', ['http/tests/websocket/tests/websocket-protocol-ignored.html']),
+             ('.', ['http/tests/xmlhttprequest/supported-xml-content-types.html']),
+             ('.', ['perf/object-keys.html'])])
+        self.assert_shards(unlocked,
+            [('.', ['animations/keyframes.html']),
+             ('.', ['fast/css/display-none-inline-style-change-crash.html']),
+             ('.', ['dom/html/level2/html/HTMLAnchorElement03.html']),
+             ('.', ['ietestcenter/Javascript/11.1.5_4-4-c-1.html']),
+             ('.', ['dom/html/level2/html/HTMLAnchorElement06.html'])])
+
+    def test_shard_in_two(self):
+        locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False)
+        self.assert_shards(locked,
+            [('locked_tests',
+              ['http/tests/websocket/tests/unicode.htm',
+               'http/tests/security/view-source-no-refresh.html',
+               'http/tests/websocket/tests/websocket-protocol-ignored.html',
+               'http/tests/xmlhttprequest/supported-xml-content-types.html',
+               'perf/object-keys.html'])])
+        self.assert_shards(unlocked,
+            [('unlocked_tests',
+              ['animations/keyframes.html',
+               'fast/css/display-none-inline-style-change-crash.html',
+               'dom/html/level2/html/HTMLAnchorElement03.html',
+               'ietestcenter/Javascript/11.1.5_4-4-c-1.html',
+               'dom/html/level2/html/HTMLAnchorElement06.html'])])
+
+    def test_shard_in_two_has_no_locked_shards(self):
+        locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False,
+             test_list=['animations/keyframe.html'])
+        self.assertEquals(len(locked), 0)
+        self.assertEquals(len(unlocked), 1)
+
+    def test_shard_in_two_has_no_unlocked_shards(self):
+        locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False,
+             test_list=['http/tests/websocket/tests/unicode.htm'])
+        self.assertEquals(len(locked), 1)
+        self.assertEquals(len(unlocked), 0)
+
+    def test_multiple_locked_shards(self):
+        locked, unlocked = self.get_shards(num_workers=4, fully_parallel=False, max_locked_shards=2)
+        self.assert_shards(locked,
+            [('locked_shard_1',
+              ['http/tests/security/view-source-no-refresh.html',
+               'http/tests/websocket/tests/unicode.htm',
+               'http/tests/websocket/tests/websocket-protocol-ignored.html']),
+             ('locked_shard_2',
+              ['http/tests/xmlhttprequest/supported-xml-content-types.html',
+               'perf/object-keys.html'])])
+
+        locked, unlocked = self.get_shards(num_workers=4, fully_parallel=False)
+        self.assert_shards(locked,
+            [('locked_shard_1',
+              ['http/tests/security/view-source-no-refresh.html',
+               'http/tests/websocket/tests/unicode.htm',
+               'http/tests/websocket/tests/websocket-protocol-ignored.html',
+               'http/tests/xmlhttprequest/supported-xml-content-types.html',
+               'perf/object-keys.html'])])
+
+
+class NaturalCompareTest(unittest.TestCase):
+    def assert_cmp(self, x, y, result):
+        self.assertEquals(cmp(Sharder.natural_sort_key(x), Sharder.natural_sort_key(y)), result)
+
+    def test_natural_compare(self):
+        self.assert_cmp('a', 'a', 0)
+        self.assert_cmp('ab', 'a', 1)
+        self.assert_cmp('a', 'ab', -1)
+        self.assert_cmp('', '', 0)
+        self.assert_cmp('', 'ab', -1)
+        self.assert_cmp('1', '2', -1)
+        self.assert_cmp('2', '1', 1)
+        self.assert_cmp('1', '10', -1)
+        self.assert_cmp('2', '10', -1)
+        self.assert_cmp('foo_1.html', 'foo_2.html', -1)
+        self.assert_cmp('foo_1.1.html', 'foo_2.html', -1)
+        self.assert_cmp('foo_1.html', 'foo_10.html', -1)
+        self.assert_cmp('foo_2.html', 'foo_10.html', -1)
+        self.assert_cmp('foo_23.html', 'foo_10.html', 1)
+        self.assert_cmp('foo_23.html', 'foo_100.html', -1)
+
+
+class KeyCompareTest(unittest.TestCase):
+    def setUp(self):
+        def split(test_name):
+            idx = test_name.rfind('/')
+            if idx != -1:
+                return (test_name[0:idx], test_name[idx + 1:])
+
+        self.sharder = Sharder(split, '/', 1)
+
+    def assert_cmp(self, x, y, result):
+        self.assertEquals(cmp(self.sharder.test_key(x), self.sharder.test_key(y)), result)
+
+    def test_test_key(self):
+        self.assert_cmp('/a', '/a', 0)
+        self.assert_cmp('/a', '/b', -1)
+        self.assert_cmp('/a2', '/a10', -1)
+        self.assert_cmp('/a2/foo', '/a10/foo', -1)
+        self.assert_cmp('/a/foo11', '/a/foo2', 1)
+        self.assert_cmp('/ab', '/a/a/b', -1)
+        self.assert_cmp('/a/a/b', '/ab', 1)
+        self.assert_cmp('/foo-bar/baz', '/foo/baz', -1)
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py b/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
new file mode 100644
index 0000000..636edd2
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
@@ -0,0 +1,612 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""
+The Manager runs a series of tests (TestType interface) against a set
+of test files.  If a test file fails a TestType, it returns a list of TestFailure
+objects to the Manager. The Manager then aggregates the TestFailures to
+create a final report.
+"""
+
+import errno
+import logging
+import math
+import Queue
+import random
+import re
+import sys
+import time
+
+from webkitpy.common import message_pool
+from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinder
+from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner, TestRunInterruptedException, WorkerException
+from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
+from webkitpy.layout_tests.layout_package import json_layout_results_generator
+from webkitpy.layout_tests.layout_package import json_results_generator
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.layout_tests.models import test_results
+from webkitpy.layout_tests.models.test_input import TestInput
+from webkitpy.layout_tests.models.result_summary import ResultSummary
+from webkitpy.layout_tests.views import printing
+
+from webkitpy.tool import grammar
+
+_log = logging.getLogger(__name__)
+
+# Builder base URL where we have the archived test results.
+BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/"
+
+TestExpectations = test_expectations.TestExpectations
+
+
+def interpret_test_failures(port, test_name, failures):
+    """Interpret test failures and returns a test result as dict.
+
+    Args:
+        port: interface to port-specific hooks
+        test_name: test name relative to layout_tests directory
+        failures: list of test failures
+    Returns:
+        A dictionary like {'is_missing_text': True, ...}
+    """
+    test_dict = {}
+    failure_types = [type(failure) for failure in failures]
+    # FIXME: get rid of all this is_* values once there is a 1:1 map between
+    # TestFailure type and test_expectations.EXPECTATION.
+    if test_failures.FailureMissingAudio in failure_types:
+        test_dict['is_missing_audio'] = True
+
+    if test_failures.FailureMissingResult in failure_types:
+        test_dict['is_missing_text'] = True
+
+    if test_failures.FailureMissingImage in failure_types or test_failures.FailureMissingImageHash in failure_types:
+        test_dict['is_missing_image'] = True
+
+    for failure in failures:
+        if isinstance(failure, test_failures.FailureImageHashMismatch) or isinstance(failure, test_failures.FailureReftestMismatch):
+            test_dict['image_diff_percent'] = failure.diff_percent
+
+    return test_dict
+
+
+def use_trac_links_in_results_html(port_obj):
+    # We only use trac links on the buildbots.
+    # Use existence of builder_name as a proxy for knowing we're on a bot.
+    return port_obj.get_option("builder_name")
+
+
+# FIXME: This should be on the Manager class (since that's the only caller)
+# or split off from Manager onto another helper class, but should not be a free function.
+# Most likely this should be made into its own class, and this super-long function
+# split into many helper functions.
+def summarize_results(port_obj, expectations, result_summary, retry_summary, test_timings, only_unexpected, interrupted):
+    """Summarize failing results as a dict.
+
+    FIXME: split this data structure into a separate class?
+
+    Args:
+        port_obj: interface to port-specific hooks
+        expectations: test_expectations.TestExpectations object
+        result_summary: summary object from initial test runs
+        retry_summary: summary object from final test run of retried tests
+        test_timings: a list of TestResult objects which contain test runtimes in seconds
+        only_unexpected: whether to return a summary only for the unexpected results
+        interrupted: whether the test run was interrupted before it completed
+    Returns:
+        A dictionary containing a summary of the unexpected results from the
+        run, with the following fields:
+        'version': a version indicator
+        'fixable': The number of fixable tests (NOW - PASS)
+        'skipped': The number of skipped tests (NOW & SKIPPED)
+        'num_regressions': The number of non-flaky failures
+        'num_flaky': The number of flaky failures
+        'num_missing': The number of tests with missing results
+        'num_passes': The number of unexpected passes
+        'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
+    """
+    results = {}
+    results['version'] = 3
+
+    tbe = result_summary.tests_by_expectation
+    tbt = result_summary.tests_by_timeline
+    results['fixable'] = len(tbt[test_expectations.NOW] - tbe[test_expectations.PASS])
+    results['skipped'] = len(tbt[test_expectations.NOW] & tbe[test_expectations.SKIP])
+
+    num_passes = 0
+    num_flaky = 0
+    num_missing = 0
+    num_regressions = 0
+    keywords = {}
+    for expectation_string, expectation_enum in TestExpectations.EXPECTATIONS.iteritems():
+        keywords[expectation_enum] = expectation_string.upper()
+
+    for modifier_string, modifier_enum in TestExpectations.MODIFIERS.iteritems():
+        keywords[modifier_enum] = modifier_string.upper()
+
+    tests = {}
+    original_results = result_summary.unexpected_results if only_unexpected else result_summary.results
+
+    for test_name, result in original_results.iteritems():
+        # Note that if a test crashed in the original run, we ignore
+        # whether or not it crashed when we retried it (if we retried it),
+        # and always consider the result not flaky.
+        expected = expectations.get_expectations_string(test_name)
+        result_type = result.type
+        actual = [keywords[result_type]]
+
+        if result_type == test_expectations.SKIP:
+            continue
+
+        test_dict = {}
+        if result.has_stderr:
+            test_dict['has_stderr'] = True
+
+        if result.reftest_type:
+            test_dict.update(reftest_type=list(result.reftest_type))
+
+        if expectations.has_modifier(test_name, test_expectations.WONTFIX):
+            test_dict['wontfix'] = True
+
+        if result_type == test_expectations.PASS:
+            num_passes += 1
+            # FIXME: include passing tests that have stderr output.
+            if expected == 'PASS':
+                continue
+        elif result_type == test_expectations.CRASH:
+            num_regressions += 1
+        elif result_type == test_expectations.MISSING:
+            if test_name in result_summary.unexpected_results:
+                num_missing += 1
+        elif test_name in result_summary.unexpected_results:
+            if test_name not in retry_summary.unexpected_results:
+                actual.extend(expectations.get_expectations_string(test_name).split(" "))
+                num_flaky += 1
+            else:
+                retry_result_type = retry_summary.unexpected_results[test_name].type
+                if result_type != retry_result_type:
+                    actual.append(keywords[retry_result_type])
+                    num_flaky += 1
+                else:
+                    num_regressions += 1
+
+        test_dict['expected'] = expected
+        test_dict['actual'] = " ".join(actual)
+        # FIXME: Set this correctly once https://webkit.org/b/37739 is fixed
+        # and only set it if there actually is stderr data.
+
+        test_dict.update(interpret_test_failures(port_obj, test_name, result.failures))
+
+        # Store test hierarchically by directory. e.g.
+        # foo/bar/baz.html: test_dict
+        # foo/bar/baz1.html: test_dict
+        #
+        # becomes
+        # foo: {
+        #     bar: {
+        #         baz.html: test_dict,
+        #         baz1.html: test_dict
+        #     }
+        # }
+        parts = test_name.split('/')
+        current_map = tests
+        for i, part in enumerate(parts):
+            if i == (len(parts) - 1):
+                current_map[part] = test_dict
+                break
+            if part not in current_map:
+                current_map[part] = {}
+            current_map = current_map[part]
+
+    results['tests'] = tests
+    results['num_passes'] = num_passes
+    results['num_flaky'] = num_flaky
+    results['num_missing'] = num_missing
+    results['num_regressions'] = num_regressions
+    results['uses_expectations_file'] = port_obj.uses_test_expectations_file()
+    results['interrupted'] = interrupted  # Does results.html have enough information to compute this itself? (by checking total number of results vs. total number of tests?)
+    results['layout_tests_dir'] = port_obj.layout_tests_dir()
+    results['has_wdiff'] = port_obj.wdiff_available()
+    results['has_pretty_patch'] = port_obj.pretty_patch_available()
+    results['pixel_tests_enabled'] = port_obj.get_option('pixel_tests')
+
+    try:
+        # We only use the svn revision for trac links in the results.html file,
+        # so don't do this by default since it takes >100ms.
+        # FIXME: Do we really need to populate this both here and in the json_results_generator?
+        if use_trac_links_in_results_html(port_obj):
+            port_obj.host.initialize_scm()
+            results['revision'] = port_obj.host.scm().head_svn_revision()
+    except Exception, e:
+        _log.warn("Failed to determine svn revision for checkout (cwd: %s, webkit_base: %s), leaving 'revision' key blank in full_results.json.\n%s" % (port_obj._filesystem.getcwd(), port_obj.path_from_webkit_base(), e))
+        # Handle cases where we're running outside of version control.
+        import traceback
+        _log.debug('Failed to learn head svn revision:')
+        _log.debug(traceback.format_exc())
+        results['revision'] = ""
+
+    return results
+
+
+class Manager(object):
+    """A class for managing running a series of tests on a series of layout
+    test files."""
+
+    def __init__(self, port, options, printer):
+        """Initialize test runner data structures.
+
+        Args:
+          port: an object implementing port-specific functionality
+          options: a dictionary of command line options
+          printer: a Printer object to record updates to.
+        """
+        self._port = port
+        self._filesystem = port.host.filesystem
+        self._options = options
+        self._printer = printer
+        self._expectations = None
+
+        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
+        self.PERF_SUBDIR = 'perf'
+        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
+        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
+
+        # disable wss server. need to install pyOpenSSL on buildbots.
+        # self._websocket_secure_server = websocket_server.PyWebSocket(
+        #        options.results_directory, use_tls=True, port=9323)
+
+        self._paths = set()
+        self._test_names = None
+        self._retrying = False
+        self._results_directory = self._port.results_directory()
+        self._finder = LayoutTestFinder(self._port, self._options)
+        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._expectations, self._test_is_slow)
+
+    def _collect_tests(self, args):
+        return self._finder.find_tests(self._options, args)
+
+    def _is_http_test(self, test):
+        return self.HTTP_SUBDIR in test or self._is_websocket_test(test)
+
+    def _is_websocket_test(self, test):
+        return self.WEBSOCKET_SUBDIR in test
+
+    def _http_tests(self):
+        return set(test for test in self._test_names if self._is_http_test(test))
+
+    def _websocket_tests(self):
+        return set(test for test in self._test_names if self._is_websocket_test(test))
+
+    def _is_perf_test(self, test):
+        return self.PERF_SUBDIR == test or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test
+
+    def _prepare_lists(self):
+        tests_to_skip = self._finder.skip_tests(self._paths, self._test_names, self._expectations, self._http_tests())
+        self._test_names = list(set(self._test_names) - tests_to_skip)
+
+        # Create a sorted list of test files so the subset chunk,
+        # if used, contains alphabetically consecutive tests.
+        if self._options.randomize_order:
+            random.shuffle(self._test_names)
+        else:
+            self._test_names.sort(key=self._runner.test_key)
+
+        self._test_names, tests_in_other_chunks = self._finder.split_into_chunks(self._test_names)
+        self._expectations.add_skipped_tests(tests_in_other_chunks)
+        tests_to_skip.update(tests_in_other_chunks)
+
+        if self._options.repeat_each > 1:
+            list_with_repetitions = []
+            for test in self._test_names:
+                list_with_repetitions += ([test] * self._options.repeat_each)
+            self._test_names = list_with_repetitions
+
+        if self._options.iterations > 1:
+            self._test_names = self._test_names * self._options.iterations
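+        # For example (derived from the two blocks above), the list [a, b] with
+        # --repeat-each=2 becomes [a, a, b, b], and --iterations=2 then yields
+        # [a, a, b, b, a, a, b, b].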
+
+        iterations = self._options.repeat_each * self._options.iterations
+        return ResultSummary(self._expectations, set(self._test_names), iterations, tests_to_skip)
+
+    def _test_input_for_file(self, test_file):
+        return TestInput(test_file,
+            self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms,
+            self._test_requires_lock(test_file))
+
+    def _test_requires_lock(self, test_file):
+        """Return True if the test needs to be locked when
+        running multiple copies of NRWTs. Perf tests are locked
+        because heavy load caused by running other tests in parallel
+        might cause some of them to timeout."""
+        return self._is_http_test(test_file) or self._is_perf_test(test_file)
+
+    def _test_is_slow(self, test_file):
+        return self._expectations.has_modifier(test_file, test_expectations.SLOW)
+
+    def needs_servers(self):
+        return any(self._test_requires_lock(test_name) for test_name in self._test_names) and self._options.http
+
+    def _set_up_run(self):
+        self._printer.write_update("Checking build ...")
+        if not self._port.check_build(self.needs_servers()):
+            _log.error("Build check failed")
+            return False
+
+        # This must be started before we check the system dependencies,
+        # since the helper may do things to make the setup correct.
+        if self._options.pixel_tests:
+            self._printer.write_update("Starting pixel test helper ...")
+            self._port.start_helper()
+
+        # Check that the system dependencies (themes, fonts, ...) are correct.
+        if not self._options.nocheck_sys_deps:
+            self._printer.write_update("Checking system dependencies ...")
+            if not self._port.check_sys_deps(self.needs_servers()):
+                self._port.stop_helper()
+                return False
+
+        if self._options.clobber_old_results:
+            self._clobber_old_results()
+
+        # Create the output directory if it doesn't already exist.
+        self._port.host.filesystem.maybe_make_directory(self._results_directory)
+
+        self._port.setup_test_run()
+        return True
+
+    def run(self, args):
+        """Run all our tests on all our test files and return the number of unexpected results (0 == success)."""
+        self._printer.write_update("Collecting tests ...")
+        try:
+            self._paths, self._test_names = self._collect_tests(args)
+        except IOError as exception:
+            # This is raised if --test-list doesn't exist
+            return -1
+
+        self._printer.write_update("Parsing expectations ...")
+        self._expectations = test_expectations.TestExpectations(self._port, self._test_names)
+
+        num_all_test_files_found = len(self._test_names)
+        result_summary = self._prepare_lists()
+
+        # Check to make sure we're not skipping every test.
+        if not self._test_names:
+            _log.critical('No tests to run.')
+            return -1
+
+        self._printer.print_found(num_all_test_files_found, len(self._test_names), self._options.repeat_each, self._options.iterations)
+        self._printer.print_expected(result_summary, self._expectations.get_tests_with_result_type)
+
+        if not self._set_up_run():
+            return -1
+
+        start_time = time.time()
+
+        interrupted, keyboard_interrupted, thread_timings, test_timings, individual_test_timings = \
+            self._run_tests(self._test_names, result_summary, int(self._options.child_processes))
+
+        # We exclude the crashes from the list of results to retry, because
+        # we want to treat even a potentially flaky crash as an error.
+
+        failures = self._get_failures(result_summary, include_crashes=self._port.should_retry_crashes(), include_missing=False)
+        retry_summary = result_summary
+        while (len(failures) and self._options.retry_failures and not self._retrying and not interrupted and not keyboard_interrupted):
+            _log.info('')
+            _log.info("Retrying %d unexpected failure(s) ..." % len(failures))
+            _log.info('')
+            self._retrying = True
+            retry_summary = ResultSummary(self._expectations, failures.keys(), 1, set())
+            # Note that we intentionally ignore the return value here.
+            self._run_tests(failures.keys(), retry_summary, 1)
+            failures = self._get_failures(retry_summary, include_crashes=True, include_missing=True)
+
+        end_time = time.time()
+
+        # Some crash logs can take a long time to be written out so look
+        # for new logs after the test run finishes.
+        self._look_for_new_crash_logs(result_summary, start_time)
+        self._look_for_new_crash_logs(retry_summary, start_time)
+        self._clean_up_run()
+
+        unexpected_results = summarize_results(self._port, self._expectations, result_summary, retry_summary, individual_test_timings, only_unexpected=True, interrupted=interrupted)
+
+        self._printer.print_results(end_time - start_time, thread_timings, test_timings, individual_test_timings, result_summary, unexpected_results)
+
+        # Re-raise a KeyboardInterrupt if necessary so the caller can handle it.
+        if keyboard_interrupted:
+            raise KeyboardInterrupt
+
+        # FIXME: remove record_results. It's just used for testing. There's no need
+        # for it to be a commandline argument.
+        if (self._options.record_results and not self._options.dry_run and not keyboard_interrupted):
+            self._port.print_leaks_summary()
+            # Write the same data to log files and upload generated JSON files to appengine server.
+            summarized_results = summarize_results(self._port, self._expectations, result_summary, retry_summary, individual_test_timings, only_unexpected=False, interrupted=interrupted)
+            self._upload_json_files(summarized_results, result_summary, individual_test_timings)
+
+        # Write the summary to disk (results.html) and display it if requested.
+        if not self._options.dry_run:
+            self._copy_results_html_file()
+            if self._options.show_results:
+                self._show_results_html_file(result_summary)
+
+        return self._port.exit_code_from_summarized_results(unexpected_results)
+
+    def _run_tests(self, tests, result_summary, num_workers):
+        test_inputs = [self._test_input_for_file(test) for test in tests]
+        needs_http = self._port.requires_http_server() or any(self._is_http_test(test) for test in tests)
+        needs_websockets = any(self._is_websocket_test(test) for test in tests)
+        return self._runner.run_tests(test_inputs, self._expectations, result_summary, num_workers, needs_http, needs_websockets, self._retrying)
+
+    def _clean_up_run(self):
+        """Restores the system after we're done running tests."""
+        _log.debug("flushing stdout")
+        sys.stdout.flush()
+        _log.debug("flushing stderr")
+        sys.stderr.flush()
+        _log.debug("stopping helper")
+        self._port.stop_helper()
+        _log.debug("cleaning up port")
+        self._port.clean_up_test_run()
+
+    def _look_for_new_crash_logs(self, result_summary, start_time):
+        """Since crash logs can take a long time to be written out if the system is
+           under stress, do a second pass at the end of the test run.
+
+           result_summary: the results of the test run
+           start_time: time the tests started at.  We're looking for crash
+               logs after that time.
+        """
+        crashed_processes = []
+        for test, result in result_summary.unexpected_results.iteritems():
+            if (result.type != test_expectations.CRASH):
+                continue
+            for failure in result.failures:
+                if not isinstance(failure, test_failures.FailureCrash):
+                    continue
+                crashed_processes.append([test, failure.process_name, failure.pid])
+
+        crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
+        if crash_logs:
+            for test, crash_log in crash_logs.iteritems():
+                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
+                writer.write_crash_log(crash_log)
+
+    def _clobber_old_results(self):
+        # Just clobber the actual test results directories since the other
+        # files in the results directory are explicitly used for cross-run
+        # tracking.
+        self._printer.write_update("Clobbering old results in %s" %
+                                   self._results_directory)
+        layout_tests_dir = self._port.layout_tests_dir()
+        possible_dirs = self._port.test_dirs()
+        for dirname in possible_dirs:
+            if self._filesystem.isdir(self._filesystem.join(layout_tests_dir, dirname)):
+                self._filesystem.rmtree(self._filesystem.join(self._results_directory, dirname))
+
+    def _get_failures(self, result_summary, include_crashes, include_missing):
+        """Filters a dict of results and returns only the failures.
+
+        Args:
+          result_summary: the results of the test run
+          include_crashes: whether crashes are included in the output.
+            We use False when finding the list of failures to retry
+            to see if the results were flaky. Although the crashes may also be
+            flaky, we treat them as if they aren't so that they're not ignored.
+          include_missing: whether tests with missing results are included.
+        Returns:
+          a dict of files -> results
+        """
+        failed_results = {}
+        for test, result in result_summary.unexpected_results.iteritems():
+            if (result.type == test_expectations.PASS or
+                (result.type == test_expectations.CRASH and not include_crashes) or
+                (result.type == test_expectations.MISSING and not include_missing)):
+                continue
+            failed_results[test] = result.type
+
+        return failed_results
+
+    def _char_for_result(self, result):
+        result = result.lower()
+        if result in TestExpectations.EXPECTATIONS:
+            result_enum_value = TestExpectations.EXPECTATIONS[result]
+        else:
+            result_enum_value = TestExpectations.MODIFIERS[result]
+        return json_layout_results_generator.JSONLayoutResultsGenerator.FAILURE_TO_CHAR[result_enum_value]
+
+    def _upload_json_files(self, summarized_results, result_summary, individual_test_timings):
+        """Writes the results of the test run as JSON files into the results
+        dir and uploads the files to the appengine server.
+
+        Args:
+          summarized_results: dict of results
+          result_summary: full summary object
+          individual_test_timings: list of test times (used by the flakiness
+            dashboard).
+        """
+        _log.debug("Writing JSON files in %s." % self._results_directory)
+
+        times_trie = json_results_generator.test_timings_trie(self._port, individual_test_timings)
+        times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
+        json_results_generator.write_json(self._filesystem, times_trie, times_json_path)
+
+        full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
+        # We write full_results.json out as jsonp because we need to load it from a file url and Chromium doesn't allow that.
+        json_results_generator.write_json(self._filesystem, summarized_results, full_results_path, callback="ADD_RESULTS")
+
+        generator = json_layout_results_generator.JSONLayoutResultsGenerator(
+            self._port, self._options.builder_name, self._options.build_name,
+            self._options.build_number, self._results_directory,
+            BUILDER_BASE_URL, individual_test_timings,
+            self._expectations, result_summary, self._test_names,
+            self._options.test_results_server,
+            "layout-tests",
+            self._options.master_name)
+
+        _log.debug("Finished writing JSON files.")
+
+        json_files = ["incremental_results.json", "full_results.json", "times_ms.json"]
+
+        generator.upload_json_files(json_files)
+
+        incremental_results_path = self._filesystem.join(self._results_directory, "incremental_results.json")
+
+        # Remove these files from the results directory so they don't take up too much space on the buildbot.
+        # The tools use the version we uploaded to the results server anyway.
+        self._filesystem.remove(times_json_path)
+        self._filesystem.remove(incremental_results_path)
+
+    def _num_digits(self, num):
+        """Returns the number of digits needed to represent the length of a
+        sequence."""
+        ndigits = 1
+        if len(num):
+            ndigits = int(math.log10(len(num))) + 1
+        return ndigits
+
+    def _copy_results_html_file(self):
+        base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
+        results_file = self._filesystem.join(base_dir, 'results.html')
+        # FIXME: What should we do if this doesn't exist (e.g., in unit tests)?
+        if self._filesystem.exists(results_file):
+            self._filesystem.copyfile(results_file, self._filesystem.join(self._results_directory, "results.html"))
+
+    def _show_results_html_file(self, result_summary):
+        """Shows the results.html page."""
+        if self._options.full_results_html:
+            test_files = result_summary.failures.keys()
+        else:
+            unexpected_failures = self._get_failures(result_summary, include_crashes=True, include_missing=True)
+            test_files = unexpected_failures.keys()
+
+        if not len(test_files):
+            return
+
+        results_filename = self._filesystem.join(self._results_directory, "results.html")
+        self._port.show_results_html_file(results_filename)
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py b/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
new file mode 100644
index 0000000..e94d133
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
@@ -0,0 +1,204 @@
+#!/usr/bin/python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for manager.py."""
+
+import sys
+import time
+import unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.layout_tests.controllers.manager import Manager, interpret_test_failures, summarize_results
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.layout_tests.models import test_results
+from webkitpy.layout_tests.models.result_summary import ResultSummary
+from webkitpy.thirdparty.mock import Mock
+from webkitpy.tool.mocktool import MockOptions
+
+
+class ManagerTest(unittest.TestCase):
+    def test_needs_servers(self):
+        def get_manager_with_tests(test_names):
+            port = Mock()  # FIXME: Use a tighter mock.
+            port.TEST_PATH_SEPARATOR = '/'
+            manager = Manager(port, options=MockOptions(http=True, max_locked_shards=1), printer=Mock())
+            manager._test_names = test_names
+            return manager
+
+        manager = get_manager_with_tests(['fast/html'])
+        self.assertFalse(manager.needs_servers())
+
+        manager = get_manager_with_tests(['http/tests/misc'])
+        self.assertTrue(manager.needs_servers())
+
+    def integration_test_needs_servers(self):
+        def get_manager_with_tests(test_names):
+            host = MockHost()
+            port = host.port_factory.get()
+            manager = Manager(port, options=MockOptions(test_list=None, http=True, max_locked_shards=1), printer=Mock())
+            manager._collect_tests(test_names)
+            return manager
+
+        manager = get_manager_with_tests(['fast/html'])
+        self.assertFalse(manager.needs_servers())
+
+        manager = get_manager_with_tests(['http/tests/mime'])
+        self.assertTrue(manager.needs_servers())
+
+        if sys.platform == 'win32':
+            manager = get_manager_with_tests(['fast\\html'])
+            self.assertFalse(manager.needs_servers())
+
+            manager = get_manager_with_tests(['http\\tests\\mime'])
+            self.assertTrue(manager.needs_servers())
+
+    def test_look_for_new_crash_logs(self):
+        def get_manager_with_tests(test_names):
+            host = MockHost()
+            port = host.port_factory.get('test-mac-leopard')
+            manager = Manager(port, options=MockOptions(test_list=None, http=True, max_locked_shards=1), printer=Mock())
+            manager._collect_tests(test_names)
+            return manager
+        host = MockHost()
+        port = host.port_factory.get('test-mac-leopard')
+        tests = ['failures/expected/crash.html']
+        expectations = test_expectations.TestExpectations(port, tests)
+        rs = ResultSummary(expectations, tests, 1, set())
+        manager = get_manager_with_tests(tests)
+        manager._look_for_new_crash_logs(rs, time.time())
+
+
+class ResultSummaryTest(unittest.TestCase):
+
+    def setUp(self):
+        host = MockHost()
+        self.port = host.port_factory.get(port_name='test')
+
+    def test_interpret_test_failures(self):
+        test_dict = interpret_test_failures(self.port, 'foo/reftest.html',
+            [test_failures.FailureImageHashMismatch(diff_percent=0.42)])
+        self.assertEqual(test_dict['image_diff_percent'], 0.42)
+
+        test_dict = interpret_test_failures(self.port, 'foo/reftest.html',
+            [test_failures.FailureReftestMismatch(self.port.abspath_for_test('foo/reftest-expected.html'))])
+        self.assertTrue('image_diff_percent' in test_dict)
+
+        test_dict = interpret_test_failures(self.port, 'foo/reftest.html',
+            [test_failures.FailureReftestMismatchDidNotOccur(self.port.abspath_for_test('foo/reftest-expected-mismatch.html'))])
+        self.assertEqual(len(test_dict), 0)
+
+        test_dict = interpret_test_failures(self.port, 'foo/audio-test.html',
+            [test_failures.FailureMissingAudio()])
+        self.assertTrue('is_missing_audio' in test_dict)
+
+        test_dict = interpret_test_failures(self.port, 'foo/text-test.html',
+            [test_failures.FailureMissingResult()])
+        self.assertTrue('is_missing_text' in test_dict)
+
+        test_dict = interpret_test_failures(self.port, 'foo/pixel-test.html',
+            [test_failures.FailureMissingImage()])
+        self.assertTrue('is_missing_image' in test_dict)
+
+        test_dict = interpret_test_failures(self.port, 'foo/pixel-test.html',
+            [test_failures.FailureMissingImageHash()])
+        self.assertTrue('is_missing_image' in test_dict)
+
+    def get_result(self, test_name, result_type=test_expectations.PASS, run_time=0):
+        failures = []
+        if result_type == test_expectations.TIMEOUT:
+            failures = [test_failures.FailureTimeout()]
+        elif result_type == test_expectations.CRASH:
+            failures = [test_failures.FailureCrash()]
+        return test_results.TestResult(test_name, failures=failures, test_run_time=run_time)
+
+    def get_result_summary(self, port, test_names, expectations_str):
+        port.expectations_dict = lambda: {'': expectations_str}
+        expectations = test_expectations.TestExpectations(port, test_names)
+        return test_names, ResultSummary(expectations, test_names, 1, set()), expectations
+
+    # FIXME: Use this to test more of summarize_results. This was moved from printing_unittest.py.
+    def summarized_results(self, port, expected, passing, flaky, extra_tests=[], extra_expectations=None):
+        tests = ['passes/text.html', 'failures/expected/timeout.html', 'failures/expected/crash.html', 'failures/expected/wontfix.html']
+        if extra_tests:
+            tests.extend(extra_tests)
+
+        expectations = ''
+        if extra_expectations:
+            expectations += extra_expectations
+
+        test_is_slow = False
+        paths, rs, exp = self.get_result_summary(port, tests, expectations)
+        if expected:
+            rs.add(self.get_result('passes/text.html', test_expectations.PASS), expected, test_is_slow)
+            rs.add(self.get_result('failures/expected/timeout.html', test_expectations.TIMEOUT), expected, test_is_slow)
+            rs.add(self.get_result('failures/expected/crash.html', test_expectations.CRASH), expected, test_is_slow)
+        elif passing:
+            rs.add(self.get_result('passes/text.html'), expected, test_is_slow)
+            rs.add(self.get_result('failures/expected/timeout.html'), expected, test_is_slow)
+            rs.add(self.get_result('failures/expected/crash.html'), expected, test_is_slow)
+        else:
+            rs.add(self.get_result('passes/text.html', test_expectations.TIMEOUT), expected, test_is_slow)
+            rs.add(self.get_result('failures/expected/timeout.html', test_expectations.CRASH), expected, test_is_slow)
+            rs.add(self.get_result('failures/expected/crash.html', test_expectations.TIMEOUT), expected, test_is_slow)
+
+        for test in extra_tests:
+            rs.add(self.get_result(test, test_expectations.CRASH), expected, test_is_slow)
+
+        retry = rs
+        if flaky:
+            paths, retry, exp = self.get_result_summary(port, tests, expectations)
+            retry.add(self.get_result('passes/text.html'), True, test_is_slow)
+            retry.add(self.get_result('failures/expected/timeout.html'), True, test_is_slow)
+            retry.add(self.get_result('failures/expected/crash.html'), True, test_is_slow)
+        unexpected_results = summarize_results(port, exp, rs, retry, test_timings={}, only_unexpected=True, interrupted=False)
+        expected_results = summarize_results(port, exp, rs, retry, test_timings={}, only_unexpected=False, interrupted=False)
+        return expected_results, unexpected_results
+
+    def test_no_svn_revision(self):
+        host = MockHost(initialize_scm_by_default=False)
+        port = host.port_factory.get('test')
+        expected_results, unexpected_results = self.summarized_results(port, expected=False, passing=False, flaky=False)
+        self.assertTrue('revision' not in unexpected_results)
+
+    def test_svn_revision(self):
+        host = MockHost(initialize_scm_by_default=False)
+        port = host.port_factory.get('test')
+        port._options.builder_name = 'dummy builder'
+        expected_results, unexpected_results = self.summarized_results(port, expected=False, passing=False, flaky=False)
+        self.assertNotEquals(unexpected_results['revision'], '')
+
+    def test_summarized_results_wontfix(self):
+        host = MockHost()
+        port = host.port_factory.get('test')
+        port._options.builder_name = 'dummy builder'
+        port._filesystem.write_text_file(port._filesystem.join(port.layout_tests_dir(), "failures/expected/wontfix.html"), "Dummy test contents")
+        expected_results, unexpected_results = self.summarized_results(port, expected=False, passing=False, flaky=False, extra_tests=['failures/expected/wontfix.html'], extra_expectations='Bug(x) failures/expected/wontfix.html [ WontFix ]\n')
+        self.assertTrue(expected_results['tests']['failures']['expected']['wontfix.html']['wontfix'])
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py b/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
new file mode 100644
index 0000000..28e9d63
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
@@ -0,0 +1,339 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+import logging
+import re
+import time
+
+from webkitpy.layout_tests.controllers import test_result_writer
+from webkitpy.layout_tests.port.driver import DriverInput, DriverOutput
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.layout_tests.models.test_results import TestResult
+
+
+_log = logging.getLogger(__name__)
+
+
+def run_single_test(port, options, test_input, driver, worker_name, stop_when_done):
+    runner = SingleTestRunner(options, port, driver, test_input, worker_name, stop_when_done)
+    return runner.run()
+
+
+class SingleTestRunner(object):
+    (ALONGSIDE_TEST, PLATFORM_DIR, VERSION_DIR, UPDATE) = ('alongside', 'platform', 'version', 'update')
+
+    def __init__(self, options, port, driver, test_input, worker_name, stop_when_done):
+        self._options = options
+        self._port = port
+        self._filesystem = port.host.filesystem
+        self._driver = driver
+        self._timeout = test_input.timeout
+        self._worker_name = worker_name
+        self._test_name = test_input.test_name
+        self._should_run_pixel_test = test_input.should_run_pixel_test
+        self._reference_files = test_input.reference_files
+        self._stop_when_done = stop_when_done
+
+        if self._reference_files:
+            # Detect and report a test which has a wrong combination of expectation files.
+            # For example, if 'foo.html' has two expectation files, 'foo-expected.html' and
+            # 'foo-expected.txt', we should warn users. One test file must be used exclusively
+            # in either layout tests or reftests, but not in both.
+            for suffix in ('.txt', '.png', '.wav'):
+                expected_filename = self._port.expected_filename(self._test_name, suffix)
+                if self._filesystem.exists(expected_filename):
+                    _log.error('%s is a reftest, but has an unused expectation file. Please remove %s.',
+                        self._test_name, expected_filename)
+
+    def _expected_driver_output(self):
+        return DriverOutput(self._port.expected_text(self._test_name),
+                                 self._port.expected_image(self._test_name),
+                                 self._port.expected_checksum(self._test_name),
+                                 self._port.expected_audio(self._test_name))
+
+    def _should_fetch_expected_checksum(self):
+        return self._should_run_pixel_test and not (self._options.new_baseline or self._options.reset_results)
+
+    def _driver_input(self):
+        # The image hash is used to avoid doing an image dump if the
+        # checksums match, so it should be set to a blank value if we
+        # are generating a new baseline.  (Otherwise, an image from a
+        # previous run will be copied into the baseline."""
+        image_hash = None
+        if self._should_fetch_expected_checksum():
+            image_hash = self._port.expected_checksum(self._test_name)
+        return DriverInput(self._test_name, self._timeout, image_hash, self._should_run_pixel_test)
+
+    def run(self):
+        if self._reference_files:
+            if self._port.get_option('no_ref_tests') or self._options.reset_results:
+                reftest_type = set([reference_file[0] for reference_file in self._reference_files])
+                result = TestResult(self._test_name, reftest_type=reftest_type)
+                result.type = test_expectations.SKIP
+                return result
+            return self._run_reftest()
+        if self._options.reset_results:
+            return self._run_rebaseline()
+        return self._run_compare_test()
+
+    def _run_compare_test(self):
+        driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
+        expected_driver_output = self._expected_driver_output()
+
+        if self._options.ignore_metrics:
+            expected_driver_output.strip_metrics()
+            driver_output.strip_metrics()
+
+        test_result = self._compare_output(expected_driver_output, driver_output)
+        if self._options.new_test_results:
+            self._add_missing_baselines(test_result, driver_output)
+        test_result_writer.write_test_result(self._filesystem, self._port, self._test_name, driver_output, expected_driver_output, test_result.failures)
+        return test_result
+
+    def _run_rebaseline(self):
+        driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
+        failures = self._handle_error(driver_output)
+        test_result_writer.write_test_result(self._filesystem, self._port, self._test_name, driver_output, None, failures)
+        # FIXME: If the test crashed or timed out, it might be better to avoid
+        # writing new baselines.
+        self._overwrite_baselines(driver_output)
+        return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr())
+
+    _render_tree_dump_pattern = re.compile(r"^layer at \(\d+,\d+\) size \d+x\d+\n")
+
+    def _add_missing_baselines(self, test_result, driver_output):
+        missingImage = test_result.has_failure_matching_types(test_failures.FailureMissingImage, test_failures.FailureMissingImageHash)
+        if test_result.has_failure_matching_types(test_failures.FailureMissingResult):
+            self._save_baseline_data(driver_output.text, '.txt', self._location_for_new_baseline(driver_output.text, '.txt'))
+        if test_result.has_failure_matching_types(test_failures.FailureMissingAudio):
+            self._save_baseline_data(driver_output.audio, '.wav', self._location_for_new_baseline(driver_output.audio, '.wav'))
+        if missingImage:
+            self._save_baseline_data(driver_output.image, '.png', self._location_for_new_baseline(driver_output.image, '.png'))
+
+    def _location_for_new_baseline(self, data, extension):
+        if self._options.add_platform_exceptions:
+            return self.VERSION_DIR
+        if extension == '.png':
+            return self.PLATFORM_DIR
+        if extension == '.wav':
+            return self.ALONGSIDE_TEST
+        if extension == '.txt' and self._render_tree_dump_pattern.match(data):
+            return self.PLATFORM_DIR
+        return self.ALONGSIDE_TEST
+
+    def _overwrite_baselines(self, driver_output):
+        location = self.VERSION_DIR if self._options.add_platform_exceptions else self.UPDATE
+        self._save_baseline_data(driver_output.text, '.txt', location)
+        self._save_baseline_data(driver_output.audio, '.wav', location)
+        if self._should_run_pixel_test:
+            self._save_baseline_data(driver_output.image, '.png', location)
+
+    def _save_baseline_data(self, data, extension, location):
+        if data is None:
+            return
+        port = self._port
+        fs = self._filesystem
+        if location == self.ALONGSIDE_TEST:
+            output_dir = fs.dirname(port.abspath_for_test(self._test_name))
+        elif location == self.VERSION_DIR:
+            output_dir = fs.join(port.baseline_version_dir(), fs.dirname(self._test_name))
+        elif location == self.PLATFORM_DIR:
+            output_dir = fs.join(port.baseline_platform_dir(), fs.dirname(self._test_name))
+        elif location == self.UPDATE:
+            output_dir = fs.dirname(port.expected_filename(self._test_name, extension))
+        else:
+            raise AssertionError('unrecognized baseline location: %s' % location)
+
+        fs.maybe_make_directory(output_dir)
+        output_basename = fs.basename(fs.splitext(self._test_name)[0] + "-expected" + extension)
+        output_path = fs.join(output_dir, output_basename)
+        _log.info('Writing new expected result "%s"' % port.relative_test_filename(output_path))
+        port.update_baseline(output_path, data)
+
+    def _handle_error(self, driver_output, reference_filename=None):
+        """Returns test failures if some unusual errors happen in driver's run.
+
+        Args:
+          driver_output: The output from the driver.
+          reference_filename: The full path to the reference file which produced the driver_output.
+              This arg is optional and should be used only in reftests until we have a better way to know
+              which html file is used for producing the driver_output.
+        """
+        failures = []
+        fs = self._filesystem
+        if driver_output.timeout:
+            failures.append(test_failures.FailureTimeout(bool(reference_filename)))
+
+        if reference_filename:
+            testname = self._port.relative_test_filename(reference_filename)
+        else:
+            testname = self._test_name
+
+        if driver_output.crash:
+            failures.append(test_failures.FailureCrash(bool(reference_filename),
+                                                       driver_output.crashed_process_name,
+                                                       driver_output.crashed_pid))
+            if driver_output.error:
+                _log.debug("%s %s crashed, (stderr lines):" % (self._worker_name, testname))
+            else:
+                _log.debug("%s %s crashed, (no stderr)" % (self._worker_name, testname))
+        elif driver_output.error:
+            _log.debug("%s %s output stderr lines:" % (self._worker_name, testname))
+        for line in driver_output.error.splitlines():
+            _log.debug("  %s" % line)
+        return failures
+
+    def _compare_output(self, expected_driver_output, driver_output):
+        failures = []
+        failures.extend(self._handle_error(driver_output))
+
+        if driver_output.crash:
+            # Don't continue any more if we already have a crash.
+            # In case of timeouts, we continue since we still want to see the text and image output.
+            return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr())
+
+        failures.extend(self._compare_text(expected_driver_output.text, driver_output.text))
+        failures.extend(self._compare_audio(expected_driver_output.audio, driver_output.audio))
+        if self._should_run_pixel_test:
+            failures.extend(self._compare_image(expected_driver_output, driver_output))
+        return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr())
+
+    def _compare_text(self, expected_text, actual_text):
+        failures = []
+        if (expected_text and actual_text and
+            # Assuming expected_text is already normalized.
+            self._port.do_text_results_differ(expected_text, self._get_normalized_output_text(actual_text))):
+            failures.append(test_failures.FailureTextMismatch())
+        elif actual_text and not expected_text:
+            failures.append(test_failures.FailureMissingResult())
+        return failures
+
+    def _compare_audio(self, expected_audio, actual_audio):
+        failures = []
+        if (expected_audio and actual_audio and
+            self._port.do_audio_results_differ(expected_audio, actual_audio)):
+            failures.append(test_failures.FailureAudioMismatch())
+        elif actual_audio and not expected_audio:
+            failures.append(test_failures.FailureMissingAudio())
+        return failures
+
+    def _get_normalized_output_text(self, output):
+        """Returns the normalized text output, i.e. the output in which
+        the end-of-line characters are normalized to "\n"."""
+        # Running tests on Windows produces "\r\n".  The "\n" part is helpfully
+        # changed to "\r\n" by our system (Python/Cygwin), resulting in
+        # "\r\r\n", when, in fact, we wanted to compare the text output with
+        # the normalized text expectation files.
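+        # For example, "text\r\r\n" and "text\r\n" both normalize to "text\n".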
+        return output.replace("\r\r\n", "\r\n").replace("\r\n", "\n")
+
+    # FIXME: This function also creates the image diff. Maybe that work should
+    # be handled elsewhere?
+    def _compare_image(self, expected_driver_output, driver_output):
+        failures = []
+        # If we didn't produce a hash file, this test must be text-only.
+        if driver_output.image_hash is None:
+            return failures
+        if not expected_driver_output.image:
+            failures.append(test_failures.FailureMissingImage())
+        elif not expected_driver_output.image_hash:
+            failures.append(test_failures.FailureMissingImageHash())
+        elif driver_output.image_hash != expected_driver_output.image_hash:
+            diff_result = self._port.diff_image(expected_driver_output.image, driver_output.image)
+            err_str = diff_result[2]
+            if err_str:
+                _log.warning('  %s : %s' % (self._test_name, err_str))
+                failures.append(test_failures.FailureImageHashMismatch())
+                driver_output.error = (driver_output.error or '') + err_str
+            else:
+                driver_output.image_diff = diff_result[0]
+                if driver_output.image_diff:
+                    failures.append(test_failures.FailureImageHashMismatch(diff_result[1]))
+                else:
+                    # See https://bugs.webkit.org/show_bug.cgi?id=69444 for why this isn't a full failure.
+                    _log.warning('  %s -> pixel hash failed (but diff passed)' % self._test_name)
+        return failures
+
+    def _run_reftest(self):
+        test_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
+        total_test_time = 0
+        reference_output = None
+        test_result = None
+
+        # A reftest can have multiple match references and multiple mismatch references;
+        # the test fails if any mismatch reference matches, or if none of the match references match.
+        # To minimize the number of references we have to check, we run all of the mismatches first,
+        # then the matches, and short-circuit out as soon as we can.
+        # Note that sorting by the expectation sorts "!=" before "==", so this is easy to do.
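+        # For example, sorted([('==', 'x-expected.html'), ('!=', 'x-expected-mismatch.html')])
+        # yields the '!=' entry first, since '!' sorts before '=' lexicographically.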
+
+        putAllMismatchBeforeMatch = sorted
+        for expectation, reference_filename in putAllMismatchBeforeMatch(self._reference_files):
+            reference_test_name = self._port.relative_test_filename(reference_filename)
+            reference_output = self._driver.run_test(DriverInput(reference_test_name, self._timeout, None, should_run_pixel_test=True), self._stop_when_done)
+            test_result = self._compare_output_with_reference(reference_output, test_output, reference_filename, expectation == '!=')
+
+            if (expectation == '!=' and test_result.failures) or (expectation == '==' and not test_result.failures):
+                break
+            total_test_time += test_result.test_run_time
+
+        assert(reference_output)
+        test_result_writer.write_test_result(self._filesystem, self._port, self._test_name, test_output, reference_output, test_result.failures)
+        reftest_type = set([reference_file[0] for reference_file in self._reference_files])
+        return TestResult(self._test_name, test_result.failures, total_test_time + test_result.test_run_time, test_result.has_stderr, reftest_type=reftest_type)
+
+    def _compare_output_with_reference(self, reference_driver_output, actual_driver_output, reference_filename, mismatch):
+        total_test_time = reference_driver_output.test_time + actual_driver_output.test_time
+        has_stderr = reference_driver_output.has_stderr() or actual_driver_output.has_stderr()
+        failures = []
+        failures.extend(self._handle_error(actual_driver_output))
+        if failures:
+            # Don't continue any more if we already have crash or timeout.
+            return TestResult(self._test_name, failures, total_test_time, has_stderr)
+        failures.extend(self._handle_error(reference_driver_output, reference_filename=reference_filename))
+        if failures:
+            return TestResult(self._test_name, failures, total_test_time, has_stderr)
+
+        if not reference_driver_output.image_hash and not actual_driver_output.image_hash:
+            failures.append(test_failures.FailureReftestNoImagesGenerated(reference_filename))
+        elif mismatch:
+            if reference_driver_output.image_hash == actual_driver_output.image_hash:
+                diff_result = self._port.diff_image(reference_driver_output.image, actual_driver_output.image, tolerance=0)
+                if not diff_result[0]:
+                    failures.append(test_failures.FailureReftestMismatchDidNotOccur(reference_filename))
+                else:
+                    _log.warning("  %s -> ref test hashes matched but diff failed" % self._test_name)
+
+        elif reference_driver_output.image_hash != actual_driver_output.image_hash:
+            diff_result = self._port.diff_image(reference_driver_output.image, actual_driver_output.image, tolerance=0)
+            if diff_result[0]:
+                failures.append(test_failures.FailureReftestMismatch(reference_filename))
+            else:
+                _log.warning("  %s -> ref test hashes didn't match but diff passed" % self._test_name)
+
+        return TestResult(self._test_name, failures, total_test_time, has_stderr)
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer.py b/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer.py
new file mode 100644
index 0000000..be178ab
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer.py
@@ -0,0 +1,269 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+import logging
+
+from webkitpy.layout_tests.models import test_failures
+
+
+_log = logging.getLogger(__name__)
+
+
+def write_test_result(filesystem, port, test_name, driver_output,
+                      expected_driver_output, failures):
+    """Write the test result to the result output directory."""
+    root_output_dir = port.results_directory()
+    writer = TestResultWriter(filesystem, port, root_output_dir, test_name)
+
+    if driver_output.error:
+        writer.write_stderr(driver_output.error)
+
+    for failure in failures:
+        # FIXME: Instead of this long 'if' block, each failure class might
+        # have a responsibility for writing a test result.
+        if isinstance(failure, (test_failures.FailureMissingResult,
+                                test_failures.FailureTextMismatch)):
+            writer.write_text_files(driver_output.text, expected_driver_output.text)
+            writer.create_text_diff_and_write_result(driver_output.text, expected_driver_output.text)
+        elif isinstance(failure, test_failures.FailureMissingImage):
+            writer.write_image_files(driver_output.image, expected_image=None)
+        elif isinstance(failure, test_failures.FailureMissingImageHash):
+            writer.write_image_files(driver_output.image, expected_driver_output.image)
+        elif isinstance(failure, test_failures.FailureImageHashMismatch):
+            writer.write_image_files(driver_output.image, expected_driver_output.image)
+            writer.write_image_diff_files(driver_output.image_diff)
+        elif isinstance(failure, (test_failures.FailureAudioMismatch,
+                                  test_failures.FailureMissingAudio)):
+            writer.write_audio_files(driver_output.audio, expected_driver_output.audio)
+        elif isinstance(failure, test_failures.FailureCrash):
+            crashed_driver_output = expected_driver_output if failure.is_reftest else driver_output
+            writer.write_crash_log(crashed_driver_output.crash_log)
+        elif isinstance(failure, test_failures.FailureReftestMismatch):
+            writer.write_image_files(driver_output.image, expected_driver_output.image)
+            # FIXME: This work should be done earlier in the pipeline (e.g., when we compare images for non-ref tests).
+            # FIXME: We should always have 2 images here.
+            if driver_output.image and expected_driver_output.image:
+                diff_image, diff_percent, err_str = port.diff_image(expected_driver_output.image, driver_output.image, tolerance=0)
+                if diff_image:
+                    writer.write_image_diff_files(diff_image)
+                    failure.diff_percent = diff_percent
+                else:
+                    _log.warn('ref test mismatch did not produce an image diff.')
+            writer.write_reftest(failure.reference_filename)
+        elif isinstance(failure, test_failures.FailureReftestMismatchDidNotOccur):
+            writer.write_image_files(driver_output.image, expected_image=None)
+            writer.write_reftest(failure.reference_filename)
+        else:
+            assert isinstance(failure, (test_failures.FailureTimeout, test_failures.FailureReftestNoImagesGenerated))
+
+
+class TestResultWriter(object):
+    """A class which handles all writing operations to the result directory."""
+
+    # Filename pieces when writing failures to the test results directory.
+    FILENAME_SUFFIX_ACTUAL = "-actual"
+    FILENAME_SUFFIX_EXPECTED = "-expected"
+    FILENAME_SUFFIX_DIFF = "-diff"
+    FILENAME_SUFFIX_STDERR = "-stderr"
+    FILENAME_SUFFIX_CRASH_LOG = "-crash-log"
+    FILENAME_SUFFIX_WDIFF = "-wdiff.html"
+    FILENAME_SUFFIX_PRETTY_PATCH = "-pretty-diff.html"
+    FILENAME_SUFFIX_IMAGE_DIFF = "-diff.png"
+    FILENAME_SUFFIX_IMAGE_DIFFS_HTML = "-diffs.html"
+
+    def __init__(self, filesystem, port, root_output_dir, test_name):
+        self._filesystem = filesystem
+        self._port = port
+        self._root_output_dir = root_output_dir
+        self._test_name = test_name
+
+    def _make_output_directory(self):
+        """Creates the output directory (if needed) for a given test filename."""
+        fs = self._filesystem
+        output_filename = fs.join(self._root_output_dir, self._test_name)
+        fs.maybe_make_directory(fs.dirname(output_filename))
+
+    def output_filename(self, modifier):
+        """Returns a filename inside the output dir that contains modifier.
+
+        For example, if test name is "fast/dom/foo.html" and modifier is "-expected.txt",
+        the return value is "/<path-to-root-output-dir>/fast/dom/foo-expected.txt".
+
+        Args:
+          modifier: a string to replace the extension of filename with
+
+        Return:
+          The absolute path to the output filename
+        """
+        fs = self._filesystem
+        output_filename = fs.join(self._root_output_dir, self._test_name)
+        return fs.splitext(output_filename)[0] + modifier
+
+    def _write_binary_file(self, path, contents):
+        if contents is not None:
+            self._make_output_directory()
+            self._filesystem.write_binary_file(path, contents)
+
+    def _write_text_file(self, path, contents):
+        if contents is not None:
+            self._make_output_directory()
+            self._filesystem.write_text_file(path, contents)
+
+    def _output_testname(self, modifier):
+        fs = self._filesystem
+        return fs.splitext(fs.basename(self._test_name))[0] + modifier
+
+    def write_output_files(self, file_type, output, expected):
+        """Writes the test output, the expected output in the results directory.
+
+        The full output filename of the actual, for example, will be
+          <filename>-actual<file_type>
+        For instance,
+          my_test-actual.txt
+
+        Args:
+          file_type: A string describing the test output file type, e.g. ".txt"
+          output: A string containing the test output
+          expected: A string containing the expected test output
+        """
+        actual_filename = self.output_filename(self.FILENAME_SUFFIX_ACTUAL + file_type)
+        expected_filename = self.output_filename(self.FILENAME_SUFFIX_EXPECTED + file_type)
+
+        self._write_binary_file(actual_filename, output)
+        self._write_binary_file(expected_filename, expected)
+
+    def write_stderr(self, error):
+        filename = self.output_filename(self.FILENAME_SUFFIX_STDERR + ".txt")
+        self._write_binary_file(filename, error)
+
+    def write_crash_log(self, crash_log):
+        filename = self.output_filename(self.FILENAME_SUFFIX_CRASH_LOG + ".txt")
+        self._write_text_file(filename, crash_log)
+
+    def write_text_files(self, actual_text, expected_text):
+        self.write_output_files(".txt", actual_text, expected_text)
+
+    def create_text_diff_and_write_result(self, actual_text, expected_text):
+        # FIXME: This function is actually doing the diffs as well as writing results.
+        # It might be better to extract code which does 'diff' and make it a separate function.
+        if not actual_text or not expected_text:
+            return
+
+        file_type = '.txt'
+        actual_filename = self.output_filename(self.FILENAME_SUFFIX_ACTUAL + file_type)
+        expected_filename = self.output_filename(self.FILENAME_SUFFIX_EXPECTED + file_type)
+        # We treat diff output as binary. Diff output may contain multiple files
+        # in conflicting encodings.
+        diff = self._port.diff_text(expected_text, actual_text, expected_filename, actual_filename)
+        diff_filename = self.output_filename(self.FILENAME_SUFFIX_DIFF + file_type)
+        self._write_binary_file(diff_filename, diff)
+
+        # Shell out to wdiff to get colored inline diffs.
+        if self._port.wdiff_available():
+            wdiff = self._port.wdiff_text(expected_filename, actual_filename)
+            wdiff_filename = self.output_filename(self.FILENAME_SUFFIX_WDIFF)
+            self._write_binary_file(wdiff_filename, wdiff)
+
+        # Use WebKit's PrettyPatch.rb to get an HTML diff.
+        if self._port.pretty_patch_available():
+            pretty_patch = self._port.pretty_patch_text(diff_filename)
+            pretty_patch_filename = self.output_filename(self.FILENAME_SUFFIX_PRETTY_PATCH)
+            self._write_binary_file(pretty_patch_filename, pretty_patch)
+
+    def write_audio_files(self, actual_audio, expected_audio):
+        self.write_output_files('.wav', actual_audio, expected_audio)
+
+    def write_image_files(self, actual_image, expected_image):
+        self.write_output_files('.png', actual_image, expected_image)
+
+    def write_image_diff_files(self, image_diff):
+        diff_filename = self.output_filename(self.FILENAME_SUFFIX_IMAGE_DIFF)
+        self._write_binary_file(diff_filename, image_diff)
+
+        diffs_html_filename = self.output_filename(self.FILENAME_SUFFIX_IMAGE_DIFFS_HTML)
+        # FIXME: old-run-webkit-tests shows the diff percentage as the text contents of the "diff" link.
+        # FIXME: old-run-webkit-tests includes a link to the test file.
+        html = """<!DOCTYPE HTML>
+<html>
+<head>
+<title>%(title)s</title>
+<style>.label{font-weight:bold}</style>
+</head>
+<body>
+Difference between images: <a href="%(diff_filename)s">diff</a><br>
+<div class=imageText></div>
+<div class=imageContainer data-prefix="%(prefix)s">Loading...</div>
+<script>
+(function() {
+    var preloadedImageCount = 0;
+    function preloadComplete() {
+        ++preloadedImageCount;
+        if (preloadedImageCount < 2)
+            return;
+        toggleImages();
+        setInterval(toggleImages, 2000)
+    }
+
+    function preloadImage(url) {
+        var image = new Image();
+        image.addEventListener('load', preloadComplete);
+        image.src = url;
+        return image;
+    }
+
+    function toggleImages() {
+        if (text.textContent == 'Expected Image') {
+            text.textContent = 'Actual Image';
+            container.replaceChild(actualImage, container.firstChild);
+        } else {
+            text.textContent = 'Expected Image';
+            container.replaceChild(expectedImage, container.firstChild);
+        }
+    }
+
+    var text = document.querySelector('.imageText');
+    var container = document.querySelector('.imageContainer');
+    var actualImage = preloadImage(container.getAttribute('data-prefix') + '-actual.png');
+    var expectedImage = preloadImage(container.getAttribute('data-prefix') + '-expected.png');
+})();
+</script>
+</body>
+</html>
+""" % {
+            'title': self._test_name,
+            'diff_filename': self._output_testname(self.FILENAME_SUFFIX_IMAGE_DIFF),
+            'prefix': self._output_testname(''),
+        }
+        self._filesystem.write_text_file(diffs_html_filename, html)
+
+    def write_reftest(self, src_filepath):
+        fs = self._filesystem
+        dst_dir = fs.dirname(fs.join(self._root_output_dir, self._test_name))
+        dst_filepath = fs.join(dst_dir, fs.basename(src_filepath))
+        self._write_text_file(dst_filepath, fs.read_text_file(src_filepath))
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer_unittest.py b/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer_unittest.py
new file mode 100644
index 0000000..dfd6041
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer_unittest.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.layout_tests.controllers import test_result_writer
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.layout_tests.port.driver import DriverOutput
+from webkitpy.layout_tests.port.test import TestPort
+
+
+class TestResultWriterTest(unittest.TestCase):
+
+    def test_reftest_diff_image(self):
+        """A write_test_result should call port.diff_image with tolerance=0 in case of FailureReftestMismatch."""
+        used_tolerance_values = []
+
+        class ImageDiffTestPort(TestPort):
+            def diff_image(self, expected_contents, actual_contents, tolerance=None):
+                used_tolerance_values.append(tolerance)
+                return (True, 1, None)
+
+        host = MockHost()
+        port = ImageDiffTestPort(host)
+        test_name = 'failures/unexpected/reftest.html'
+        test_reference_file = host.filesystem.join(port.layout_tests_dir(), 'failures/unexpected/reftest-expected.html')
+        driver_output1 = DriverOutput('text1', 'image1', 'imagehash1', 'audio1')
+        driver_output2 = DriverOutput('text2', 'image2', 'imagehash2', 'audio2')
+        failures = [test_failures.FailureReftestMismatch(test_reference_file)]
+        test_result_writer.write_test_result(host.filesystem, ImageDiffTestPort(host), test_name,
+                                             driver_output1, driver_output2, failures)
+        self.assertEqual([0], used_tolerance_values)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/__init__.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
new file mode 100644
index 0000000..f277c93
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
@@ -0,0 +1,176 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+
+from webkitpy.layout_tests.layout_package import json_results_generator
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models import test_failures
+
+class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGeneratorBase):
+    """A JSON results generator for layout tests."""
+
+    LAYOUT_TESTS_PATH = "LayoutTests"
+
+    # Additional JSON fields.
+    WONTFIX = "wontfixCounts"
+
+    FAILURE_TO_CHAR = {test_expectations.PASS: json_results_generator.JSONResultsGeneratorBase.PASS_RESULT,
+                       test_expectations.SKIP: json_results_generator.JSONResultsGeneratorBase.SKIP_RESULT,
+                       test_expectations.CRASH: "C",
+                       test_expectations.TIMEOUT: "T",
+                       test_expectations.IMAGE: "I",
+                       test_expectations.TEXT: "F",
+                       test_expectations.AUDIO: "A",
+                       test_expectations.MISSING: "O",
+                       test_expectations.IMAGE_PLUS_TEXT: "Z"}
+
+    def __init__(self, port, builder_name, build_name, build_number,
+        results_file_base_path, builder_base_url,
+        test_timings, expectations, result_summary, all_tests,
+        test_results_server=None, test_type="", master_name=""):
+        """Modifies the results.json file. Grabs it off the archive directory
+        if it is not found locally.
+
+        Args:
+          result_summary: ResultSummary object storing the summary of the test
+              results.
+        """
+        super(JSONLayoutResultsGenerator, self).__init__(
+            port, builder_name, build_name, build_number, results_file_base_path,
+            builder_base_url, {}, port.repository_paths(),
+            test_results_server, test_type, master_name)
+
+        self._expectations = expectations
+
+        self._result_summary = result_summary
+        self._failures = dict((test_name, result_summary.results[test_name].type) for test_name in result_summary.failures)
+        self._all_tests = all_tests
+        self._test_timings = dict((test_tuple.test_name, test_tuple.test_run_time) for test_tuple in test_timings)
+
+        self.generate_json_output()
+
+    def _get_path_relative_to_layout_test_root(self, test):
+        """Returns the path of the test relative to the layout test root.
+        For example, for:
+          src/third_party/WebKit/LayoutTests/fast/forms/foo.html
+        We would return
+          fast/forms/foo.html
+        """
+        index = test.find(self.LAYOUT_TESTS_PATH)
+        if index != -1:
+            index += len(self.LAYOUT_TESTS_PATH)
+
+        if index == -1:
+            # Already a relative path.
+            relativePath = test
+        else:
+            relativePath = test[index + 1:]
+
+        # Make sure all paths are unix-style.
+        return relativePath.replace('\\', '/')
+
+    # override
+    def _get_test_timing(self, test_name):
+        if test_name in self._test_timings:
+            # Floor for now to get time in seconds.
+            return int(self._test_timings[test_name])
+        return 0
+
+    # override
+    def _get_failed_test_names(self):
+        return set(self._failures.keys())
+
+    # override
+    def _get_modifier_char(self, test_name):
+        if test_name not in self._all_tests:
+            return self.NO_DATA_RESULT
+
+        if test_name in self._failures:
+            return self.FAILURE_TO_CHAR[self._failures[test_name]]
+
+        return self.PASS_RESULT
+
+    # override
+    def _get_result_char(self, test_name):
+        return self._get_modifier_char(test_name)
+
+    # override
+    def _insert_failure_summaries(self, results_for_builder):
+        summary = self._result_summary
+
+        self._insert_item_into_raw_list(results_for_builder,
+            len((set(summary.failures.keys()) |
+                summary.tests_by_expectation[test_expectations.SKIP]) &
+                summary.tests_by_timeline[test_expectations.NOW]),
+            self.FIXABLE_COUNT)
+        self._insert_item_into_raw_list(results_for_builder,
+            self._get_failure_summary_entry(test_expectations.NOW),
+            self.FIXABLE)
+        self._insert_item_into_raw_list(results_for_builder,
+            len(self._expectations.get_tests_with_timeline(
+                test_expectations.NOW)), self.ALL_FIXABLE_COUNT)
+        self._insert_item_into_raw_list(results_for_builder,
+            self._get_failure_summary_entry(test_expectations.WONTFIX),
+            self.WONTFIX)
+
+    # override
+    def _normalize_results_json(self, test, test_name, tests):
+        super(JSONLayoutResultsGenerator, self)._normalize_results_json(
+            test, test_name, tests)
+
+        # Remove tests that don't exist anymore.
+        full_path = self._filesystem.join(self._port.layout_tests_dir(), test_name)
+        full_path = self._filesystem.normpath(full_path)
+        if not self._filesystem.exists(full_path):
+            del tests[test_name]
+
+    def _get_failure_summary_entry(self, timeline):
+        """Creates a summary object to insert into the JSON.
+
+        Args:
+          summary   ResultSummary object with test results
+          timeline  current test_expectations timeline to build entry for
+                    (e.g., test_expectations.NOW, etc.)
+        """
+        entry = {}
+        summary = self._result_summary
+        timeline_tests = summary.tests_by_timeline[timeline]
+        entry[self.SKIP_RESULT] = len(
+            summary.tests_by_expectation[test_expectations.SKIP] &
+            timeline_tests)
+        entry[self.PASS_RESULT] = len(
+            summary.tests_by_expectation[test_expectations.PASS] &
+            timeline_tests)
+        for failure_type in summary.tests_by_expectation.keys():
+            if failure_type not in self.FAILURE_TO_CHAR:
+                continue
+            count = len(summary.tests_by_expectation[failure_type] &
+                        timeline_tests)
+            entry[self.FAILURE_TO_CHAR[failure_type]] = count
+        return entry
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
new file mode 100644
index 0000000..73834f0
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
@@ -0,0 +1,660 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import json
+import logging
+import subprocess
+import sys
+import time
+import urllib2
+import xml.dom.minidom
+
+from webkitpy.common.checkout.scm.detection import SCMDetector
+from webkitpy.common.net.file_uploader import FileUploader
+
+# A JSON results generator for generic tests.
+# FIXME: move this code out of the layout_package directory.
+
+_log = logging.getLogger(__name__)
+
+_JSON_PREFIX = "ADD_RESULTS("
+_JSON_SUFFIX = ");"
+
+
+def has_json_wrapper(string):
+    return string.startswith(_JSON_PREFIX) and string.endswith(_JSON_SUFFIX)
+
+
+def strip_json_wrapper(json_content):
+    # FIXME: Kill this code once the server returns json instead of jsonp.
+    if has_json_wrapper(json_content):
+        return json_content[len(_JSON_PREFIX):len(json_content) - len(_JSON_SUFFIX)]
+    return json_content
+
+
+def load_json(filesystem, file_path):
+    content = filesystem.read_text_file(file_path)
+    content = strip_json_wrapper(content)
+    return json.loads(content)
+
+
+def write_json(filesystem, json_object, file_path, callback=None):
+    # Specify separators in order to get compact encoding.
+    json_string = json.dumps(json_object, separators=(',', ':'))
+    if callback:
+        json_string = callback + "(" + json_string + ");"
+    filesystem.write_text_file(file_path, json_string)
+
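+# Illustrative sketch (not part of the original change): round-tripping the
+# JSONP wrapper handled by strip_json_wrapper and write_json. Values below are
+# hypothetical.
+#
+#   strip_json_wrapper('ADD_RESULTS({"version":4});')  # -> '{"version":4}'
+#   strip_json_wrapper('{"version":4}')                # -> unchanged
+#   # write_json(filesystem, {"version": 4}, path, callback="ADD_RESULTS")
+#   # writes 'ADD_RESULTS({"version":4});' (compact separators, then wrapped).
+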
+
+def convert_trie_to_flat_paths(trie, prefix=None):
+    """Converts the directory structure in the given trie to flat paths, prepending a prefix to each."""
+    result = {}
+    for name, data in trie.iteritems():
+        if prefix:
+            name = prefix + "/" + name
+
+        if len(data) and "results" not in data:
+            result.update(convert_trie_to_flat_paths(data, name))
+        else:
+            result[name] = data
+
+    return result
+
+
+def add_path_to_trie(path, value, trie):
+    """Inserts a single flat directory path and associated value into a directory trie structure."""
+    if not "/" in path:
+        trie[path] = value
+        return
+
+    directory, slash, rest = path.partition("/")
+    if not directory in trie:
+        trie[directory] = {}
+    add_path_to_trie(rest, value, trie[directory])
+
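+# Illustrative sketch (not part of the original change): how add_path_to_trie
+# nests paths and how convert_trie_to_flat_paths undoes it. The leaf values
+# below are hypothetical per-test entries (convert_trie_to_flat_paths expects
+# dict leaves, e.g. ones containing a "results" key).
+#
+#   trie = {}
+#   add_path_to_trie("foo/bar/baz.html", {"results": [[1, "P"]]}, trie)
+#   add_path_to_trie("foo/qux.html", {"results": [[1, "F"]]}, trie)
+#   # trie == {"foo": {"bar": {"baz.html": {"results": [[1, "P"]]}},
+#   #                  "qux.html": {"results": [[1, "F"]]}}}
+#   convert_trie_to_flat_paths(trie)
+#   # == {"foo/bar/baz.html": {"results": [[1, "P"]]},
+#   #     "foo/qux.html": {"results": [[1, "F"]]}}
+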
+def test_timings_trie(port, individual_test_timings):
+    """Breaks a test name into chunks by directory and puts the test time as a value in the lowest part, e.g.
+    foo/bar/baz.html: 1ms
+    foo/bar/baz1.html: 3ms
+
+    becomes
+    foo: {
+        bar: {
+            baz.html: 1,
+            baz1.html: 3
+        }
+    }
+    """
+    trie = {}
+    for test_result in individual_test_timings:
+        test = test_result.test_name
+
+        add_path_to_trie(test, int(1000 * test_result.test_run_time), trie)
+
+    return trie
+
+# FIXME: We already have a TestResult class in test_results.py
+class TestResult(object):
+    """A simple class that represents a single test result."""
+
+    # Test modifier constants.
+    (NONE, FAILS, FLAKY, DISABLED) = range(4)
+
+    def __init__(self, test, failed=False, elapsed_time=0):
+        self.test_name = test
+        self.failed = failed
+        self.test_run_time = elapsed_time
+
+        test_name = test
+        try:
+            test_name = test.split('.')[1]
+        except IndexError:
+            _log.warn("Invalid test name: %s.", test)
+
+        if test_name.startswith('FAILS_'):
+            self.modifier = self.FAILS
+        elif test_name.startswith('FLAKY_'):
+            self.modifier = self.FLAKY
+        elif test_name.startswith('DISABLED_'):
+            self.modifier = self.DISABLED
+        else:
+            self.modifier = self.NONE
+
+    def fixable(self):
+        return self.failed or self.modifier == self.DISABLED
+
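+# Illustrative sketch (not part of the original change): TestResult parses
+# gtest-style modifier prefixes from the part of the name after the first '.'.
+# The test names below are hypothetical.
+#
+#   TestResult('MyTest.FLAKY_foo').modifier == TestResult.FLAKY
+#   TestResult('MyTest.DISABLED_bar').fixable()      # True (disabled is fixable)
+#   TestResult('MyTest.baz', failed=True).fixable()  # True
+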
+
+class JSONResultsGeneratorBase(object):
+    """A JSON results generator for generic tests."""
+
+    MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG = 750
+    # Min time (seconds) that will be added to the JSON.
+    MIN_TIME = 1
+
+    # Note that in non-chromium tests those chars are used to indicate
+    # test modifiers (FAILS, FLAKY, etc) but not actual test results.
+    PASS_RESULT = "P"
+    SKIP_RESULT = "X"
+    FAIL_RESULT = "F"
+    FLAKY_RESULT = "L"
+    NO_DATA_RESULT = "N"
+
+    MODIFIER_TO_CHAR = {TestResult.NONE: PASS_RESULT,
+                        TestResult.DISABLED: SKIP_RESULT,
+                        TestResult.FAILS: FAIL_RESULT,
+                        TestResult.FLAKY: FLAKY_RESULT}
+
+    VERSION = 4
+    VERSION_KEY = "version"
+    RESULTS = "results"
+    TIMES = "times"
+    BUILD_NUMBERS = "buildNumbers"
+    TIME = "secondsSinceEpoch"
+    TESTS = "tests"
+
+    FIXABLE_COUNT = "fixableCount"
+    FIXABLE = "fixableCounts"
+    ALL_FIXABLE_COUNT = "allFixableCount"
+
+    RESULTS_FILENAME = "results.json"
+    TIMES_MS_FILENAME = "times_ms.json"
+    INCREMENTAL_RESULTS_FILENAME = "incremental_results.json"
+
+    URL_FOR_TEST_LIST_JSON = "http://%s/testfile?builder=%s&name=%s&testlistjson=1&testtype=%s&master=%s"
+
+    # FIXME: Remove generate_incremental_results once the reference to it in
+    # http://src.chromium.org/viewvc/chrome/trunk/tools/build/scripts/slave/gtest_slave_utils.py
+    # has been removed.
+    def __init__(self, port, builder_name, build_name, build_number,
+        results_file_base_path, builder_base_url,
+        test_results_map, svn_repositories=None,
+        test_results_server=None,
+        test_type="",
+        master_name="",
+        generate_incremental_results=None):
+        """Modifies the results.json file. Grabs it off the archive directory
+        if it is not found locally.
+
+        Args:
+          port: port-specific wrapper
+          builder_name: the builder name (e.g. Webkit).
+          build_name: the build name (e.g. webkit-rel).
+          build_number: the build number.
+          results_file_base_path: Absolute path to the directory containing the
+              results json file.
+          builder_base_url: the URL where we have the archived test results.
+              If this is None no archived results will be retrieved.
+          test_results_map: A dictionary that maps test_name to TestResult.
+          svn_repositories: A (json_field_name, svn_path) pair for SVN
+              repositories that tests rely on.  The SVN revision will be
+              included in the JSON with the given json_field_name.
+          test_results_server: server that hosts test results json.
+          test_type: test type string (e.g. 'layout-tests').
+          master_name: the name of the buildbot master.
+        """
+        self._port = port
+        self._filesystem = port._filesystem
+        self._executive = port._executive
+        self._builder_name = builder_name
+        self._build_name = build_name
+        self._build_number = build_number
+        self._builder_base_url = builder_base_url
+        self._results_directory = results_file_base_path
+
+        self._test_results_map = test_results_map
+        self._test_results = test_results_map.values()
+
+        self._svn_repositories = svn_repositories
+        if not self._svn_repositories:
+            self._svn_repositories = {}
+
+        self._test_results_server = test_results_server
+        self._test_type = test_type
+        self._master_name = master_name
+
+        self._archived_results = None
+
+    def generate_json_output(self):
+        json_object = self.get_json()
+        if json_object:
+            file_path = self._filesystem.join(self._results_directory, self.INCREMENTAL_RESULTS_FILENAME)
+            write_json(self._filesystem, json_object, file_path)
+
+    def generate_times_ms_file(self):
+        # FIXME: rename to generate_times_ms_file. This needs to be coordinated with
+        # changing the calls to this on the chromium build slaves.
+        times = test_timings_trie(self._port, self._test_results_map.values())
+        file_path = self._filesystem.join(self._results_directory, self.TIMES_MS_FILENAME)
+        write_json(self._filesystem, times, file_path)
+
+    def get_json(self):
+        """Gets the results for the results.json file."""
+        results_json, error = self._get_archived_json_results()
+        if error:
+            # If there was an error don't write a results.json
+            # file at all as it would lose all the information on the
+            # bot.
+            _log.error("Archive directory is inaccessible. Not "
+                       "modifying or clobbering the results.json "
+                       "file: " + str(error))
+            return None
+
+        builder_name = self._builder_name
+        if results_json and builder_name not in results_json:
+            _log.debug("Builder name (%s) is not in the results.json file."
+                       % builder_name)
+
+        self._convert_json_to_current_version(results_json)
+
+        if builder_name not in results_json:
+            results_json[builder_name] = (
+                self._create_results_for_builder_json())
+
+        results_for_builder = results_json[builder_name]
+
+        if builder_name:
+            self._insert_generic_metadata(results_for_builder)
+
+        self._insert_failure_summaries(results_for_builder)
+
+        # Update all the failing tests with result type and time.
+        tests = results_for_builder[self.TESTS]
+        all_failing_tests = self._get_failed_test_names()
+        all_failing_tests.update(convert_trie_to_flat_paths(tests))
+
+        for test in all_failing_tests:
+            self._insert_test_time_and_result(test, tests)
+
+        return results_json
+
+    def set_archived_results(self, archived_results):
+        self._archived_results = archived_results
+
+    def upload_json_files(self, json_files):
+        """Uploads the given json_files to the test_results_server (if the
+        test_results_server is given)."""
+        if not self._test_results_server:
+            return
+
+        if not self._master_name:
+            _log.error("--test-results-server was set, but --master-name was not.  Not uploading JSON files.")
+            return
+
+        _log.info("Uploading JSON files for builder: %s", self._builder_name)
+        attrs = [("builder", self._builder_name),
+                 ("testtype", self._test_type),
+                 ("master", self._master_name)]
+
+        files = [(file, self._filesystem.join(self._results_directory, file))
+            for file in json_files]
+
+        url = "http://%s/testfile/upload" % self._test_results_server
+        # Set uploading timeout in case appengine server is having problems.
+        # 120 seconds are more than enough to upload test results.
+        uploader = FileUploader(url, 120)
+        try:
+            response = uploader.upload_as_multipart_form_data(self._filesystem, files, attrs)
+            if response:
+                if response.code == 200:
+                    _log.info("JSON uploaded.")
+                else:
+                    _log.debug("JSON upload failed, %d: '%s'" % (response.code, response.read()))
+            else:
+                _log.error("JSON upload failed; no response returned")
+        except Exception, err:
+            _log.error("Upload failed: %s" % err)
+            return
+
+
+    def _get_test_timing(self, test_name):
+        """Returns test timing data (elapsed time) in second
+        for the given test_name."""
+        if test_name in self._test_results_map:
+            # Floor for now to get time in seconds.
+            return int(self._test_results_map[test_name].test_run_time)
+        return 0
+
+    def _get_failed_test_names(self):
+        """Returns a set of failed test names."""
+        return set([r.test_name for r in self._test_results if r.failed])
+
+    def _get_modifier_char(self, test_name):
+        """Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT,
+        PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test modifier
+        for the given test_name.
+        """
+        if test_name not in self._test_results_map:
+            return self.__class__.NO_DATA_RESULT
+
+        test_result = self._test_results_map[test_name]
+        if test_result.modifier in self.MODIFIER_TO_CHAR:
+            return self.MODIFIER_TO_CHAR[test_result.modifier]
+
+        return self.__class__.PASS_RESULT
+
+    def _get_result_char(self, test_name):
+        """Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT,
+        PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test result
+        for the given test_name.
+        """
+        if test_name not in self._test_results_map:
+            return self.__class__.NO_DATA_RESULT
+
+        test_result = self._test_results_map[test_name]
+        if test_result.modifier == TestResult.DISABLED:
+            return self.__class__.SKIP_RESULT
+
+        if test_result.failed:
+            return self.__class__.FAIL_RESULT
+
+        return self.__class__.PASS_RESULT
+
+    def _get_svn_revision(self, in_directory):
+        """Returns the svn revision for the given directory.
+
+        Args:
+          in_directory: The directory where svn is to be run.
+        """
+
+        # FIXME: We initialize this here in order to engage the stupid windows hacks :).
+        # We can't reuse an existing scm object because the specific directories may
+        # be part of other checkouts.
+        self._port.host.initialize_scm()
+        scm = SCMDetector(self._filesystem, self._executive).detect_scm_system(in_directory)
+        if scm:
+            return scm.svn_revision(in_directory)
+        return ""
+
+    def _get_archived_json_results(self):
+        """Download JSON file that only contains test
+        name list from test-results server. This is for generating incremental
+        JSON so the file generated has info for tests that failed before but
+        pass or are skipped from current run.
+
+        Returns (archived_results, error) tuple where error is None if results
+        were successfully read.
+        """
+        results_json = {}
+        old_results = None
+        error = None
+
+        if not self._test_results_server:
+            return {}, None
+
+        results_file_url = (self.URL_FOR_TEST_LIST_JSON %
+            (urllib2.quote(self._test_results_server),
+             urllib2.quote(self._builder_name),
+             self.RESULTS_FILENAME,
+             urllib2.quote(self._test_type),
+             urllib2.quote(self._master_name)))
+
+        try:
+            # FIXME: We should talk to the network via a Host object.
+            results_file = urllib2.urlopen(results_file_url)
+            info = results_file.info()
+            old_results = results_file.read()
+        except urllib2.HTTPError, http_error:
+            # A non-4xx status code means the bot is hosed for some reason
+            # and we can't grab the results.json file off of it.
+            if http_error.code < 400 or http_error.code >= 500:
+                error = http_error
+        except urllib2.URLError, url_error:
+            error = url_error
+
+        if old_results:
+            # Strip the prefix and suffix so we can get the actual JSON object.
+            old_results = strip_json_wrapper(old_results)
+
+            try:
+                results_json = json.loads(old_results)
+            except ValueError:
+                _log.debug("results.json was not valid JSON. Clobbering.")
+                # The JSON file is not valid JSON. Just clobber the results.
+                results_json = {}
+        else:
+            _log.debug('Old JSON results do not exist. Starting fresh.')
+            results_json = {}
+
+        return results_json, error
+
+    def _insert_failure_summaries(self, results_for_builder):
+        """Inserts aggregate pass/failure statistics into the JSON.
+        This method reads self._test_results and generates
+        FIXABLE, FIXABLE_COUNT and ALL_FIXABLE_COUNT entries.
+
+        Args:
+          results_for_builder: Dictionary containing the test results for a
+              single builder.
+        """
+        # Insert the number of tests that failed or skipped.
+        fixable_count = len([r for r in self._test_results if r.fixable()])
+        self._insert_item_into_raw_list(results_for_builder,
+            fixable_count, self.FIXABLE_COUNT)
+
+        # Create a test modifiers (FAILS, FLAKY etc) summary dictionary.
+        entry = {}
+        for test_name in self._test_results_map.iterkeys():
+            result_char = self._get_modifier_char(test_name)
+            entry[result_char] = entry.get(result_char, 0) + 1
+
+        # Insert the pass/skip/failure summary dictionary.
+        self._insert_item_into_raw_list(results_for_builder, entry,
+                                        self.FIXABLE)
+
+        # Insert the number of all the tests that are supposed to pass.
+        all_test_count = len(self._test_results)
+        self._insert_item_into_raw_list(results_for_builder,
+            all_test_count, self.ALL_FIXABLE_COUNT)
+
+    def _insert_item_into_raw_list(self, results_for_builder, item, key):
+        """Inserts the item into the list with the given key in the results for
+        this builder. Creates the list if no such list exists.
+
+        Args:
+          results_for_builder: Dictionary containing the test results for a
+              single builder.
+          item: Number or string to insert into the list.
+          key: Key in results_for_builder for the list to insert into.
+        """
+        if key in results_for_builder:
+            raw_list = results_for_builder[key]
+        else:
+            raw_list = []
+
+        raw_list.insert(0, item)
+        raw_list = raw_list[:self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG]
+        results_for_builder[key] = raw_list
+
+    def _insert_item_run_length_encoded(self, item, encoded_results):
+        """Inserts the item into the run-length encoded results.
+
+        Args:
+          item: String or number to insert.
+          encoded_results: run-length encoded results. An array of arrays, e.g.
+              [[3,'A'],[1,'Q']] encodes AAAQ.
+        """
+        if len(encoded_results) and item == encoded_results[0][1]:
+            num_results = encoded_results[0][0]
+            if num_results <= self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG:
+                encoded_results[0][0] = num_results + 1
+        else:
+            # Use a list instead of a class for the run-length encoding since
+            # we want the serialized form to be concise.
+            encoded_results.insert(0, [1, item])
+
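+    # Illustrative sketch (not part of the original change): how results build
+    # up in the run-length encoded form, newest build first. "generator" is a
+    # hypothetical JSONResultsGeneratorBase instance.
+    #
+    #   encoded = [[3, 'P'], [1, 'F']]     # encodes PPPF
+    #   generator._insert_item_run_length_encoded('P', encoded)
+    #   # encoded == [[4, 'P'], [1, 'F']]  -- same as the newest run, count bumped
+    #   generator._insert_item_run_length_encoded('F', encoded)
+    #   # encoded == [[1, 'F'], [4, 'P'], [1, 'F']]  -- different, new run prepended
+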
+    def _insert_generic_metadata(self, results_for_builder):
+        """ Inserts generic metadata (such as version number, current time etc)
+        into the JSON.
+
+        Args:
+          results_for_builder: Dictionary containing the test results for
+              a single builder.
+        """
+        self._insert_item_into_raw_list(results_for_builder,
+            self._build_number, self.BUILD_NUMBERS)
+
+        # Include SVN revisions for the given repositories.
+        for (name, path) in self._svn_repositories:
+            # Note: for JSON file's backward-compatibility we use 'chrome' rather
+            # than 'chromium' here.
+            if name == 'chromium':
+                name = 'chrome'
+            self._insert_item_into_raw_list(results_for_builder,
+                self._get_svn_revision(path),
+                name + 'Revision')
+
+        self._insert_item_into_raw_list(results_for_builder,
+            int(time.time()),
+            self.TIME)
+
+    def _insert_test_time_and_result(self, test_name, tests):
+        """ Insert a test item with its results to the given tests dictionary.
+
+        Args:
+          tests: Dictionary containing test result entries.
+        """
+
+        result = self._get_result_char(test_name)
+        time = self._get_test_timing(test_name)
+
+        this_test = tests
+        for segment in test_name.split("/"):
+            if segment not in this_test:
+                this_test[segment] = {}
+            this_test = this_test[segment]
+
+        if not len(this_test):
+            self._populate_results_and_times_json(this_test)
+
+        if self.RESULTS in this_test:
+            self._insert_item_run_length_encoded(result, this_test[self.RESULTS])
+        else:
+            this_test[self.RESULTS] = [[1, result]]
+
+        if self.TIMES in this_test:
+            self._insert_item_run_length_encoded(time, this_test[self.TIMES])
+        else:
+            this_test[self.TIMES] = [[1, time]]
+
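+    # Illustrative sketch (not part of the original change): shape of the tests
+    # dictionary after inserting a hypothetical test "foo/bar.html" whose result
+    # char is 'P' and whose floored time is 0.
+    #
+    #   tests == {"foo": {"bar.html": {"results": [[1, "P"]], "times": [[1, 0]]}}}
+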
+    def _convert_json_to_current_version(self, results_json):
+        """If the JSON does not match the current version, converts it to the
+        current version and adds in the new version number.
+        """
+        if self.VERSION_KEY in results_json:
+            archive_version = results_json[self.VERSION_KEY]
+            if archive_version == self.VERSION:
+                return
+        else:
+            archive_version = 3
+
+        # version 3->4
+        if archive_version == 3:
+            for results in results_json.itervalues():
+                self._convert_tests_to_trie(results)
+
+        results_json[self.VERSION_KEY] = self.VERSION
+
+    def _convert_tests_to_trie(self, results):
+        if self.TESTS not in results:
+            return
+
+        test_results = results[self.TESTS]
+        test_results_trie = {}
+        for test in test_results.iterkeys():
+            single_test_result = test_results[test]
+            add_path_to_trie(test, single_test_result, test_results_trie)
+
+        results[self.TESTS] = test_results_trie
+
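+    # Illustrative sketch (not part of the original change): the version 3 -> 4
+    # conversion above turns flat test paths into a trie. Hypothetical data:
+    #
+    #   results = {"tests": {"foo/bar/baz.html": {"results": [[1, "P"]]}}}
+    #   self._convert_tests_to_trie(results)
+    #   # results["tests"] == {"foo": {"bar": {"baz.html": {"results": [[1, "P"]]}}}}
+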
+    def _populate_results_and_times_json(self, results_and_times):
+        results_and_times[self.RESULTS] = []
+        results_and_times[self.TIMES] = []
+        return results_and_times
+
+    def _create_results_for_builder_json(self):
+        results_for_builder = {}
+        results_for_builder[self.TESTS] = {}
+        return results_for_builder
+
+    def _remove_items_over_max_number_of_builds(self, encoded_list):
+        """Removes items from the run-length encoded list after the final
+        item that exceeds the max number of builds to track.
+
+        Args:
+          encoded_list: run-length encoded results. An array of arrays, e.g.
+              [[3,'A'],[1,'Q']] encodes AAAQ.
+        """
+        num_builds = 0
+        index = 0
+        for result in encoded_list:
+            num_builds = num_builds + result[0]
+            index = index + 1
+            if num_builds > self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG:
+                return encoded_list[:index]
+        return encoded_list
+
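+    # Illustrative sketch (not part of the original change): with the limit
+    # hypothetically set to 5 builds, [[3, 'P'], [4, 'F'], [2, 'P']] covers
+    # 3 + 4 + 2 = 9 builds; the run that crosses the limit is the last one kept,
+    # so the result is [[3, 'P'], [4, 'F']].
+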
+    def _normalize_results_json(self, test, test_name, tests):
+        """ Prune tests where all runs pass or tests that no longer exist and
+        truncate all results to maxNumberOfBuilds.
+
+        Args:
+          test: ResultsAndTimes object for this test.
+          test_name: Name of the test.
+          tests: The JSON object with all the test results for this builder.
+        """
+        test[self.RESULTS] = self._remove_items_over_max_number_of_builds(
+            test[self.RESULTS])
+        test[self.TIMES] = self._remove_items_over_max_number_of_builds(
+            test[self.TIMES])
+
+        is_all_pass = self._is_results_all_of_type(test[self.RESULTS],
+                                                   self.PASS_RESULT)
+        is_all_no_data = self._is_results_all_of_type(test[self.RESULTS],
+            self.NO_DATA_RESULT)
+        max_time = max([time[1] for time in test[self.TIMES]])
+
+        # Remove all passes/no-data from the results to reduce noise and
+        # filesize. If a test passes every run, but takes > MIN_TIME to run,
+        # don't throw away the data.
+        if is_all_no_data or (is_all_pass and max_time <= self.MIN_TIME):
+            del tests[test_name]
+
+    def _is_results_all_of_type(self, results, type):
+        """Returns whether all the results are of the given type
+        (e.g. all passes)."""
+        return len(results) == 1 and results[0][1] == type
+
+
+# Left here so as not to break existing callers.
+class JSONResultsGenerator(JSONResultsGeneratorBase):
+    pass
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py
new file mode 100644
index 0000000..f04300f
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py
@@ -0,0 +1,235 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+import json
+import optparse
+import random
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.layout_tests.layout_package import json_results_generator
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.port import test
+from webkitpy.thirdparty.mock import Mock
+
+
+class JSONGeneratorTest(unittest.TestCase):
+    def setUp(self):
+        self.builder_name = 'DUMMY_BUILDER_NAME'
+        self.build_name = 'DUMMY_BUILD_NAME'
+        self.build_number = 'DUMMY_BUILDER_NUMBER'
+
+        # For archived results.
+        self._json = None
+        self._num_runs = 0
+        self._tests_set = set([])
+        self._test_timings = {}
+        self._failed_count_map = {}
+
+        self._PASS_count = 0
+        self._DISABLED_count = 0
+        self._FLAKY_count = 0
+        self._FAILS_count = 0
+        self._fixable_count = 0
+
+    def test_strip_json_wrapper(self):
+        json = "['contents']"
+        self.assertEqual(json_results_generator.strip_json_wrapper(json_results_generator._JSON_PREFIX + json + json_results_generator._JSON_SUFFIX), json)
+        self.assertEqual(json_results_generator.strip_json_wrapper(json), json)
+
+    def _test_json_generation(self, passed_tests_list, failed_tests_list):
+        tests_set = set(passed_tests_list) | set(failed_tests_list)
+
+        DISABLED_tests = set([t for t in tests_set
+                             if t.startswith('DISABLED_')])
+        FLAKY_tests = set([t for t in tests_set
+                           if t.startswith('FLAKY_')])
+        FAILS_tests = set([t for t in tests_set
+                           if t.startswith('FAILS_')])
+        PASS_tests = tests_set - (DISABLED_tests | FLAKY_tests | FAILS_tests)
+
+        failed_tests = set(failed_tests_list) - DISABLED_tests
+        failed_count_map = dict([(t, 1) for t in failed_tests])
+
+        test_timings = {}
+        i = 0
+        for test in tests_set:
+            test_timings[test] = float(self._num_runs * 100 + i)
+            i += 1
+
+        test_results_map = dict()
+        for test in tests_set:
+            test_results_map[test] = json_results_generator.TestResult(test,
+                failed=(test in failed_tests),
+                elapsed_time=test_timings[test])
+
+        host = MockHost()
+        port = Mock()
+        port._filesystem = host.filesystem
+        generator = json_results_generator.JSONResultsGeneratorBase(port,
+            self.builder_name, self.build_name, self.build_number,
+            '',
+            None,   # don't fetch past json results archive
+            test_results_map)
+
+        # Test incremental json results
+        incremental_json = generator.get_json()
+        self._verify_json_results(
+            tests_set,
+            test_timings,
+            failed_count_map,
+            len(PASS_tests),
+            len(DISABLED_tests),
+            len(FLAKY_tests),
+            len(DISABLED_tests | failed_tests),
+            incremental_json,
+            1)
+
+        # We don't verify the results here, but at least we make sure the code runs without errors.
+        generator.generate_json_output()
+        generator.generate_times_ms_file()
+
+    def _verify_json_results(self, tests_set, test_timings, failed_count_map,
+                             PASS_count, DISABLED_count, FLAKY_count,
+                             fixable_count,
+                             json, num_runs):
+        # Aliasing to a short name for better access to its constants.
+        JRG = json_results_generator.JSONResultsGeneratorBase
+
+        self.assertTrue(JRG.VERSION_KEY in json)
+        self.assertTrue(self.builder_name in json)
+
+        buildinfo = json[self.builder_name]
+        self.assertTrue(JRG.FIXABLE in buildinfo)
+        self.assertTrue(JRG.TESTS in buildinfo)
+        self.assertEqual(len(buildinfo[JRG.BUILD_NUMBERS]), num_runs)
+        self.assertEqual(buildinfo[JRG.BUILD_NUMBERS][0], self.build_number)
+
+        if tests_set or DISABLED_count:
+            fixable = {}
+            for fixable_items in buildinfo[JRG.FIXABLE]:
+                for (type, count) in fixable_items.iteritems():
+                    if type in fixable:
+                        fixable[type] = fixable[type] + count
+                    else:
+                        fixable[type] = count
+
+            if PASS_count:
+                self.assertEqual(fixable[JRG.PASS_RESULT], PASS_count)
+            else:
+                self.assertTrue(JRG.PASS_RESULT not in fixable or
+                                fixable[JRG.PASS_RESULT] == 0)
+            if DISABLED_count:
+                self.assertEqual(fixable[JRG.SKIP_RESULT], DISABLED_count)
+            else:
+                self.assertTrue(JRG.SKIP_RESULT not in fixable or
+                                fixable[JRG.SKIP_RESULT] == 0)
+            if FLAKY_count:
+                self.assertEqual(fixable[JRG.FLAKY_RESULT], FLAKY_count)
+            else:
+                self.assertTrue(JRG.FLAKY_RESULT not in fixable or
+                                fixable[JRG.FLAKY_RESULT] == 0)
+
+        if failed_count_map:
+            tests = buildinfo[JRG.TESTS]
+            for test_name in failed_count_map.iterkeys():
+                test = self._find_test_in_trie(test_name, tests)
+
+                failed = 0
+                for result in test[JRG.RESULTS]:
+                    if result[1] == JRG.FAIL_RESULT:
+                        failed += result[0]
+                self.assertEqual(failed_count_map[test_name], failed)
+
+                timing_count = 0
+                for timings in test[JRG.TIMES]:
+                    if timings[1] == test_timings[test_name]:
+                        timing_count = timings[0]
+                self.assertEqual(1, timing_count)
+
+        if fixable_count:
+            self.assertEqual(sum(buildinfo[JRG.FIXABLE_COUNT]), fixable_count)
+
+    def _find_test_in_trie(self, path, trie):
+        nodes = path.split("/")
+        sub_trie = trie
+        for node in nodes:
+            self.assertTrue(node in sub_trie)
+            sub_trie = sub_trie[node]
+        return sub_trie
+
+    def test_json_generation(self):
+        self._test_json_generation([], [])
+        self._test_json_generation(['A1', 'B1'], [])
+        self._test_json_generation([], ['FAILS_A2', 'FAILS_B2'])
+        self._test_json_generation(['DISABLED_A3', 'DISABLED_B3'], [])
+        self._test_json_generation(['A4'], ['B4', 'FAILS_C4'])
+        self._test_json_generation(['DISABLED_C5', 'DISABLED_D5'], ['A5', 'B5'])
+        self._test_json_generation(
+            ['A6', 'B6', 'FAILS_C6', 'DISABLED_E6', 'DISABLED_F6'],
+            ['FAILS_D6'])
+
+        # Generate JSON with the same test sets. (Both incremental results and
+        # archived results must be updated appropriately.)
+        self._test_json_generation(
+            ['A', 'FLAKY_B', 'DISABLED_C'],
+            ['FAILS_D', 'FLAKY_E'])
+        self._test_json_generation(
+            ['A', 'DISABLED_C', 'FLAKY_E'],
+            ['FLAKY_B', 'FAILS_D'])
+        self._test_json_generation(
+            ['FLAKY_B', 'DISABLED_C', 'FAILS_D'],
+            ['A', 'FLAKY_E'])
+
+    def test_hierarchical_json_generation(self):
+        # FIXME: Re-work tests to be more comprehensible and comprehensive.
+        self._test_json_generation(['foo/A'], ['foo/B', 'bar/C'])
+
+    def test_test_timings_trie(self):
+        test_port = test.TestPort(MockHost())
+        individual_test_timings = []
+        individual_test_timings.append(json_results_generator.TestResult('foo/bar/baz.html', elapsed_time=1.2))
+        individual_test_timings.append(json_results_generator.TestResult('bar.html', elapsed_time=0.0001))
+        trie = json_results_generator.test_timings_trie(test_port, individual_test_timings)
+
+        expected_trie = {
+          'bar.html': 0,
+          'foo': {
+              'bar': {
+                  'baz.html': 1200,
+              }
+          }
+        }
+
+        self.assertEqual(json.dumps(trie), json.dumps(expected_trie))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/__init__.py b/Tools/Scripts/webkitpy/layout_tests/models/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/models/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/result_summary.py b/Tools/Scripts/webkitpy/layout_tests/models/result_summary.py
new file mode 100644
index 0000000..5bb5010
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/models/result_summary.py
@@ -0,0 +1,82 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.layout_tests.models.test_expectations import TestExpectations, SKIP, CRASH, TIMEOUT
+
+
+class ResultSummary(object):
+    def __init__(self, expectations, test_files, iterations, expected_skips):
+        self.total = len(test_files) * iterations
+        self.remaining = self.total
+        self.expectations = expectations
+        self.expected = 0
+        self.unexpected = 0
+        self.unexpected_failures = 0
+        self.unexpected_crashes = 0
+        self.unexpected_timeouts = 0
+        self.total_tests_by_expectation = {}
+        self.tests_by_expectation = {}
+        self.tests_by_timeline = {}
+        self.results = {}
+        self.unexpected_results = {}
+        self.failures = {}
+        self.total_failures = 0
+        self.expected_skips = 0
+        self.total_tests_by_expectation[SKIP] = len(expected_skips)
+        self.tests_by_expectation[SKIP] = expected_skips
+        for expectation in TestExpectations.EXPECTATIONS.values():
+            self.tests_by_expectation[expectation] = set()
+            self.total_tests_by_expectation[expectation] = 0
+        for timeline in TestExpectations.TIMELINES.values():
+            self.tests_by_timeline[timeline] = expectations.get_tests_with_timeline(timeline)
+        self.slow_tests = set()
+
+    def add(self, test_result, expected, test_is_slow):
+        self.total_tests_by_expectation[test_result.type] += 1
+        self.tests_by_expectation[test_result.type].add(test_result.test_name)
+        self.results[test_result.test_name] = test_result
+        self.remaining -= 1
+        if len(test_result.failures):
+            self.total_failures += 1
+            self.failures[test_result.test_name] = test_result.failures
+        if expected:
+            self.expected += 1
+            if test_result.type == SKIP:
+                self.expected_skips += 1
+        else:
+            self.unexpected_results[test_result.test_name] = test_result
+            self.unexpected += 1
+            if len(test_result.failures):
+                self.unexpected_failures += 1
+            if test_result.type == CRASH:
+                self.unexpected_crashes += 1
+            elif test_result.type == TIMEOUT:
+                self.unexpected_timeouts += 1
+        if test_is_slow:
+            self.slow_tests.add(test_result.test_name)
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_configuration.py b/Tools/Scripts/webkitpy/layout_tests/models/test_configuration.py
new file mode 100644
index 0000000..95d0f2b
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_configuration.py
@@ -0,0 +1,306 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class TestConfiguration(object):
+    def __init__(self, version, architecture, build_type):
+        self.version = version
+        self.architecture = architecture
+        self.build_type = build_type
+
+    @classmethod
+    def category_order(cls):
+        """The most common human-readable order in which the configuration properties are listed."""
+        return ['version', 'architecture', 'build_type']
+
+    def items(self):
+        return self.__dict__.items()
+
+    def keys(self):
+        return self.__dict__.keys()
+
+    def __str__(self):
+        return ("<%(version)s, %(architecture)s, %(build_type)s>" %
+                self.__dict__)
+
+    def __repr__(self):
+        return "TestConfig(version='%(version)s', architecture='%(architecture)s', build_type='%(build_type)s')" % self.__dict__
+
+    def __hash__(self):
+        return hash(self.version + self.architecture + self.build_type)
+
+    def __eq__(self, other):
+        return self.__hash__() == other.__hash__()
+
+    def values(self):
+        """Returns the configuration values of this instance as a tuple."""
+        return self.__dict__.values()
+
+
+class SpecifierSorter(object):
+    def __init__(self, all_test_configurations=None, macros=None):
+        self._specifier_to_category = {}
+
+        if not all_test_configurations:
+            return
+        for test_configuration in all_test_configurations:
+            for category, specifier in test_configuration.items():
+                self.add_specifier(category, specifier)
+
+        self.add_macros(macros)
+
+    def add_specifier(self, category, specifier):
+        self._specifier_to_category[specifier] = category
+
+    def add_macros(self, macros):
+        if not macros:
+            return
+        # Assume well-formed macros.
+        for macro, specifier_list in macros.items():
+            self.add_specifier(self.category_for_specifier(specifier_list[0]), macro)
+
+    @classmethod
+    def category_priority(cls, category):
+        return TestConfiguration.category_order().index(category)
+
+    def specifier_priority(self, specifier):
+        return self.category_priority(self._specifier_to_category[specifier])
+
+    def category_for_specifier(self, specifier):
+        return self._specifier_to_category.get(specifier)
+
+    def sort_specifiers(self, specifiers):
+        category_slots = [[] for _ in TestConfiguration.category_order()]
+        for specifier in specifiers:
+            category_slots[self.specifier_priority(specifier)].append(specifier)
+
+        def sort_and_return(result, specifier_list):
+            specifier_list.sort()
+            return result + specifier_list
+
+        return reduce(sort_and_return, category_slots, [])
+
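+# Illustrative sketch (not part of the original change): SpecifierSorter groups
+# specifiers by category order (version, architecture, build_type) and sorts
+# alphabetically within each category. Assuming a sorter built from the full
+# configuration set:
+#
+#   sorter.sort_specifiers(['release', 'x86', 'xp'])  # -> ['xp', 'x86', 'release']
+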
+
+class TestConfigurationConverter(object):
+    def __init__(self, all_test_configurations, configuration_macros=None):
+        self._all_test_configurations = all_test_configurations
+        self._configuration_macros = configuration_macros or {}
+        self._specifier_to_configuration_set = {}
+        self._specifier_sorter = SpecifierSorter()
+        self._collapsing_sets_by_size = {}
+        self._junk_specifier_combinations = {}
+        self._collapsing_sets_by_category = {}
+        matching_sets_by_category = {}
+        for configuration in all_test_configurations:
+            for category, specifier in configuration.items():
+                self._specifier_to_configuration_set.setdefault(specifier, set()).add(configuration)
+                self._specifier_sorter.add_specifier(category, specifier)
+                self._collapsing_sets_by_category.setdefault(category, set()).add(specifier)
+                # FIXME: This seems extra-awful.
+                for cat2, spec2 in configuration.items():
+                    if category == cat2:
+                        continue
+                    matching_sets_by_category.setdefault(specifier, {}).setdefault(cat2, set()).add(spec2)
+        for collapsing_set in self._collapsing_sets_by_category.values():
+            self._collapsing_sets_by_size.setdefault(len(collapsing_set), set()).add(frozenset(collapsing_set))
+
+        for specifier, sets_by_category in matching_sets_by_category.items():
+            for category, set_by_category in sets_by_category.items():
+                if len(set_by_category) == 1 and self._specifier_sorter.category_priority(category) > self._specifier_sorter.specifier_priority(specifier):
+                    self._junk_specifier_combinations[specifier] = set_by_category
+
+        self._specifier_sorter.add_macros(configuration_macros)
+
+    def specifier_sorter(self):
+        return self._specifier_sorter
+
+    def _expand_macros(self, specifier):
+        expanded_specifiers = self._configuration_macros.get(specifier)
+        return expanded_specifiers or [specifier]
+
+    def to_config_set(self, specifier_set, error_list=None):
+        """Convert a list of specifiers into a set of TestConfiguration instances."""
+        if len(specifier_set) == 0:
+            return self._all_test_configurations
+
+        matching_sets = {}
+
+        for specifier in specifier_set:
+            for expanded_specifier in self._expand_macros(specifier):
+                configurations = self._specifier_to_configuration_set.get(expanded_specifier)
+                if not configurations:
+                    if error_list is not None:
+                        error_list.append("Unrecognized modifier '" + expanded_specifier + "'")
+                    return set()
+                category = self._specifier_sorter.category_for_specifier(expanded_specifier)
+                matching_sets.setdefault(category, set()).update(configurations)
+
+        return reduce(set.intersection, matching_sets.values())
+
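+    # Illustrative sketch (not part of the original change): with the mock
+    # configurations used in test_configuration_unittest.py, to_config_set
+    # intersects the per-category matches, e.g.
+    #
+    #   converter.to_config_set(set(['xp', 'release']))
+    #   # -> set([TestConfiguration('xp', 'x86', 'release')])
+    #   converter.to_config_set(set())  # -> every known configuration
+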
+    @classmethod
+    def collapse_macros(cls, macros_dict, specifiers_list):
+        for macro_specifier, macro in macros_dict.items():
+            if len(macro) == 1:
+                continue
+
+            for combination in cls.combinations(specifiers_list, len(macro)):
+                if cls.symmetric_difference(combination) == set(macro):
+                    for item in combination:
+                        specifiers_list.remove(item)
+                    new_specifier_set = cls.intersect_combination(combination)
+                    new_specifier_set.add(macro_specifier)
+                    specifiers_list.append(frozenset(new_specifier_set))
+
+        def collapse_individual_specifier_set(macro_specifier, macro):
+            specifiers_to_remove = []
+            specifiers_to_add = []
+            for specifier_set in specifiers_list:
+                macro_set = set(macro)
+                if macro_set.intersection(specifier_set) == macro_set:
+                    specifiers_to_remove.append(specifier_set)
+                    specifiers_to_add.append(frozenset((set(specifier_set) - macro_set) | set([macro_specifier])))
+            for specifier in specifiers_to_remove:
+                specifiers_list.remove(specifier)
+            for specifier in specifiers_to_add:
+                specifiers_list.append(specifier)
+
+        for macro_specifier, macro in macros_dict.items():
+            collapse_individual_specifier_set(macro_specifier, macro)
+
+    # FIXME: itertools.combinations is buggy in Python 2.6.1 (the version that ships on SL).
+    # It seems to be okay in 2.6.5 or later; until then, this is the implementation given
+    # in http://docs.python.org/library/itertools.html (from 2.7).
+    @staticmethod
+    def combinations(iterable, r):
+        # combinations('ABCD', 2) --> AB AC AD BC BD CD
+        # combinations(range(4), 3) --> 012 013 023 123
+        pool = tuple(iterable)
+        n = len(pool)
+        if r > n:
+            return
+        indices = range(r)
+        yield tuple(pool[i] for i in indices)
+        while True:
+            for i in reversed(range(r)):
+                if indices[i] != i + n - r:
+                    break
+            else:
+                return
+            indices[i] += 1  # pylint: disable=W0631
+            for j in range(i + 1, r):  # pylint: disable=W0631
+                indices[j] = indices[j - 1] + 1
+            yield tuple(pool[i] for i in indices)
+
+    @classmethod
+    def intersect_combination(cls, combination):
+        return reduce(set.intersection, [set(specifiers) for specifiers in combination])
+
+    @classmethod
+    def symmetric_difference(cls, iterable):
+        union = set()
+        intersection = iterable[0]
+        for item in iterable:
+            union = union | item
+            intersection = intersection.intersection(item)
+        return union - intersection
+
+    def to_specifiers_list(self, test_configuration_set):
+        """Convert a set of TestConfiguration instances into one or more list of specifiers."""
+        # Easy out: if the set is all configurations, the modifier is empty.
+        if len(test_configuration_set) == len(self._all_test_configurations):
+            return [[]]
+
+        # 1) Build a list of specifier sets, discarding specifiers that don't add value.
+        specifiers_list = []
+        for config in test_configuration_set:
+            values = set(config.values())
+            for specifier, junk_specifier_set in self._junk_specifier_combinations.items():
+                if specifier in values:
+                    values -= junk_specifier_set
+            specifiers_list.append(frozenset(values))
+
+        def try_collapsing(size, collapsing_sets):
+            if len(specifiers_list) < size:
+                return False
+            for combination in self.combinations(specifiers_list, size):
+                if self.symmetric_difference(combination) in collapsing_sets:
+                    for item in combination:
+                        specifiers_list.remove(item)
+                    specifiers_list.append(frozenset(self.intersect_combination(combination)))
+                    return True
+            return False
+
+        # 2) Collapse specifier sets with common specifiers:
+        #   (xp, release), (xp, debug) --> (xp, x86)
+        for size, collapsing_sets in self._collapsing_sets_by_size.items():
+            while try_collapsing(size, collapsing_sets):
+                pass
+
+        def try_abbreviating(collapsing_sets):
+            if len(specifiers_list) < 2:
+                return False
+            for combination in self.combinations(specifiers_list, 2):
+                for collapsing_set in collapsing_sets:
+                    diff = self.symmetric_difference(combination)
+                    if diff <= collapsing_set:
+                        common = self.intersect_combination(combination)
+                        for item in combination:
+                            specifiers_list.remove(item)
+                        specifiers_list.append(frozenset(common | diff))
+                        return True
+            return False
+
+        # 3) Abbreviate specifier sets by combining specifiers across categories.
+        #   (xp, release), (win7, release) --> (xp, win7, release)
+        while try_abbreviating(self._collapsing_sets_by_size.values()):
+            pass
+
+
+        # 4) Substitute specifier subsets that match macros within each set:
+        #   (xp, vista, win7, release) -> (win, release)
+        self.collapse_macros(self._configuration_macros, specifiers_list)
+
+        macro_keys = set(self._configuration_macros.keys())
+
+        # 5) Collapsing macros may have created combinations that can now be abbreviated.
+        #   (xp, release), (linux, x86, release), (linux, x86_64, release) --> (xp, release), (linux, release) --> (xp, linux, release)
+        while try_abbreviating([self._collapsing_sets_by_category['version'] | macro_keys]):
+            pass
+
+        # 6) Remove cases where we have collapsed but have all macros.
+        #   (android, win, mac, linux, release) --> (release)
+        specifiers_to_remove = []
+        for specifier_set in specifiers_list:
+            if macro_keys <= specifier_set:
+                specifiers_to_remove.append(specifier_set)
+
+        for specifier_set in specifiers_to_remove:
+            specifiers_list.remove(specifier_set)
+            specifiers_list.append(frozenset(specifier_set - macro_keys))
+
+        return specifiers_list
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_configuration_unittest.py b/Tools/Scripts/webkitpy/layout_tests/models/test_configuration_unittest.py
new file mode 100644
index 0000000..5c43b6a
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_configuration_unittest.py
@@ -0,0 +1,369 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.layout_tests.models.test_configuration import *
+
+
+def make_mock_all_test_configurations_set():
+    all_test_configurations = set()
+    for version, architecture in (('snowleopard', 'x86'), ('xp', 'x86'), ('win7', 'x86'), ('vista', 'x86'), ('lucid', 'x86'), ('lucid', 'x86_64')):
+        for build_type in ('debug', 'release'):
+            all_test_configurations.add(TestConfiguration(version, architecture, build_type))
+    return all_test_configurations
+
+MOCK_MACROS = {
+    'mac': ['snowleopard'],
+    'win': ['xp', 'vista', 'win7'],
+    'linux': ['lucid'],
+}
+
+
+class TestConfigurationTest(unittest.TestCase):
+    def test_items(self):
+        config = TestConfiguration('xp', 'x86', 'release')
+        result_config_dict = {}
+        for category, specifier in config.items():
+            result_config_dict[category] = specifier
+        self.assertEquals({'version': 'xp', 'architecture': 'x86', 'build_type': 'release'}, result_config_dict)
+
+    def test_keys(self):
+        config = TestConfiguration('xp', 'x86', 'release')
+        result_config_keys = []
+        for category in config.keys():
+            result_config_keys.append(category)
+        self.assertEquals(set(['version', 'architecture', 'build_type']), set(result_config_keys))
+
+    def test_str(self):
+        config = TestConfiguration('xp', 'x86', 'release')
+        self.assertEquals('<xp, x86, release>', str(config))
+
+    def test_repr(self):
+        config = TestConfiguration('xp', 'x86', 'release')
+        self.assertEquals("TestConfig(version='xp', architecture='x86', build_type='release')", repr(config))
+
+    def test_hash(self):
+        config_dict = {}
+        config_dict[TestConfiguration('xp', 'x86', 'release')] = True
+        self.assertTrue(TestConfiguration('xp', 'x86', 'release') in config_dict)
+        self.assertTrue(config_dict[TestConfiguration('xp', 'x86', 'release')])
+
+        def query_unknown_key():
+            return config_dict[TestConfiguration('xp', 'x86', 'debug')]
+
+        self.assertRaises(KeyError, query_unknown_key)
+        self.assertTrue(TestConfiguration('xp', 'x86', 'release') in config_dict)
+        self.assertFalse(TestConfiguration('xp', 'x86', 'debug') in config_dict)
+        configs_list = [TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug'), TestConfiguration('xp', 'x86', 'debug')]
+        self.assertEquals(len(configs_list), 3)
+        self.assertEquals(len(set(configs_list)), 2)
+
+    def test_eq(self):
+        self.assertEquals(TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'release'))
+        self.assertNotEquals(TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug'))
+
+    def test_values(self):
+        config = TestConfiguration('xp', 'x86', 'release')
+        result_config_values = []
+        for value in config.values():
+            result_config_values.append(value)
+        self.assertEquals(set(['xp', 'x86', 'release']), set(result_config_values))
+
+
+class SpecifierSorterTest(unittest.TestCase):
+    def __init__(self, testFunc):
+        self._all_test_configurations = make_mock_all_test_configurations_set()
+        unittest.TestCase.__init__(self, testFunc)
+
+    def test_init(self):
+        sorter = SpecifierSorter()
+        self.assertEquals(sorter.category_for_specifier('control'), None)
+        sorter = SpecifierSorter(self._all_test_configurations)
+        self.assertEquals(sorter.category_for_specifier('xp'), 'version')
+        sorter = SpecifierSorter(self._all_test_configurations, MOCK_MACROS)
+        self.assertEquals(sorter.category_for_specifier('mac'), 'version')
+
+    def test_add_specifier(self):
+        sorter = SpecifierSorter()
+        self.assertEquals(sorter.category_for_specifier('control'), None)
+        sorter.add_specifier('version', 'control')
+        self.assertEquals(sorter.category_for_specifier('control'), 'version')
+        sorter.add_specifier('version', 'one')
+        self.assertEquals(sorter.category_for_specifier('one'), 'version')
+        sorter.add_specifier('architecture', 'renaissance')
+        self.assertEquals(sorter.category_for_specifier('one'), 'version')
+        self.assertEquals(sorter.category_for_specifier('renaissance'), 'architecture')
+
+    def test_add_macros(self):
+        sorter = SpecifierSorter(self._all_test_configurations)
+        sorter.add_macros(MOCK_MACROS)
+        self.assertEquals(sorter.category_for_specifier('mac'), 'version')
+        self.assertEquals(sorter.category_for_specifier('win'), 'version')
+        self.assertEquals(sorter.category_for_specifier('x86'), 'architecture')
+
+    def test_category_priority(self):
+        sorter = SpecifierSorter(self._all_test_configurations)
+        self.assertEquals(sorter.category_priority('version'), 0)
+        self.assertEquals(sorter.category_priority('build_type'), 2)
+
+    def test_specifier_priority(self):
+        sorter = SpecifierSorter(self._all_test_configurations)
+        self.assertEquals(sorter.specifier_priority('x86'), 1)
+        self.assertEquals(sorter.specifier_priority('snowleopard'), 0)
+
+    def test_sort_specifiers(self):
+        sorter = SpecifierSorter(self._all_test_configurations, MOCK_MACROS)
+        self.assertEquals(sorter.sort_specifiers(set()), [])
+        self.assertEquals(sorter.sort_specifiers(set(['x86'])), ['x86'])
+        self.assertEquals(sorter.sort_specifiers(set(['x86', 'win7'])), ['win7', 'x86'])
+        self.assertEquals(sorter.sort_specifiers(set(['x86', 'debug', 'win7'])), ['win7', 'x86', 'debug'])
+        self.assertEquals(sorter.sort_specifiers(set(['snowleopard', 'x86', 'debug', 'win7'])), ['snowleopard', 'win7', 'x86', 'debug'])
+        self.assertEquals(sorter.sort_specifiers(set(['x86', 'mac', 'debug', 'win7'])), ['mac', 'win7', 'x86', 'debug'])
+
+
+class TestConfigurationConverterTest(unittest.TestCase):
+    def __init__(self, testFunc):
+        self._all_test_configurations = make_mock_all_test_configurations_set()
+        unittest.TestCase.__init__(self, testFunc)
+
+    def test_symmetric_difference(self):
+        self.assertEquals(TestConfigurationConverter.symmetric_difference([set(['a', 'b']), set(['b', 'c'])]), set(['a', 'c']))
+        self.assertEquals(TestConfigurationConverter.symmetric_difference([set(['a', 'b']), set(['b', 'c']), set(['b', 'd'])]), set(['a', 'c', 'd']))
+
+    def test_to_config_set(self):
+        converter = TestConfigurationConverter(self._all_test_configurations)
+
+        self.assertEquals(converter.to_config_set(set()), self._all_test_configurations)
+
+        self.assertEquals(converter.to_config_set(set(['foo'])), set())
+
+        self.assertEquals(converter.to_config_set(set(['xp', 'foo'])), set())
+
+        errors = []
+        self.assertEquals(converter.to_config_set(set(['xp', 'foo']), errors), set())
+        self.assertEquals(errors, ["Unrecognized modifier 'foo'"])
+
+        self.assertEquals(converter.to_config_set(set(['xp', 'x86_64'])), set())
+
+        configs_to_match = set([
+            TestConfiguration('xp', 'x86', 'release'),
+        ])
+        self.assertEquals(converter.to_config_set(set(['xp', 'release'])), configs_to_match)
+
+        configs_to_match = set([
+            TestConfiguration('snowleopard', 'x86', 'release'),
+            TestConfiguration('vista', 'x86', 'release'),
+            TestConfiguration('win7', 'x86', 'release'),
+            TestConfiguration('xp', 'x86', 'release'),
+            TestConfiguration('lucid', 'x86', 'release'),
+            TestConfiguration('lucid', 'x86_64', 'release'),
+        ])
+        self.assertEquals(converter.to_config_set(set(['release'])), configs_to_match)
+
+        configs_to_match = set([
+            TestConfiguration('lucid', 'x86_64', 'release'),
+            TestConfiguration('lucid', 'x86_64', 'debug'),
+        ])
+        self.assertEquals(converter.to_config_set(set(['x86_64'])), configs_to_match)
+
+        configs_to_match = set([
+            TestConfiguration('lucid', 'x86_64', 'release'),
+            TestConfiguration('lucid', 'x86_64', 'debug'),
+            TestConfiguration('lucid', 'x86', 'release'),
+            TestConfiguration('lucid', 'x86', 'debug'),
+            TestConfiguration('snowleopard', 'x86', 'release'),
+            TestConfiguration('snowleopard', 'x86', 'debug'),
+        ])
+        self.assertEquals(converter.to_config_set(set(['lucid', 'snowleopard'])), configs_to_match)
+
+        configs_to_match = set([
+            TestConfiguration('lucid', 'x86', 'release'),
+            TestConfiguration('lucid', 'x86', 'debug'),
+            TestConfiguration('snowleopard', 'x86', 'release'),
+            TestConfiguration('snowleopard', 'x86', 'debug'),
+        ])
+        self.assertEquals(converter.to_config_set(set(['lucid', 'snowleopard', 'x86'])), configs_to_match)
+
+        configs_to_match = set([
+            TestConfiguration('lucid', 'x86_64', 'release'),
+            TestConfiguration('lucid', 'x86', 'release'),
+            TestConfiguration('snowleopard', 'x86', 'release'),
+        ])
+        self.assertEquals(converter.to_config_set(set(['lucid', 'snowleopard', 'release'])), configs_to_match)
+
+    def test_macro_expansion(self):
+        converter = TestConfigurationConverter(self._all_test_configurations, MOCK_MACROS)
+
+        configs_to_match = set([
+            TestConfiguration('xp', 'x86', 'release'),
+            TestConfiguration('vista', 'x86', 'release'),
+            TestConfiguration('win7', 'x86', 'release'),
+        ])
+        self.assertEquals(converter.to_config_set(set(['win', 'release'])), configs_to_match)
+
+        configs_to_match = set([
+            TestConfiguration('xp', 'x86', 'release'),
+            TestConfiguration('vista', 'x86', 'release'),
+            TestConfiguration('win7', 'x86', 'release'),
+            TestConfiguration('lucid', 'x86', 'release'),
+            TestConfiguration('lucid', 'x86_64', 'release'),
+        ])
+        self.assertEquals(converter.to_config_set(set(['win', 'lucid', 'release'])), configs_to_match)
+
+        configs_to_match = set([
+            TestConfiguration('xp', 'x86', 'release'),
+            TestConfiguration('vista', 'x86', 'release'),
+            TestConfiguration('win7', 'x86', 'release'),
+            TestConfiguration('snowleopard', 'x86', 'release'),
+        ])
+        self.assertEquals(converter.to_config_set(set(['win', 'mac', 'release'])), configs_to_match)
+
+    def test_to_specifier_lists(self):
+        converter = TestConfigurationConverter(self._all_test_configurations, MOCK_MACROS)
+
+        self.assertEquals(converter.to_specifiers_list(set(self._all_test_configurations)), [[]])
+        self.assertEquals(converter.to_specifiers_list(set()), [])
+
+        configs_to_match = set([
+            TestConfiguration('xp', 'x86', 'release'),
+        ])
+        self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['release', 'xp'])])
+
+        configs_to_match = set([
+            TestConfiguration('xp', 'x86', 'release'),
+            TestConfiguration('xp', 'x86', 'debug'),
+        ])
+        self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['xp'])])
+
+        configs_to_match = set([
+            TestConfiguration('lucid', 'x86_64', 'debug'),
+            TestConfiguration('xp', 'x86', 'release'),
+        ])
+        self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['release', 'xp']), set(['debug', 'x86_64', 'linux'])])
+
+        configs_to_match = set([
+            TestConfiguration('xp', 'x86', 'release'),
+            TestConfiguration('xp', 'x86', 'release'),
+            TestConfiguration('lucid', 'x86_64', 'debug'),
+            TestConfiguration('lucid', 'x86', 'debug'),
+            TestConfiguration('lucid', 'x86_64', 'debug'),
+            TestConfiguration('lucid', 'x86', 'debug'),
+        ])
+        self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['release', 'xp']), set(['debug', 'linux'])])
+
+        configs_to_match = set([
+            TestConfiguration('xp', 'x86', 'release'),
+            TestConfiguration('snowleopard', 'x86', 'release'),
+            TestConfiguration('vista', 'x86', 'release'),
+            TestConfiguration('win7', 'x86', 'release'),
+            TestConfiguration('lucid', 'x86', 'release'),
+            TestConfiguration('lucid', 'x86_64', 'release'),
+        ])
+        self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['release'])])
+
+        configs_to_match = set([
+            TestConfiguration('xp', 'x86', 'release'),
+            TestConfiguration('snowleopard', 'x86', 'release'),
+        ])
+        self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['xp', 'mac', 'release'])])
+
+        configs_to_match = set([
+            TestConfiguration('xp', 'x86', 'release'),
+            TestConfiguration('snowleopard', 'x86', 'release'),
+            TestConfiguration('win7', 'x86', 'release'),
+            TestConfiguration('win7', 'x86', 'debug'),
+            TestConfiguration('lucid', 'x86', 'release'),
+        ])
+        self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['win7']), set(['release', 'linux', 'x86']), set(['release', 'xp', 'mac'])])
+
+    def test_macro_collapsing(self):
+        macros = {'foo': ['bar', 'baz'], 'people': ['bob', 'alice', 'john']}
+
+        specifiers_list = [set(['john', 'godzilla', 'bob', 'alice'])]
+        TestConfigurationConverter.collapse_macros(macros, specifiers_list)
+        self.assertEquals(specifiers_list, [set(['people', 'godzilla'])])
+
+        specifiers_list = [set(['john', 'godzilla', 'alice'])]
+        TestConfigurationConverter.collapse_macros(macros, specifiers_list)
+        self.assertEquals(specifiers_list, [set(['john', 'godzilla', 'alice'])])
+
+        specifiers_list = [set(['bar', 'godzilla', 'baz', 'bob', 'alice', 'john'])]
+        TestConfigurationConverter.collapse_macros(macros, specifiers_list)
+        self.assertEquals(specifiers_list, [set(['foo', 'godzilla', 'people'])])
+
+        specifiers_list = [set(['bar', 'godzilla', 'baz', 'bob']), set(['bar', 'baz']), set(['people', 'alice', 'bob', 'john'])]
+        TestConfigurationConverter.collapse_macros(macros, specifiers_list)
+        self.assertEquals(specifiers_list, [set(['bob', 'foo', 'godzilla']), set(['foo']), set(['people'])])
+
+    def test_converter_macro_collapsing(self):
+        converter = TestConfigurationConverter(self._all_test_configurations, MOCK_MACROS)
+
+        configs_to_match = set([
+            TestConfiguration('xp', 'x86', 'release'),
+            TestConfiguration('vista', 'x86', 'release'),
+            TestConfiguration('win7', 'x86', 'release'),
+        ])
+        self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['win', 'release'])])
+
+        configs_to_match = set([
+            TestConfiguration('xp', 'x86', 'release'),
+            TestConfiguration('vista', 'x86', 'release'),
+            TestConfiguration('win7', 'x86', 'release'),
+            TestConfiguration('lucid', 'x86', 'release'),
+            TestConfiguration('lucid', 'x86_64', 'release'),
+        ])
+        self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['win', 'linux', 'release'])])
+
+        configs_to_match = set([
+            TestConfiguration('xp', 'x86', 'release'),
+            TestConfiguration('vista', 'x86', 'release'),
+            TestConfiguration('win7', 'x86', 'release'),
+            TestConfiguration('snowleopard', 'x86', 'release'),
+        ])
+        self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['win', 'mac', 'release'])])
+
+        configs_to_match = set([
+            TestConfiguration('xp', 'x86', 'release'),
+            TestConfiguration('vista', 'x86', 'release'),
+            TestConfiguration('win7', 'x86', 'release'),
+            TestConfiguration('snowleopard', 'x86', 'release'),
+        ])
+        self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['win', 'mac', 'release'])])
+
+        configs_to_match = set([
+            TestConfiguration('xp', 'x86', 'release'),
+            TestConfiguration('vista', 'x86', 'release'),
+            TestConfiguration('win7', 'x86', 'release'),
+        ])
+        self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['win', 'release'])])
+
+    def test_specifier_converter_access(self):
+        specifier_sorter = TestConfigurationConverter(self._all_test_configurations, MOCK_MACROS).specifier_sorter()
+        self.assertEquals(specifier_sorter.category_for_specifier('snowleopard'), 'version')
+        self.assertEquals(specifier_sorter.category_for_specifier('mac'), 'version')
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
new file mode 100644
index 0000000..2342596
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
@@ -0,0 +1,1013 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A helper class for reading in and dealing with tests expectations
+for layout tests.
+"""
+
+import logging
+import re
+
+from webkitpy.layout_tests.models.test_configuration import TestConfigurationConverter
+
+_log = logging.getLogger(__name__)
+
+
+# Test expectation and modifier constants.
+#
+# FIXME: range() starts at 0, which makes "if expectation:" checks harder
+# because PASS is 0 (and therefore falsy).
+(PASS, FAIL, TEXT, IMAGE, IMAGE_PLUS_TEXT, AUDIO, TIMEOUT, CRASH, SKIP, WONTFIX,
+ SLOW, REBASELINE, MISSING, FLAKY, NOW, NONE) = range(16)
+
+# FIXME: Perhaps these two routines should be part of the Port instead?
+BASELINE_SUFFIX_LIST = ('png', 'wav', 'txt')
+
+
+class ParseError(Exception):
+    def __init__(self, warnings):
+        super(ParseError, self).__init__()
+        self.warnings = warnings
+
+    def __str__(self):
+        return '\n'.join(map(str, self.warnings))
+
+    def __repr__(self):
+        return 'ParseError(warnings=%s)' % self.warnings
+
+
+class TestExpectationParser(object):
+    """Provides parsing facilities for lines in the test_expectation.txt file."""
+
+    DUMMY_BUG_MODIFIER = "bug_dummy"
+    BUG_MODIFIER_PREFIX = 'bug'
+    BUG_MODIFIER_REGEX = 'bug\d+'
+    REBASELINE_MODIFIER = 'rebaseline'
+    PASS_EXPECTATION = 'pass'
+    SKIP_MODIFIER = 'skip'
+    SLOW_MODIFIER = 'slow'
+    WONTFIX_MODIFIER = 'wontfix'
+
+    TIMEOUT_EXPECTATION = 'timeout'
+
+    MISSING_BUG_WARNING = 'Test lacks BUG modifier.'
+
+    def __init__(self, port, full_test_list, allow_rebaseline_modifier):
+        self._port = port
+        self._test_configuration_converter = TestConfigurationConverter(set(port.all_test_configurations()), port.configuration_specifier_macros())
+        self._full_test_list = full_test_list
+        self._allow_rebaseline_modifier = allow_rebaseline_modifier
+
+    def parse(self, filename, expectations_string):
+        expectation_lines = []
+        line_number = 0
+        for line in expectations_string.split("\n"):
+            line_number += 1
+            test_expectation = self._tokenize_line(filename, line, line_number)
+            self._parse_line(test_expectation)
+            expectation_lines.append(test_expectation)
+        return expectation_lines
+
+    def expectation_for_skipped_test(self, test_name):
+        if not self._port.test_exists(test_name):
+            _log.warning('Test %s from the Skipped list does not exist' % test_name)
+        expectation_line = TestExpectationLine()
+        expectation_line.original_string = test_name
+        expectation_line.modifiers = [TestExpectationParser.DUMMY_BUG_MODIFIER, TestExpectationParser.SKIP_MODIFIER]
+        # FIXME: It's not clear what the expectations for a skipped test should be; the expectations
+        # might be different for different entries in a Skipped file, or from the command line, or from
+        # only running parts of the tests. It's also not clear if it matters much.
+        expectation_line.modifiers.append(TestExpectationParser.WONTFIX_MODIFIER)
+        expectation_line.name = test_name
+        # FIXME: we should pass in a more descriptive string here.
+        expectation_line.filename = '<Skipped file>'
+        expectation_line.line_number = 0
+        expectation_line.expectations = [TestExpectationParser.PASS_EXPECTATION]
+        self._parse_line(expectation_line)
+        return expectation_line
+
+    def _parse_line(self, expectation_line):
+        if not expectation_line.name:
+            return
+
+        if not self._check_test_exists(expectation_line):
+            return
+
+        expectation_line.is_file = self._port.test_isfile(expectation_line.name)
+        if expectation_line.is_file:
+            expectation_line.path = expectation_line.name
+        else:
+            expectation_line.path = self._port.normalize_test_name(expectation_line.name)
+
+        self._collect_matching_tests(expectation_line)
+
+        self._parse_modifiers(expectation_line)
+        self._parse_expectations(expectation_line)
+
+    def _parse_modifiers(self, expectation_line):
+        has_wontfix = False
+        has_bugid = False
+        parsed_specifiers = set()
+
+        modifiers = [modifier.lower() for modifier in expectation_line.modifiers]
+        expectations = [expectation.lower() for expectation in expectation_line.expectations]
+
+        if self.SLOW_MODIFIER in modifiers and self.TIMEOUT_EXPECTATION in expectations:
+            expectation_line.warnings.append('A test cannot be both SLOW and TIMEOUT. If it times out indefinitely, then it should be just TIMEOUT.')
+
+        for modifier in modifiers:
+            if modifier in TestExpectations.MODIFIERS:
+                expectation_line.parsed_modifiers.append(modifier)
+                if modifier == self.WONTFIX_MODIFIER:
+                    has_wontfix = True
+            elif modifier.startswith(self.BUG_MODIFIER_PREFIX):
+                has_bugid = True
+                if re.match(self.BUG_MODIFIER_REGEX, modifier):
+                    expectation_line.warnings.append('BUG\d+ is not allowed, must be one of BUGCR\d+, BUGWK\d+, BUGV8_\d+, or a non-numeric bug identifier.')
+                else:
+                    expectation_line.parsed_bug_modifiers.append(modifier)
+            else:
+                parsed_specifiers.add(modifier)
+
+        if not expectation_line.parsed_bug_modifiers and not has_wontfix and not has_bugid and self._port.warn_if_bug_missing_in_test_expectations():
+            expectation_line.warnings.append(self.MISSING_BUG_WARNING)
+
+        if self._allow_rebaseline_modifier and self.REBASELINE_MODIFIER in modifiers:
+            expectation_line.warnings.append('REBASELINE should only be used for running rebaseline.py. Cannot be checked in.')
+
+        expectation_line.matching_configurations = self._test_configuration_converter.to_config_set(parsed_specifiers, expectation_line.warnings)
+
+    def _parse_expectations(self, expectation_line):
+        result = set()
+        for part in expectation_line.expectations:
+            expectation = TestExpectations.expectation_from_string(part)
+            if expectation is None:  # Careful, PASS is currently 0.
+                expectation_line.warnings.append('Unsupported expectation: %s' % part)
+                continue
+            result.add(expectation)
+        expectation_line.parsed_expectations = result
+
+    def _check_test_exists(self, expectation_line):
+        # WebKit's way of skipping tests is to add a -disabled suffix.
+        # So we should consider the path existing if the path or the
+        # -disabled version exists.
+        if not self._port.test_exists(expectation_line.name) and not self._port.test_exists(expectation_line.name + '-disabled'):
+            # Log a warning here since you hit this case any
+            # time you update TestExpectations without syncing
+            # the LayoutTests directory
+            expectation_line.warnings.append('Path does not exist.')
+            return False
+        return True
+
+    def _collect_matching_tests(self, expectation_line):
+        """Convert the test specification to an absolute, normalized
+        path and make sure directories end with the OS path separator."""
+        # FIXME: full_test_list can quickly grow to a large number of
+        # elements. At some point we should consider using a more
+        # efficient structure than a list, e.g. a dictionary of lists
+        # representing the tree of tests, with leaves being test files
+        # and nodes being categories.
+
+        if not self._full_test_list:
+            expectation_line.matching_tests = [expectation_line.path]
+            return
+
+        if not expectation_line.is_file:
+            # This is a test category; return all the tests in the category.
+            expectation_line.matching_tests = [test for test in self._full_test_list if test.startswith(expectation_line.path)]
+            return
+
+        # This is a test file; do a quick check that it's in the
+        # full test suite.
+        if expectation_line.path in self._full_test_list:
+            expectation_line.matching_tests.append(expectation_line.path)
+
+    # FIXME: Update the original modifiers and remove this once the old syntax is gone.
+    _configuration_tokens_list = [
+        'Mac', 'SnowLeopard', 'Lion', 'MountainLion',
+        'Win', 'XP', 'Vista', 'Win7',
+        'Linux',
+        'Android',
+        'Release',
+        'Debug',
+    ]
+
+    _configuration_tokens = dict((token, token.upper()) for token in _configuration_tokens_list)
+    _inverted_configuration_tokens = dict((value, name) for name, value in _configuration_tokens.iteritems())
+
+    # FIXME: Update the original modifiers list and remove this once the old syntax is gone.
+    _expectation_tokens = {
+        'Crash': 'CRASH',
+        'Failure': 'FAIL',
+        'ImageOnlyFailure': 'IMAGE',
+        'Missing': 'MISSING',
+        'Pass': 'PASS',
+        'Rebaseline': 'REBASELINE',
+        'Skip': 'SKIP',
+        'Slow': 'SLOW',
+        'Timeout': 'TIMEOUT',
+        'WontFix': 'WONTFIX',
+    }
+
+    _inverted_expectation_tokens = dict([(value, name) for name, value in _expectation_tokens.iteritems()] +
+                                        [('TEXT', 'Failure'), ('IMAGE+TEXT', 'Failure'), ('AUDIO', 'Failure')])
+
+    # FIXME: Seems like these should be classmethods on TestExpectationLine instead of TestExpectationParser.
+    @classmethod
+    def _tokenize_line(cls, filename, expectation_string, line_number):
+        """Tokenizes a line from TestExpectations and returns an unparsed TestExpectationLine instance using the old format.
+
+        The new format for a test expectation line is:
+
+        [ <bugs> ] [ "[" <configuration modifiers> "]" ] <name> [ "[" <expectations> "]" ] [ "#" <comment> ]
+
+        Any errant whitespace is not preserved.
+
+        """
+        expectation_line = TestExpectationLine()
+        expectation_line.original_string = expectation_string
+        expectation_line.filename = filename
+        expectation_line.line_number = line_number
+
+        comment_index = expectation_string.find("#")
+        if comment_index == -1:
+            comment_index = len(expectation_string)
+        else:
+            expectation_line.comment = expectation_string[comment_index + 1:]
+
+        remaining_string = re.sub(r"\s+", " ", expectation_string[:comment_index].strip())
+        if len(remaining_string) == 0:
+            return expectation_line
+
+        # special-case parsing this so that we fail immediately instead of treating this as a test name
+        if remaining_string.startswith('//'):
+            expectation_line.warnings = ['use "#" instead of "//" for comments']
+            return expectation_line
+
+        bugs = []
+        modifiers = []
+        name = None
+        expectations = []
+        warnings = []
+
+        WEBKIT_BUG_PREFIX = 'webkit.org/b/'
+        CHROMIUM_BUG_PREFIX = 'crbug.com/'
+        V8_BUG_PREFIX = 'code.google.com/p/v8/issues/detail?id='
+
+        tokens = remaining_string.split()
+        state = 'start'
+        for token in tokens:
+            if (token.startswith(WEBKIT_BUG_PREFIX) or
+                token.startswith(CHROMIUM_BUG_PREFIX) or
+                token.startswith(V8_BUG_PREFIX) or
+                token.startswith('Bug(')):
+                if state != 'start':
+                    warnings.append('"%s" is not at the start of the line.' % token)
+                    break
+                if token.startswith(WEBKIT_BUG_PREFIX):
+                    bugs.append(token.replace(WEBKIT_BUG_PREFIX, 'BUGWK'))
+                elif token.startswith(CHROMIUM_BUG_PREFIX):
+                    bugs.append(token.replace(CHROMIUM_BUG_PREFIX, 'BUGCR'))
+                elif token.startswith(V8_BUG_PREFIX):
+                    bugs.append(token.replace(V8_BUG_PREFIX, 'BUGV8_'))
+                else:
+                    match = re.match('Bug\((\w+)\)$', token)
+                    if not match:
+                        warnings.append('unrecognized bug identifier "%s"' % token)
+                        break
+                    else:
+                        bugs.append('BUG' + match.group(1).upper())
+            elif token.startswith('BUG'):
+                warnings.append('unrecognized old-style bug identifier "%s"' % token)
+                break
+            elif token == '[':
+                if state == 'start':
+                    state = 'configuration'
+                elif state == 'name_found':
+                    state = 'expectations'
+                else:
+                    warnings.append('unexpected "["')
+                    break
+            elif token == ']':
+                if state == 'configuration':
+                    state = 'name'
+                elif state == 'expectations':
+                    state = 'done'
+                else:
+                    warnings.append('unexpected "]"')
+                    break
+            elif token in ('//', ':', '='):
+                warnings.append('"%s" is not legal in the new TestExpectations syntax.' % token)
+                break
+            elif state == 'configuration':
+                modifiers.append(cls._configuration_tokens.get(token, token))
+            elif state == 'expectations':
+                if token in ('Rebaseline', 'Skip', 'Slow', 'WontFix'):
+                    modifiers.append(token.upper())
+                else:
+                    expectations.append(cls._expectation_tokens.get(token, token))
+            elif state == 'name_found':
+                warnings.append('expecting "[", "#", or end of line instead of "%s"' % token)
+                break
+            else:
+                name = token
+                state = 'name_found'
+
+        if not warnings:
+            if not name:
+                warnings.append('Did not find a test name.')
+            elif state not in ('name_found', 'done'):
+                warnings.append('Missing a "]"')
+
+        if 'WONTFIX' in modifiers and 'SKIP' not in modifiers:
+            modifiers.append('SKIP')
+
+        if 'SKIP' in modifiers and expectations:
+            # FIXME: This is really a semantic warning and shouldn't be here. Remove when we drop the old syntax.
+            warnings.append('A test marked Skip or WontFix must not have other expectations.')
+        elif not expectations:
+            if 'SKIP' not in modifiers and 'REBASELINE' not in modifiers and 'SLOW' not in modifiers:
+                modifiers.append('SKIP')
+            expectations = ['PASS']
+
+        # FIXME: expectation line should just store bugs and modifiers separately.
+        expectation_line.modifiers = bugs + modifiers
+        expectation_line.expectations = expectations
+        expectation_line.name = name
+        expectation_line.warnings = warnings
+        return expectation_line
+
+    @classmethod
+    def _split_space_separated(cls, space_separated_string):
+        """Splits a space-separated string into an array."""
+        return [part.strip() for part in space_separated_string.strip().split(' ')]
+
+
+class TestExpectationLine(object):
+    """Represents a line in test expectations file."""
+
+    def __init__(self):
+        """Initializes a blank-line equivalent of an expectation."""
+        self.original_string = None
+        self.filename = None  # this is the path to the expectations file for this line
+        self.line_number = None
+        self.name = None  # this is the path in the line itself
+        self.path = None  # this is the normpath of self.name
+        self.modifiers = []
+        self.parsed_modifiers = []
+        self.parsed_bug_modifiers = []
+        self.matching_configurations = set()
+        self.expectations = []
+        self.parsed_expectations = set()
+        self.comment = None
+        self.matching_tests = []
+        self.warnings = []
+
+    def is_invalid(self):
+        return self.warnings and self.warnings != [TestExpectationParser.MISSING_BUG_WARNING]
+
+    def is_flaky(self):
+        return len(self.parsed_expectations) > 1
+
+    @staticmethod
+    def create_passing_expectation(test):
+        expectation_line = TestExpectationLine()
+        expectation_line.name = test
+        expectation_line.path = test
+        expectation_line.parsed_expectations = set([PASS])
+        expectation_line.expectations = set(['PASS'])
+        expectation_line.matching_tests = [test]
+        return expectation_line
+
+    def to_string(self, test_configuration_converter, include_modifiers=True, include_expectations=True, include_comment=True):
+        parsed_expectation_to_string = dict([[parsed_expectation, expectation_string] for expectation_string, parsed_expectation in TestExpectations.EXPECTATIONS.items()])
+
+        if self.is_invalid():
+            return self.original_string or ''
+
+        if self.name is None:
+            return '' if self.comment is None else "#%s" % self.comment
+
+        if test_configuration_converter and self.parsed_bug_modifiers:
+            specifiers_list = test_configuration_converter.to_specifiers_list(self.matching_configurations)
+            result = []
+            for specifiers in specifiers_list:
+                # FIXME: It's silly that we join the modifiers and then immediately split them.
+                modifiers = self._serialize_parsed_modifiers(test_configuration_converter, specifiers).split()
+                expectations = self._serialize_parsed_expectations(parsed_expectation_to_string).split()
+                result.append(self._format_line(modifiers, self.name, expectations, self.comment))
+            return "\n".join(result) if result else None
+
+        return self._format_line(self.modifiers, self.name, self.expectations, self.comment,
+            include_modifiers, include_expectations, include_comment)
+
+    def to_csv(self):
+        # Note that this doesn't include the comments.
+        return '%s,%s,%s' % (self.name, ' '.join(self.modifiers), ' '.join(self.expectations))
+
+    def _serialize_parsed_expectations(self, parsed_expectation_to_string):
+        result = []
+        for index in TestExpectations.EXPECTATION_ORDER:
+            if index in self.parsed_expectations:
+                result.append(parsed_expectation_to_string[index])
+        return ' '.join(result)
+
+    def _serialize_parsed_modifiers(self, test_configuration_converter, specifiers):
+        result = []
+        if self.parsed_bug_modifiers:
+            result.extend(sorted(self.parsed_bug_modifiers))
+        result.extend(sorted(self.parsed_modifiers))
+        result.extend(test_configuration_converter.specifier_sorter().sort_specifiers(specifiers))
+        return ' '.join(result)
+
+    @staticmethod
+    def _format_line(modifiers, name, expectations, comment, include_modifiers=True, include_expectations=True, include_comment=True):
+        bugs = []
+        new_modifiers = []
+        new_expectations = []
+        for modifier in modifiers:
+            modifier = modifier.upper()
+            if modifier.startswith('BUGWK'):
+                bugs.append('webkit.org/b/' + modifier.replace('BUGWK', ''))
+            elif modifier.startswith('BUGCR'):
+                bugs.append('crbug.com/' + modifier.replace('BUGCR', ''))
+            elif modifier.startswith('BUG'):
+                # FIXME: we should preserve case once we can drop the old syntax.
+                bugs.append('Bug(' + modifier[3:].lower() + ')')
+            elif modifier in ('SLOW', 'SKIP', 'REBASELINE', 'WONTFIX'):
+                new_expectations.append(TestExpectationParser._inverted_expectation_tokens.get(modifier))
+            else:
+                new_modifiers.append(TestExpectationParser._inverted_configuration_tokens.get(modifier, modifier))
+
+        for expectation in expectations:
+            expectation = expectation.upper()
+            new_expectations.append(TestExpectationParser._inverted_expectation_tokens.get(expectation, expectation))
+
+        result = ''
+        if include_modifiers and (bugs or new_modifiers):
+            if bugs:
+                result += ' '.join(bugs) + ' '
+            if new_modifiers:
+                result += '[ %s ] ' % ' '.join(new_modifiers)
+        result += name
+        if include_expectations and new_expectations and set(new_expectations) != set(['Skip', 'Pass']):
+            result += ' [ %s ]' % ' '.join(sorted(set(new_expectations)))
+        if include_comment and comment is not None:
+            result += " #%s" % comment
+        return result
+
+
+# FIXME: Refactor API to be a proper CRUD.
+class TestExpectationsModel(object):
+    """Represents relational store of all expectations and provides CRUD semantics to manage it."""
+
+    def __init__(self, shorten_filename=None):
+        # Maps a test to its list of expectations.
+        self._test_to_expectations = {}
+
+        # Maps a test to list of its modifiers (string values)
+        self._test_to_modifiers = {}
+
+        # Maps a test to a TestExpectationLine instance.
+        self._test_to_expectation_line = {}
+
+        self._modifier_to_tests = self._dict_of_sets(TestExpectations.MODIFIERS)
+        self._expectation_to_tests = self._dict_of_sets(TestExpectations.EXPECTATIONS)
+        self._timeline_to_tests = self._dict_of_sets(TestExpectations.TIMELINES)
+        self._result_type_to_tests = self._dict_of_sets(TestExpectations.RESULT_TYPES)
+
+        self._shorten_filename = shorten_filename or (lambda x: x)
+
+    def _dict_of_sets(self, strings_to_constants):
+        """Takes a dict of strings->constants and returns a dict mapping
+        each constant to an empty set."""
+        d = {}
+        for c in strings_to_constants.values():
+            d[c] = set()
+        return d
+
+    def get_test_set(self, modifier, expectation=None, include_skips=True):
+        if expectation is None:
+            tests = self._modifier_to_tests[modifier]
+        else:
+            tests = (self._expectation_to_tests[expectation] &
+                self._modifier_to_tests[modifier])
+
+        if not include_skips:
+            tests = tests - self.get_test_set(SKIP, expectation)
+
+        return tests
+
+    def get_test_set_for_keyword(self, keyword):
+        # FIXME: get_test_set() is an awkward public interface because it requires
+        # callers to know the difference between modifiers and expectations. We
+        # should replace that with this where possible.
+        expectation_enum = TestExpectations.EXPECTATIONS.get(keyword.lower(), None)
+        if expectation_enum is not None:
+            return self._expectation_to_tests[expectation_enum]
+        modifier_enum = TestExpectations.MODIFIERS.get(keyword.lower(), None)
+        if modifier_enum is not None:
+            return self._modifier_to_tests[modifier_enum]
+
+        # We must not have an index on this modifier.
+        matching_tests = set()
+        for test, modifiers in self._test_to_modifiers.iteritems():
+            if keyword.lower() in modifiers:
+                matching_tests.add(test)
+        return matching_tests
+
+    def get_tests_with_result_type(self, result_type):
+        return self._result_type_to_tests[result_type]
+
+    def get_tests_with_timeline(self, timeline):
+        return self._timeline_to_tests[timeline]
+
+    def get_modifiers(self, test):
+        """This returns modifiers for the given test (the modifiers plus the BUGXXXX identifier). This is used by the LTTF dashboard."""
+        return self._test_to_modifiers[test]
+
+    def has_modifier(self, test, modifier):
+        return test in self._modifier_to_tests[modifier]
+
+    def has_keyword(self, test, keyword):
+        return (keyword.upper() in self.get_expectations_string(test) or
+                keyword.lower() in self.get_modifiers(test))
+
+    def has_test(self, test):
+        return test in self._test_to_expectation_line
+
+    def get_expectation_line(self, test):
+        return self._test_to_expectation_line.get(test)
+
+    def get_expectations(self, test):
+        return self._test_to_expectations[test]
+
+    def get_expectations_string(self, test):
+        """Returns the expectatons for the given test as an uppercase string.
+        If there are no expectations for the test, then "PASS" is returned."""
+        expectations = self.get_expectations(test)
+        retval = []
+
+        for expectation in expectations:
+            retval.append(self.expectation_to_string(expectation))
+
+        return " ".join(retval)
+
+    def expectation_to_string(self, expectation):
+        """Return the uppercased string equivalent of a given expectation."""
+        for item in TestExpectations.EXPECTATIONS.items():
+            if item[1] == expectation:
+                return item[0].upper()
+        raise ValueError(expectation)
+
+
+    def add_expectation_line(self, expectation_line, in_skipped=False):
+        """Returns a list of warnings encountered while matching modifiers."""
+
+        if expectation_line.is_invalid():
+            return
+
+        for test in expectation_line.matching_tests:
+            if not in_skipped and self._already_seen_better_match(test, expectation_line):
+                continue
+
+            self._clear_expectations_for_test(test)
+            self._test_to_expectation_line[test] = expectation_line
+            self._add_test(test, expectation_line)
+
+    def _add_test(self, test, expectation_line):
+        """Sets the expected state for a given test.
+
+        This routine assumes the test has not been added before. If it has,
+        use _clear_expectations_for_test() to reset the state prior to
+        calling this."""
+        self._test_to_expectations[test] = expectation_line.parsed_expectations
+        for expectation in expectation_line.parsed_expectations:
+            self._expectation_to_tests[expectation].add(test)
+
+        self._test_to_modifiers[test] = expectation_line.modifiers
+        for modifier in expectation_line.parsed_modifiers:
+            mod_value = TestExpectations.MODIFIERS[modifier]
+            self._modifier_to_tests[mod_value].add(test)
+
+        if TestExpectationParser.WONTFIX_MODIFIER in expectation_line.parsed_modifiers:
+            self._timeline_to_tests[WONTFIX].add(test)
+        else:
+            self._timeline_to_tests[NOW].add(test)
+
+        if TestExpectationParser.SKIP_MODIFIER in expectation_line.parsed_modifiers:
+            self._result_type_to_tests[SKIP].add(test)
+        elif expectation_line.parsed_expectations == set([PASS]):
+            self._result_type_to_tests[PASS].add(test)
+        elif expectation_line.is_flaky():
+            self._result_type_to_tests[FLAKY].add(test)
+        else:
+            # FIXME: What is this?
+            self._result_type_to_tests[FAIL].add(test)
+
+    def _clear_expectations_for_test(self, test):
+        """Remove prexisting expectations for this test.
+        This happens if we are seeing a more precise path
+        than a previous listing.
+        """
+        if self.has_test(test):
+            self._test_to_expectations.pop(test, '')
+            self._remove_from_sets(test, self._expectation_to_tests)
+            self._remove_from_sets(test, self._modifier_to_tests)
+            self._remove_from_sets(test, self._timeline_to_tests)
+            self._remove_from_sets(test, self._result_type_to_tests)
+
+    def _remove_from_sets(self, test, dict_of_sets_of_tests):
+        """Removes the given test from the sets in the dictionary.
+
+        Args:
+          test: test to look for
+          dict_of_sets_of_tests: dict of sets of tests"""
+        for set_of_tests in dict_of_sets_of_tests.itervalues():
+            if test in set_of_tests:
+                set_of_tests.remove(test)
+
+    def _already_seen_better_match(self, test, expectation_line):
+        """Returns whether we've seen a better match already in the file.
+
+        Returns True if we've already seen an expectation_line.name that matches more of the test
+            than this path does.
+        """
+        # FIXME: See comment below about matching test configs and specificity.
+        if not self.has_test(test):
+            # We've never seen this test before.
+            return False
+
+        prev_expectation_line = self._test_to_expectation_line[test]
+
+        if prev_expectation_line.filename != expectation_line.filename:
+            # We've moved on to a new expectation file, which overrides older ones.
+            return False
+
+        if len(prev_expectation_line.path) > len(expectation_line.path):
+            # The previous path matched more of the test.
+            return True
+
+        if len(prev_expectation_line.path) < len(expectation_line.path):
+            # This path matches more of the test.
+            return False
+
+        # At this point we know we have seen a previous exact match on this
+        # base path, so we need to check the two sets of modifiers.
+
+        # FIXME: This code was originally designed to allow lines that matched
+        # more modifiers to override lines that matched fewer modifiers.
+        # However, we currently view these as errors.
+        #
+        # To use the "more modifiers wins" policy, change the errors for overrides
+        # to be warnings and return False".
+
+        if prev_expectation_line.matching_configurations == expectation_line.matching_configurations:
+            expectation_line.warnings.append('Duplicate or ambiguous entry lines %s:%d and %s:%d.' % (
+                self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_number,
+                self._shorten_filename(expectation_line.filename), expectation_line.line_number))
+            return True
+
+        if prev_expectation_line.matching_configurations >= expectation_line.matching_configurations:
+            expectation_line.warnings.append('More specific entry for %s on line %s:%d overrides line %s:%d.' % (expectation_line.name,
+                self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_number,
+                self._shorten_filename(expectation_line.filename), expectation_line.line_number))
+            # FIXME: return False if we want more specific to win.
+            return True
+
+        if prev_expectation_line.matching_configurations <= expectation_line.matching_configurations:
+            expectation_line.warnings.append('More specific entry for %s on line %s:%d overrides line %s:%d.' % (expectation_line.name,
+                self._shorten_filename(expectation_line.filename), expectation_line.line_number,
+                self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_number))
+            return True
+
+        if prev_expectation_line.matching_configurations & expectation_line.matching_configurations:
+            expectation_line.warnings.append('Entries for %s on lines %s:%d and %s:%d match overlapping sets of configurations.' % (expectation_line.name,
+                self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_number,
+                self._shorten_filename(expectation_line.filename), expectation_line.line_number))
+            return True
+
+        # Configuration sets are disjoint, then.
+        return False
+
+
+class TestExpectations(object):
+    """Test expectations consist of lines with specifications of what
+    to expect from layout test cases. The test cases can be directories,
+    in which case the expectations apply to all test cases in that
+    directory and any subdirectory. The format is along the lines of:
+
+      LayoutTests/fast/js/fixme.js [ Failure ]
+      LayoutTests/fast/js/flaky.js [ Failure Pass ]
+      LayoutTests/fast/js/crash.js [ Crash Failure Pass Timeout ]
+      ...
+
+    To add modifiers:
+      LayoutTests/fast/js/no-good.js
+      [ Debug ] LayoutTests/fast/js/no-good.js [ Pass Timeout ]
+      [ Debug ] LayoutTests/fast/js/no-good.js [ Pass Skip Timeout ]
+      [ Linux Debug ] LayoutTests/fast/js/no-good.js [ Pass Skip Timeout ]
+      [ Linux Win ] LayoutTests/fast/js/no-good.js [ Pass Skip Timeout ]
+
+    Skip: Doesn't run the test.
+    Slow: The test takes a long time to run, but does not timeout indefinitely.
+    WontFix: For tests that we never intend to pass on a given platform (treated like Skip).
+
+    Notes:
+      - A test cannot be both SLOW and TIMEOUT.
+      - A test can be included twice, but not via the same path.
+      - If a test is included twice, then the more precise path wins.
+      - CRASH tests cannot be WONTFIX.
+    """
+
+    # FIXME: Update to new syntax once the old format is no longer supported.
+    EXPECTATIONS = {'pass': PASS,
+                    'audio': AUDIO,
+                    'fail': FAIL,
+                    'image': IMAGE,
+                    'image+text': IMAGE_PLUS_TEXT,
+                    'text': TEXT,
+                    'timeout': TIMEOUT,
+                    'crash': CRASH,
+                    'missing': MISSING}
+
+    # (aggregated by category, pass/fail/skip, type)
+    EXPECTATION_DESCRIPTIONS = {SKIP: 'skipped',
+                                PASS: 'passes',
+                                FAIL: 'failures',
+                                IMAGE: 'image-only failures',
+                                TEXT: 'text-only failures',
+                                IMAGE_PLUS_TEXT: 'image and text failures',
+                                AUDIO: 'audio failures',
+                                CRASH: 'crashes',
+                                TIMEOUT: 'timeouts',
+                                MISSING: 'missing results'}
+
+    EXPECTATION_ORDER = (PASS, CRASH, TIMEOUT, MISSING, FAIL, IMAGE, SKIP)
+
+    BUILD_TYPES = ('debug', 'release')
+
+    MODIFIERS = {TestExpectationParser.SKIP_MODIFIER: SKIP,
+                 TestExpectationParser.WONTFIX_MODIFIER: WONTFIX,
+                 TestExpectationParser.SLOW_MODIFIER: SLOW,
+                 TestExpectationParser.REBASELINE_MODIFIER: REBASELINE,
+                 'none': NONE}
+
+    TIMELINES = {TestExpectationParser.WONTFIX_MODIFIER: WONTFIX,
+                 'now': NOW}
+
+    RESULT_TYPES = {'skip': SKIP,
+                    'pass': PASS,
+                    'fail': FAIL,
+                    'flaky': FLAKY}
+
+    @classmethod
+    def expectation_from_string(cls, string):
+        assert(' ' not in string)  # This only handles one expectation at a time.
+        return cls.EXPECTATIONS.get(string.lower())
+
+    @staticmethod
+    def result_was_expected(result, expected_results, test_needs_rebaselining, test_is_skipped):
+        """Returns whether we got a result we were expecting.
+        Args:
+            result: actual result of a test execution
+            expected_results: set of results listed in test_expectations
+            test_needs_rebaselining: whether test was marked as REBASELINE
+            test_is_skipped: whether test was marked as SKIP"""
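+        # For example, result_was_expected(TEXT, set([FAIL]), False, False) is
+        # True, because TEXT, IMAGE_PLUS_TEXT and AUDIO count as forms of FAIL.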
+        if result in expected_results:
+            return True
+        if result in (TEXT, IMAGE_PLUS_TEXT, AUDIO) and (FAIL in expected_results):
+            return True
+        if result == MISSING and test_needs_rebaselining:
+            return True
+        if result == SKIP and test_is_skipped:
+            return True
+        return False
+
+    @staticmethod
+    def remove_pixel_failures(expected_results):
+        """Returns a copy of the expected results for a test, except that we
+        drop any pixel failures and return the remaining expectations. For example,
+        if we're not running pixel tests, then tests expected to fail as IMAGE
+        will PASS."""
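+        # For example, remove_pixel_failures(set([IMAGE, TEXT])) == set([PASS, TEXT]).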
+        expected_results = expected_results.copy()
+        if IMAGE in expected_results:
+            expected_results.remove(IMAGE)
+            expected_results.add(PASS)
+        return expected_results
+
+    @staticmethod
+    def has_pixel_failures(actual_results):
+        return IMAGE in actual_results or FAIL in actual_results
+
+    @staticmethod
+    def suffixes_for_expectations(expectations):
+        suffixes = set()
+        if IMAGE in expectations:
+            suffixes.add('png')
+        if FAIL in expectations:
+            suffixes.add('txt')
+            suffixes.add('png')
+            suffixes.add('wav')
+        return set(suffixes)
+
+    # FIXME: This constructor does too much work. We should move the actual parsing of
+    # the expectations into separate routines so that linting and handling overrides
+    # can be controlled separately, and the constructor can be more of a no-op.
+    def __init__(self, port, tests=None, include_overrides=True, expectations_to_lint=None):
+        self._full_test_list = tests
+        self._test_config = port.test_configuration()
+        self._is_lint_mode = expectations_to_lint is not None
+        self._model = TestExpectationsModel(self._shorten_filename)
+        self._parser = TestExpectationParser(port, tests, self._is_lint_mode)
+        self._port = port
+        self._skipped_tests_warnings = []
+
+        expectations_dict = expectations_to_lint or port.expectations_dict()
+        self._expectations = self._parser.parse(expectations_dict.keys()[0], expectations_dict.values()[0])
+        self._add_expectations(self._expectations)
+
+        if len(expectations_dict) > 1 and include_overrides:
+            for name in expectations_dict.keys()[1:]:
+                expectations = self._parser.parse(name, expectations_dict[name])
+                self._add_expectations(expectations)
+                self._expectations += expectations
+
+        # FIXME: move ignore_tests into port.skipped_layout_tests()
+        self.add_skipped_tests(port.skipped_layout_tests(tests).union(set(port.get_option('ignore_tests', []))))
+
+        self._has_warnings = False
+        self._report_warnings()
+        self._process_tests_without_expectations()
+
+    # TODO(ojan): Allow for removing skipped tests when getting the list of
+    # tests to run, but not when getting metrics.
+    def model(self):
+        return self._model
+
+    def get_rebaselining_failures(self):
+        return self._model.get_test_set(REBASELINE)
+
+    # FIXME: Change the callsites to use TestExpectationsModel and remove.
+    def get_expectations(self, test):
+        return self._model.get_expectations(test)
+
+    # FIXME: Change the callsites to use TestExpectationsModel and remove.
+    def has_modifier(self, test, modifier):
+        return self._model.has_modifier(test, modifier)
+
+    # FIXME: Change the callsites to use TestExpectationsModel and remove.
+    def get_tests_with_result_type(self, result_type):
+        return self._model.get_tests_with_result_type(result_type)
+
+    # FIXME: Change the callsites to use TestExpectationsModel and remove.
+    def get_test_set(self, modifier, expectation=None, include_skips=True):
+        return self._model.get_test_set(modifier, expectation, include_skips)
+
+    # FIXME: Change the callsites to use TestExpectationsModel and remove.
+    def get_modifiers(self, test):
+        return self._model.get_modifiers(test)
+
+    # FIXME: Change the callsites to use TestExpectationsModel and remove.
+    def get_tests_with_timeline(self, timeline):
+        return self._model.get_tests_with_timeline(timeline)
+
+    def get_expectations_string(self, test):
+        return self._model.get_expectations_string(test)
+
+    def expectation_to_string(self, expectation):
+        return self._model.expectation_to_string(expectation)
+
+    def matches_an_expected_result(self, test, result, pixel_tests_are_enabled):
+        expected_results = self._model.get_expectations(test)
+        if not pixel_tests_are_enabled:
+            expected_results = self.remove_pixel_failures(expected_results)
+        return self.result_was_expected(result,
+                                   expected_results,
+                                   self.is_rebaselining(test),
+                                   self._model.has_modifier(test, SKIP))
+
+    def is_rebaselining(self, test):
+        return self._model.has_modifier(test, REBASELINE)
+
+    def _shorten_filename(self, filename):
+        if filename.startswith(self._port.path_from_webkit_base()):
+            return self._port.host.filesystem.relpath(filename, self._port.path_from_webkit_base())
+        return filename
+
+    def _report_warnings(self):
+        warnings = []
+        for expectation in self._expectations:
+            for warning in expectation.warnings:
+                warnings.append('%s:%d %s %s' % (self._shorten_filename(expectation.filename), expectation.line_number,
+                                warning, expectation.name if expectation.expectations else expectation.original_string))
+
+        if warnings:
+            self._has_warnings = True
+            if self._is_lint_mode:
+                raise ParseError(warnings)
+            _log.warning('--lint-test-files warnings:')
+            for warning in warnings:
+                _log.warning(warning)
+            _log.warning('')
+
+    def _process_tests_without_expectations(self):
+        if self._full_test_list:
+            for test in self._full_test_list:
+                if not self._model.has_test(test):
+                    self._model.add_expectation_line(TestExpectationLine.create_passing_expectation(test))
+
+    def has_warnings(self):
+        return self._has_warnings
+
+    def remove_configuration_from_test(self, test, test_configuration):
+        expectations_to_remove = []
+        modified_expectations = []
+
+        for expectation in self._expectations:
+            if expectation.name != test or expectation.is_flaky() or not expectation.parsed_expectations:
+                continue
+            if iter(expectation.parsed_expectations).next() not in (FAIL, IMAGE):
+                continue
+            if test_configuration not in expectation.matching_configurations:
+                continue
+
+            expectation.matching_configurations.remove(test_configuration)
+            if expectation.matching_configurations:
+                modified_expectations.append(expectation)
+            else:
+                expectations_to_remove.append(expectation)
+
+        for expectation in expectations_to_remove:
+            self._expectations.remove(expectation)
+
+        return self.list_to_string(self._expectations, self._parser._test_configuration_converter, modified_expectations)
+
+    def remove_rebaselined_tests(self, except_these_tests, filename):
+        """Returns a copy of the expectations in the file with the tests removed."""
+        def without_rebaseline_modifier(expectation):
+            return (expectation.filename == filename and
+                    not (not expectation.is_invalid() and
+                         expectation.name in except_these_tests and
+                         'rebaseline' in expectation.parsed_modifiers))
+
+        return self.list_to_string(filter(without_rebaseline_modifier, self._expectations), reconstitute_only_these=[])
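+
+    # For example, given a line "Bug(x) failures/expected/text.html [ Failure Rebaseline ]"
+    # in the file named 'expectations', calling
+    #   remove_rebaselined_tests(['failures/expected/text.html'], 'expectations')
+    # returns that file's remaining contents with the Rebaseline line dropped, while
+    # every other line is echoed back exactly as it was originally written.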
+
+    def _add_expectations(self, expectation_list):
+        for expectation_line in expectation_list:
+            if not expectation_line.expectations:
+                continue
+
+            if self._is_lint_mode or self._test_config in expectation_line.matching_configurations:
+                self._model.add_expectation_line(expectation_line)
+
+    def add_skipped_tests(self, tests_to_skip):
+        if not tests_to_skip:
+            return
+        for test in self._expectations:
+            if test.name and test.name in tests_to_skip:
+                test.warnings.append('%s:%d %s is also in a Skipped file.' % (test.filename, test.line_number, test.name))
+
+        for test_name in tests_to_skip:
+            expectation_line = self._parser.expectation_for_skipped_test(test_name)
+            self._model.add_expectation_line(expectation_line, in_skipped=True)
+
+    @staticmethod
+    def list_to_string(expectation_lines, test_configuration_converter=None, reconstitute_only_these=None):
+        def serialize(expectation_line):
+            # If reconstitute_only_these is an empty list, we want to return original_string.
+            # So we need to compare reconstitute_only_these to None, not just check if it's falsey.
+            if reconstitute_only_these is None or expectation_line in reconstitute_only_these:
+                return expectation_line.to_string(test_configuration_converter)
+            return expectation_line.original_string
+
+        def nones_out(expectation_line):
+            return expectation_line is not None
+
+        return "\n".join(filter(nones_out, map(serialize, expectation_lines)))
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
new file mode 100644
index 0000000..d78ae3f
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
@@ -0,0 +1,710 @@
+#!/usr/bin/python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.common.system.outputcapture import OutputCapture
+
+from webkitpy.layout_tests.models.test_configuration import *
+from webkitpy.layout_tests.models.test_expectations import *
+
+try:
+    from collections import OrderedDict
+except ImportError:
+    # Needed for Python < 2.7
+    from webkitpy.thirdparty.ordered_dict import OrderedDict
+
+
+class Base(unittest.TestCase):
+    # Note that all of these tests are written assuming the configuration
+    # being tested is Windows XP, Release build.
+
+    def __init__(self, testFunc):
+        host = MockHost()
+        self._port = host.port_factory.get('test-win-xp', None)
+        self._exp = None
+        unittest.TestCase.__init__(self, testFunc)
+
+    def get_test(self, test_name):
+        # FIXME: Remove this routine and just reference test names directly.
+        return test_name
+
+    def get_basic_tests(self):
+        return [self.get_test('failures/expected/text.html'),
+                self.get_test('failures/expected/image_checksum.html'),
+                self.get_test('failures/expected/crash.html'),
+                self.get_test('failures/expected/missing_text.html'),
+                self.get_test('failures/expected/image.html'),
+                self.get_test('passes/text.html')]
+
+    def get_basic_expectations(self):
+        return """
+Bug(test) failures/expected/text.html [ Failure ]
+Bug(test) failures/expected/crash.html [ WontFix ]
+Bug(test) failures/expected/missing_image.html [ Rebaseline Missing ]
+Bug(test) failures/expected/image_checksum.html [ WontFix ]
+Bug(test) failures/expected/image.html [ WontFix Mac ]
+"""
+
+    def parse_exp(self, expectations, overrides=None, is_lint_mode=False):
+        expectations_dict = OrderedDict()
+        expectations_dict['expectations'] = expectations
+        if overrides:
+            expectations_dict['overrides'] = overrides
+        self._port.expectations_dict = lambda: expectations_dict
+        expectations_to_lint = expectations_dict if is_lint_mode else None
+        self._exp = TestExpectations(self._port, self.get_basic_tests(), expectations_to_lint=expectations_to_lint)
+
+    def assert_exp(self, test, result):
+        self.assertEquals(self._exp.get_expectations(self.get_test(test)),
+                          set([result]))
+
+    def assert_bad_expectations(self, expectations, overrides=None):
+        self.assertRaises(ParseError, self.parse_exp, expectations, is_lint_mode=True, overrides=overrides)
+
+
+class BasicTests(Base):
+    def test_basic(self):
+        self.parse_exp(self.get_basic_expectations())
+        self.assert_exp('failures/expected/text.html', FAIL)
+        self.assert_exp('failures/expected/image_checksum.html', PASS)
+        self.assert_exp('passes/text.html', PASS)
+        self.assert_exp('failures/expected/image.html', PASS)
+
+
+class MiscTests(Base):
+    def test_multiple_results(self):
+        self.parse_exp('Bug(x) failures/expected/text.html [ Crash Failure ]')
+        self.assertEqual(self._exp.get_expectations(
+            self.get_test('failures/expected/text.html')),
+            set([FAIL, CRASH]))
+
+    def test_result_was_expected(self):
+        # test basics
+        self.assertEquals(TestExpectations.result_was_expected(PASS, set([PASS]), test_needs_rebaselining=False, test_is_skipped=False), True)
+        self.assertEquals(TestExpectations.result_was_expected(FAIL, set([PASS]), test_needs_rebaselining=False, test_is_skipped=False), False)
+
+        # test handling of SKIPped tests and results
+        self.assertEquals(TestExpectations.result_was_expected(SKIP, set([CRASH]), test_needs_rebaselining=False, test_is_skipped=True), True)
+        self.assertEquals(TestExpectations.result_was_expected(SKIP, set([CRASH]), test_needs_rebaselining=False, test_is_skipped=False), False)
+
+        # test handling of MISSING results and the REBASELINE modifier
+        self.assertEquals(TestExpectations.result_was_expected(MISSING, set([PASS]), test_needs_rebaselining=True, test_is_skipped=False), True)
+        self.assertEquals(TestExpectations.result_was_expected(MISSING, set([PASS]), test_needs_rebaselining=False, test_is_skipped=False), False)
+
+    def test_remove_pixel_failures(self):
+        self.assertEquals(TestExpectations.remove_pixel_failures(set([FAIL])), set([FAIL]))
+        self.assertEquals(TestExpectations.remove_pixel_failures(set([PASS])), set([PASS]))
+        self.assertEquals(TestExpectations.remove_pixel_failures(set([IMAGE])), set([PASS]))
+        self.assertEquals(TestExpectations.remove_pixel_failures(set([FAIL])), set([FAIL]))
+        self.assertEquals(TestExpectations.remove_pixel_failures(set([PASS, IMAGE, CRASH])), set([PASS, CRASH]))
+
+    def test_suffixes_for_expectations(self):
+        self.assertEquals(TestExpectations.suffixes_for_expectations(set([FAIL])), set(['txt', 'png', 'wav']))
+        self.assertEquals(TestExpectations.suffixes_for_expectations(set([IMAGE])), set(['png']))
+        self.assertEquals(TestExpectations.suffixes_for_expectations(set([FAIL, IMAGE, CRASH])), set(['txt', 'png', 'wav']))
+        self.assertEquals(TestExpectations.suffixes_for_expectations(set()), set())
+
+    def test_category_expectations(self):
+        # This test checks that unknown tests are not present in the
+        # expectations and that a known test that is part of a test
+        # category is present in the expectations.
+        exp_str = 'Bug(x) failures/expected [ WontFix ]'
+        self.parse_exp(exp_str)
+        test_name = 'failures/expected/unknown-test.html'
+        unknown_test = self.get_test(test_name)
+        self.assertRaises(KeyError, self._exp.get_expectations,
+                          unknown_test)
+        self.assert_exp('failures/expected/crash.html', PASS)
+
+    def test_get_modifiers(self):
+        self.parse_exp(self.get_basic_expectations())
+        self.assertEqual(self._exp.get_modifiers(
+                         self.get_test('passes/text.html')), [])
+
+    def test_get_expectations_string(self):
+        self.parse_exp(self.get_basic_expectations())
+        self.assertEquals(self._exp.get_expectations_string(
+                          self.get_test('failures/expected/text.html')),
+                          'FAIL')
+
+    def test_expectation_to_string(self):
+        # Normal cases are handled by other tests.
+        self.parse_exp(self.get_basic_expectations())
+        self.assertRaises(ValueError, self._exp.expectation_to_string,
+                          -1)
+
+    def test_get_test_set(self):
+        # Handle some corner cases for this routine not covered by other tests.
+        self.parse_exp(self.get_basic_expectations())
+        s = self._exp.get_test_set(WONTFIX)
+        self.assertEqual(s,
+            set([self.get_test('failures/expected/crash.html'),
+                 self.get_test('failures/expected/image_checksum.html')]))
+
+    def test_parse_warning(self):
+        try:
+            filesystem = self._port.host.filesystem
+            filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'disabled-test.html-disabled'), 'content')
+            self.get_test('disabled-test.html-disabled'),
+            self.parse_exp("[ FOO ] failures/expected/text.html [ Failure ]\n"
+                "Bug(rniwa) non-existent-test.html [ Failure ]\n"
+                "Bug(rniwa) disabled-test.html-disabled [ ImageOnlyFailure ]", is_lint_mode=True)
+            self.assertFalse(True, "ParseError wasn't raised")
+        except ParseError, e:
+            warnings = ("expectations:1 Unrecognized modifier 'foo' failures/expected/text.html\n"
+                        "expectations:2 Path does not exist. non-existent-test.html")
+            self.assertEqual(str(e), warnings)
+
+    def test_parse_warnings_are_logged_if_not_in_lint_mode(self):
+        oc = OutputCapture()
+        try:
+            oc.capture_output()
+            self.parse_exp('-- this should be a syntax error', is_lint_mode=False)
+        finally:
+            _, _, logs = oc.restore_output()
+            self.assertNotEquals(logs, '')
+
+    def test_error_on_different_platform(self):
+        # parse_exp uses a Windows port. Assert errors on Mac show up in lint mode.
+        self.assertRaises(ParseError, self.parse_exp,
+            'Bug(test) [ Mac ] failures/expected/text.html [ Failure ]\nBug(test) [ Mac ] failures/expected/text.html [ Failure ]',
+            is_lint_mode=True)
+
+    def test_error_on_different_build_type(self):
+        # parse_exp uses a Release port. Assert errors on DEBUG show up in lint mode.
+        self.assertRaises(ParseError, self.parse_exp,
+            'Bug(test) [ Debug ] failures/expected/text.html [ Failure ]\nBug(test) [ Debug ] failures/expected/text.html [ Failure ]',
+            is_lint_mode=True)
+
+    def test_overrides(self):
+        self.parse_exp("Bug(exp) failures/expected/text.html [ Failure ]",
+                       "Bug(override) failures/expected/text.html [ ImageOnlyFailure ]")
+        self.assert_exp('failures/expected/text.html', IMAGE)
+
+    def test_overrides__directory(self):
+        self.parse_exp("Bug(exp) failures/expected/text.html [ Failure ]",
+                       "Bug(override) failures/expected [ Crash ]")
+        self.assert_exp('failures/expected/text.html', CRASH)
+        self.assert_exp('failures/expected/image.html', CRASH)
+
+    def test_overrides__duplicate(self):
+        self.assert_bad_expectations("Bug(exp) failures/expected/text.html [ Failure ]",
+                                     "Bug(override) failures/expected/text.html [ ImageOnlyFailure ]\n"
+                                     "Bug(override) failures/expected/text.html [ Crash ]\n")
+
+    def test_pixel_tests_flag(self):
+        def match(test, result, pixel_tests_enabled):
+            return self._exp.matches_an_expected_result(
+                self.get_test(test), result, pixel_tests_enabled)
+
+        self.parse_exp(self.get_basic_expectations())
+        self.assertTrue(match('failures/expected/text.html', FAIL, True))
+        self.assertTrue(match('failures/expected/text.html', FAIL, False))
+        self.assertFalse(match('failures/expected/text.html', CRASH, True))
+        self.assertFalse(match('failures/expected/text.html', CRASH, False))
+        self.assertTrue(match('failures/expected/image_checksum.html', PASS,
+                              True))
+        self.assertTrue(match('failures/expected/image_checksum.html', PASS,
+                              False))
+        self.assertTrue(match('failures/expected/crash.html', PASS, False))
+        self.assertTrue(match('passes/text.html', PASS, False))
+
+    def test_more_specific_override_resets_skip(self):
+        self.parse_exp("Bug(x) failures/expected [ Skip ]\n"
+                       "Bug(x) failures/expected/text.html [ ImageOnlyFailure ]\n")
+        self.assert_exp('failures/expected/text.html', IMAGE)
+        self.assertFalse(self._port._filesystem.join(self._port.layout_tests_dir(),
+                                                     'failures/expected/text.html') in
+                         self._exp.get_tests_with_result_type(SKIP))
+
+
+class SkippedTests(Base):
+    def check(self, expectations, overrides, skips, lint=False):
+        port = MockHost().port_factory.get('qt')
+        port._filesystem.write_text_file(port._filesystem.join(port.layout_tests_dir(), 'failures/expected/text.html'), 'foo')
+        expectations_dict = OrderedDict()
+        expectations_dict['expectations'] = expectations
+        if overrides:
+            expectations_dict['overrides'] = overrides
+        port.expectations_dict = lambda: expectations_dict
+        port.skipped_layout_tests = lambda tests: set(skips)
+        expectations_to_lint = expectations_dict if lint else None
+        exp = TestExpectations(port, ['failures/expected/text.html'], expectations_to_lint=expectations_to_lint)
+
+        # Check that the expectation is for BUG_DUMMY SKIP : ... [ Pass ]
+        self.assertEquals(exp.get_modifiers('failures/expected/text.html'),
+                          [TestExpectationParser.DUMMY_BUG_MODIFIER, TestExpectationParser.SKIP_MODIFIER, TestExpectationParser.WONTFIX_MODIFIER])
+        self.assertEquals(exp.get_expectations('failures/expected/text.html'), set([PASS]))
+
+    def test_skipped_tests_work(self):
+        self.check(expectations='', overrides=None, skips=['failures/expected/text.html'])
+
+    def test_duplicate_skipped_test_fails_lint(self):
+        self.assertRaises(ParseError, self.check, expectations='Bug(x) failures/expected/text.html [ Failure ]\n', overrides=None, skips=['failures/expected/text.html'], lint=True)
+
+    def test_skipped_file_overrides_expectations(self):
+        self.check(expectations='Bug(x) failures/expected/text.html [ Failure ]\n', overrides=None,
+                   skips=['failures/expected/text.html'])
+
+    def test_skipped_dir_overrides_expectations(self):
+        self.check(expectations='Bug(x) failures/expected/text.html [ Failure ]\n', overrides=None,
+                   skips=['failures/expected'])
+
+    def test_skipped_file_overrides_overrides(self):
+        self.check(expectations='', overrides='Bug(x) failures/expected/text.html [ Failure ]\n',
+                   skips=['failures/expected/text.html'])
+
+    def test_skipped_dir_overrides_overrides(self):
+        self.check(expectations='', overrides='Bug(x) failures/expected/text.html [ Failure ]\n',
+                   skips=['failures/expected'])
+
+    def test_skipped_entry_dont_exist(self):
+        port = MockHost().port_factory.get('qt')
+        expectations_dict = OrderedDict()
+        expectations_dict['expectations'] = ''
+        port.expectations_dict = lambda: expectations_dict
+        port.skipped_layout_tests = lambda tests: set(['foo/bar/baz.html'])
+        capture = OutputCapture()
+        capture.capture_output()
+        exp = TestExpectations(port)
+        _, _, logs = capture.restore_output()
+        self.assertEqual('The following test foo/bar/baz.html from the Skipped list doesn\'t exist\n', logs)
+
+
+class ExpectationSyntaxTests(Base):
+    def test_unrecognized_expectation(self):
+        self.assert_bad_expectations('Bug(test) failures/expected/text.html [ Unknown ]')
+
+    def test_macro(self):
+        exp_str = 'Bug(test) [ Win ] failures/expected/text.html [ Failure ]'
+        self.parse_exp(exp_str)
+        self.assert_exp('failures/expected/text.html', FAIL)
+
+    def assert_tokenize_exp(self, line, bugs=None, modifiers=None, expectations=None, warnings=None, comment=None, name='foo.html'):
+        bugs = bugs or []
+        modifiers = modifiers or []
+        expectations = expectations or []
+        warnings = warnings or []
+        filename = 'TestExpectations'
+        line_number = 1
+        expectation_line = TestExpectationParser._tokenize_line(filename, line, line_number)
+        self.assertEquals(expectation_line.warnings, warnings)
+        self.assertEquals(expectation_line.name, name)
+        self.assertEquals(expectation_line.filename, filename)
+        self.assertEquals(expectation_line.line_number, line_number)
+        if not warnings:
+            self.assertEquals(expectation_line.modifiers, modifiers)
+            self.assertEquals(expectation_line.expectations, expectations)
+
+    def test_bare_name(self):
+        self.assert_tokenize_exp('foo.html', modifiers=['SKIP'], expectations=['PASS'])
+
+    def test_bare_name_and_bugs(self):
+        self.assert_tokenize_exp('webkit.org/b/12345 foo.html', modifiers=['BUGWK12345', 'SKIP'], expectations=['PASS'])
+        self.assert_tokenize_exp('crbug.com/12345 foo.html', modifiers=['BUGCR12345', 'SKIP'], expectations=['PASS'])
+        self.assert_tokenize_exp('Bug(dpranke) foo.html', modifiers=['BUGDPRANKE', 'SKIP'], expectations=['PASS'])
+        self.assert_tokenize_exp('crbug.com/12345 crbug.com/34567 foo.html', modifiers=['BUGCR12345', 'BUGCR34567', 'SKIP'], expectations=['PASS'])
+
+    def test_comments(self):
+        self.assert_tokenize_exp("# comment", name=None, comment="# comment")
+        self.assert_tokenize_exp("foo.html # comment", comment="# comment", expectations=['PASS'], modifiers=['SKIP'])
+
+    def test_config_modifiers(self):
+        self.assert_tokenize_exp('[ Mac ] foo.html', modifiers=['MAC', 'SKIP'], expectations=['PASS'])
+        self.assert_tokenize_exp('[ Mac Vista ] foo.html', modifiers=['MAC', 'VISTA', 'SKIP'], expectations=['PASS'])
+        self.assert_tokenize_exp('[ Mac ] foo.html [ Failure ] ', modifiers=['MAC'], expectations=['FAIL'])
+
+    def test_unknown_config(self):
+        self.assert_tokenize_exp('[ Foo ] foo.html ', modifiers=['Foo', 'SKIP'], expectations=['PASS'])
+
+    def test_unknown_expectation(self):
+        self.assert_tokenize_exp('foo.html [ Audio ]', expectations=['Audio'])
+
+    def test_skip(self):
+        self.assert_tokenize_exp('foo.html [ Skip ]', modifiers=['SKIP'], expectations=['PASS'])
+
+    def test_slow(self):
+        self.assert_tokenize_exp('foo.html [ Slow ]', modifiers=['SLOW'], expectations=['PASS'])
+
+    def test_wontfix(self):
+        self.assert_tokenize_exp('foo.html [ WontFix ]', modifiers=['WONTFIX', 'SKIP'], expectations=['PASS'])
+
+    def test_blank_line(self):
+        self.assert_tokenize_exp('', name=None)
+
+    def test_warnings(self):
+        self.assert_tokenize_exp('[ Mac ]', warnings=['Did not find a test name.'], name=None)
+        self.assert_tokenize_exp('[ [', warnings=['unexpected "["'], name=None)
+        self.assert_tokenize_exp('crbug.com/12345 ]', warnings=['unexpected "]"'], name=None)
+
+        self.assert_tokenize_exp('foo.html crbug.com/12345 ]', warnings=['"crbug.com/12345" is not at the start of the line.'])
+
+
+class SemanticTests(Base):
+    def test_bug_format(self):
+        self.assertRaises(ParseError, self.parse_exp, 'BUG1234 failures/expected/text.html [ Failure ]', is_lint_mode=True)
+
+    def test_bad_bugid(self):
+        try:
+            self.parse_exp('BUG1234 failures/expected/text.html [ Failure ]', is_lint_mode=True)
+            self.fail('should have raised an error about a bad bug identifier')
+        except ParseError, exp:
+            self.assertEquals(len(exp.warnings), 1)
+
+    def test_missing_bugid(self):
+        self.parse_exp('failures/expected/text.html [ Failure ]')
+        self.assertFalse(self._exp.has_warnings())
+
+        self._port.warn_if_bug_missing_in_test_expectations = lambda: True
+
+        self.parse_exp('failures/expected/text.html [ Failure ]')
+        line = self._exp._model.get_expectation_line('failures/expected/text.html')
+        self.assertFalse(line.is_invalid())
+        self.assertEquals(line.warnings, ['Test lacks BUG modifier.'])
+
+    def test_skip_and_wontfix(self):
+        # Skip and WontFix are not allowed to be combined with other expectations, because those
+        # expectations won't be exercised and may become stale.
+        self.parse_exp('failures/expected/text.html [ Failure Skip ]')
+        self.assertTrue(self._exp.has_warnings())
+
+        self.parse_exp('failures/expected/text.html [ Crash WontFix ]')
+        self.assertTrue(self._exp.has_warnings())
+
+        self.parse_exp('failures/expected/text.html [ Pass WontFix ]')
+        self.assertTrue(self._exp.has_warnings())
+
+    def test_slow_and_timeout(self):
+        # A test cannot be SLOW and expected to TIMEOUT.
+        self.assertRaises(ParseError, self.parse_exp,
+            'Bug(test) failures/expected/timeout.html [ Slow Timeout ]', is_lint_mode=True)
+
+    def test_rebaseline(self):
+        # Can't lint a file w/ 'REBASELINE' in it.
+        self.assertRaises(ParseError, self.parse_exp,
+            'Bug(test) failures/expected/text.html [ Failure Rebaseline ]',
+            is_lint_mode=True)
+
+    def test_duplicates(self):
+        self.assertRaises(ParseError, self.parse_exp, """
+Bug(exp) failures/expected/text.html [ Failure ]
+Bug(exp) failures/expected/text.html [ ImageOnlyFailure ]""", is_lint_mode=True)
+
+        self.assertRaises(ParseError, self.parse_exp,
+            self.get_basic_expectations(), overrides="""
+Bug(override) failures/expected/text.html [ Failure ]
+Bug(override) failures/expected/text.html [ ImageOnlyFailure ]""", is_lint_mode=True)
+
+    def test_missing_file(self):
+        self.parse_exp('Bug(test) missing_file.html [ Failure ]')
+        self.assertTrue(self._exp.has_warnings(), 1)
+
+
+class PrecedenceTests(Base):
+    def test_file_over_directory(self):
+        # This tests handling precedence of specific lines over directories
+        # and tests expectations covering entire directories.
+        exp_str = """
+Bug(x) failures/expected/text.html [ Failure ]
+Bug(y) failures/expected [ WontFix ]
+"""
+        self.parse_exp(exp_str)
+        self.assert_exp('failures/expected/text.html', FAIL)
+        self.assert_exp('failures/expected/crash.html', PASS)
+
+        exp_str = """
+Bug(x) failures/expected [ WontFix ]
+Bug(y) failures/expected/text.html [ Failure ]
+"""
+        self.parse_exp(exp_str)
+        self.assert_exp('failures/expected/text.html', FAIL)
+        self.assert_exp('failures/expected/crash.html', PASS)
+
+    def test_ambiguous(self):
+        self.assert_bad_expectations("Bug(test) [ Release ] passes/text.html [ Pass ]\n"
+                                     "Bug(test) [ Win ] passes/text.html [ Failure ]\n")
+
+    def test_more_modifiers(self):
+        self.assert_bad_expectations("Bug(test) [ Release ] passes/text.html [ Pass ]\n"
+                                     "Bug(test) [ Win Release ] passes/text.html [ Failure ]\n")
+
+    def test_order_in_file(self):
+        self.assert_bad_expectations("Bug(test) [ Win Release ] : passes/text.html [ Failure ]\n"
+                                     "Bug(test) [ Release ] : passes/text.html [ Pass ]\n")
+
+    def test_macro_overrides(self):
+        self.assert_bad_expectations("Bug(test) [ Win ] passes/text.html [ Pass ]\n"
+                                     "Bug(test) [ XP ] passes/text.html [ Failure ]\n")
+
+
+class RemoveConfigurationsTest(Base):
+    def test_remove(self):
+        host = MockHost()
+        test_port = host.port_factory.get('test-win-xp', None)
+        test_port.test_exists = lambda test: True
+        test_port.test_isfile = lambda test: True
+
+        test_config = test_port.test_configuration()
+        test_port.expectations_dict = lambda: {"expectations": """Bug(x) [ Linux Win Release ] failures/expected/foo.html [ Failure ]
+Bug(y) [ Win Mac Debug ] failures/expected/foo.html [ Crash ]
+"""}
+        expectations = TestExpectations(test_port, self.get_basic_tests())
+
+        actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', test_config)
+
+        self.assertEqual("""Bug(x) [ Linux Vista Win7 Release ] failures/expected/foo.html [ Failure ]
+Bug(y) [ Win Mac Debug ] failures/expected/foo.html [ Crash ]
+""", actual_expectations)
+
+    def test_remove_line(self):
+        host = MockHost()
+        test_port = host.port_factory.get('test-win-xp', None)
+        test_port.test_exists = lambda test: True
+        test_port.test_isfile = lambda test: True
+
+        test_config = test_port.test_configuration()
+        test_port.expectations_dict = lambda: {'expectations': """Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
+Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
+"""}
+        expectations = TestExpectations(test_port)
+
+        actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', test_config)
+        actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', host.port_factory.get('test-win-vista', None).test_configuration())
+        actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', host.port_factory.get('test-win-win7', None).test_configuration())
+
+        self.assertEqual("""Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
+""", actual_expectations)
+
+
+class RebaseliningTest(Base):
+    """Test rebaselining-specific functionality."""
+    def assertRemove(self, input_expectations, input_overrides, tests, expected_expectations, expected_overrides):
+        self.parse_exp(input_expectations, is_lint_mode=False, overrides=input_overrides)
+        actual_expectations = self._exp.remove_rebaselined_tests(tests, 'expectations')
+        self.assertEqual(expected_expectations, actual_expectations)
+        actual_overrides = self._exp.remove_rebaselined_tests(tests, 'overrides')
+        self.assertEqual(expected_overrides, actual_overrides)
+
+    def test_remove(self):
+        self.assertRemove('Bug(x) failures/expected/text.html [ Failure Rebaseline ]\n'
+                          'Bug(y) failures/expected/image.html [ ImageOnlyFailure Rebaseline ]\n'
+                          'Bug(z) failures/expected/crash.html [ Crash ]\n',
+                          'Bug(x0) failures/expected/image.html [ Crash ]\n',
+                          ['failures/expected/text.html'],
+                          'Bug(y) failures/expected/image.html [ ImageOnlyFailure Rebaseline ]\n'
+                          'Bug(z) failures/expected/crash.html [ Crash ]\n',
+                          'Bug(x0) failures/expected/image.html [ Crash ]\n')
+
+        # Ensure that we don't modify unrelated lines, even if we could rewrite them.
+        # i.e., the second line doesn't get rewritten to "Bug(y) failures/expected/skip.html"
+        self.assertRemove('Bug(x) failures/expected/text.html [ Failure Rebaseline ]\n'
+                          'Bug(Y) failures/expected/image.html [ Skip   ]\n'
+                          'Bug(z) failures/expected/crash.html\n',
+                          '',
+                          ['failures/expected/text.html'],
+                          'Bug(Y) failures/expected/image.html [ Skip   ]\n'
+                          'Bug(z) failures/expected/crash.html\n',
+                          '')
+
+    def test_get_rebaselining_failures(self):
+        # Make sure we find a test as needing a rebaseline even if it is not marked as a failure.
+        self.parse_exp('Bug(x) failures/expected/text.html [ Rebaseline ]\n')
+        self.assertEqual(len(self._exp.get_rebaselining_failures()), 1)
+
+        self.parse_exp(self.get_basic_expectations())
+        self.assertEqual(len(self._exp.get_rebaselining_failures()), 0)
+
+
+class TestExpectationSerializationTests(unittest.TestCase):
+    def __init__(self, testFunc):
+        host = MockHost()
+        test_port = host.port_factory.get('test-win-xp', None)
+        self._converter = TestConfigurationConverter(test_port.all_test_configurations(), test_port.configuration_specifier_macros())
+        unittest.TestCase.__init__(self, testFunc)
+
+    def _tokenize(self, line):
+        return TestExpectationParser._tokenize_line('path', line, 0)
+
+    def assert_round_trip(self, in_string, expected_string=None):
+        expectation = self._tokenize(in_string)
+        if expected_string is None:
+            expected_string = in_string
+        self.assertEqual(expected_string, expectation.to_string(self._converter))
+
+    def assert_list_round_trip(self, in_string, expected_string=None):
+        host = MockHost()
+        parser = TestExpectationParser(host.port_factory.get('test-win-xp', None), [], allow_rebaseline_modifier=False)
+        expectations = parser.parse('path', in_string)
+        if expected_string is None:
+            expected_string = in_string
+        self.assertEqual(expected_string, TestExpectations.list_to_string(expectations, self._converter))
+
+    def test_unparsed_to_string(self):
+        expectation = TestExpectationLine()
+
+        self.assertEqual(expectation.to_string(self._converter), '')
+        expectation.comment = ' Qux.'
+        self.assertEqual(expectation.to_string(self._converter), '# Qux.')
+        expectation.name = 'bar'
+        self.assertEqual(expectation.to_string(self._converter), 'bar # Qux.')
+        expectation.modifiers = ['foo']
+        # FIXME: case should be preserved here but we can't until we drop the old syntax.
+        self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar # Qux.')
+        expectation.expectations = ['bAz']
+        self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar [ BAZ ] # Qux.')
+        expectation.expectations = ['bAz1', 'baZ2']
+        self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar [ BAZ1 BAZ2 ] # Qux.')
+        expectation.modifiers = ['foo1', 'foO2']
+        self.assertEqual(expectation.to_string(self._converter), '[ FOO1 FOO2 ] bar [ BAZ1 BAZ2 ] # Qux.')
+        expectation.warnings.append('Oh the horror.')
+        self.assertEqual(expectation.to_string(self._converter), '')
+        expectation.original_string = 'Yes it is!'
+        self.assertEqual(expectation.to_string(self._converter), 'Yes it is!')
+
+    def test_unparsed_list_to_string(self):
+        expectation = TestExpectationLine()
+        expectation.comment = 'Qux.'
+        expectation.name = 'bar'
+        expectation.modifiers = ['foo']
+        expectation.expectations = ['bAz1', 'baZ2']
+        # FIXME: case should be preserved here but we can't until we drop the old syntax.
+        self.assertEqual(TestExpectations.list_to_string([expectation]), '[ FOO ] bar [ BAZ1 BAZ2 ] #Qux.')
+
+    def test_parsed_to_string(self):
+        expectation_line = TestExpectationLine()
+        expectation_line.parsed_bug_modifiers = ['BUGX']
+        expectation_line.name = 'test/name/for/realz.html'
+        expectation_line.parsed_expectations = set([IMAGE])
+        self.assertEqual(expectation_line.to_string(self._converter), None)
+        expectation_line.matching_configurations = set([TestConfiguration('xp', 'x86', 'release')])
+        self.assertEqual(expectation_line.to_string(self._converter), 'Bug(x) [ XP Release ] test/name/for/realz.html [ ImageOnlyFailure ]')
+        expectation_line.matching_configurations = set([TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug')])
+        self.assertEqual(expectation_line.to_string(self._converter), 'Bug(x) [ XP ] test/name/for/realz.html [ ImageOnlyFailure ]')
+
+    def test_serialize_parsed_expectations(self):
+        expectation_line = TestExpectationLine()
+        expectation_line.parsed_expectations = set([])
+        parsed_expectation_to_string = dict([[parsed_expectation, expectation_string] for expectation_string, parsed_expectation in TestExpectations.EXPECTATIONS.items()])
+        self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), '')
+        expectation_line.parsed_expectations = set([FAIL])
+        self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'fail')
+        expectation_line.parsed_expectations = set([PASS, IMAGE])
+        self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'pass image')
+        expectation_line.parsed_expectations = set([FAIL, PASS])
+        self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'pass fail')
+
+    def test_serialize_parsed_modifier_string(self):
+        expectation_line = TestExpectationLine()
+        expectation_line.parsed_bug_modifiers = ['garden-o-matic']
+        expectation_line.parsed_modifiers = ['for', 'the']
+        self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, []), 'garden-o-matic for the')
+        self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, ['win']), 'garden-o-matic for the win')
+        expectation_line.parsed_bug_modifiers = []
+        expectation_line.parsed_modifiers = []
+        self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, []), '')
+        self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, ['win']), 'win')
+        expectation_line.parsed_bug_modifiers = ['garden-o-matic', 'total', 'is']
+        self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, ['win']), 'garden-o-matic is total win')
+        expectation_line.parsed_bug_modifiers = []
+        expectation_line.parsed_modifiers = ['garden-o-matic', 'total', 'is']
+        self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, ['win']), 'garden-o-matic is total win')
+
+    def test_format_line(self):
+        self.assertEqual(TestExpectationLine._format_line(['MODIFIERS'], 'name', ['EXPECTATIONS'], 'comment'), '[ MODIFIERS ] name [ EXPECTATIONS ] #comment')
+        self.assertEqual(TestExpectationLine._format_line(['MODIFIERS'], 'name', ['EXPECTATIONS'], None), '[ MODIFIERS ] name [ EXPECTATIONS ]')
+
+    def test_string_roundtrip(self):
+        self.assert_round_trip('')
+        self.assert_round_trip('FOO')
+        self.assert_round_trip('[')
+        self.assert_round_trip('FOO [')
+        self.assert_round_trip('FOO ] bar')
+        self.assert_round_trip('  FOO [')
+        self.assert_round_trip('    [ FOO ] ')
+        self.assert_round_trip('[ FOO ] bar [ BAZ ]')
+        self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.')
+        self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.')
+        self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.     ')
+        self.assert_round_trip('[ FOO ] bar [ BAZ ] #        Qux.     ')
+        self.assert_round_trip('[ FOO ] ] ] bar BAZ')
+        self.assert_round_trip('[ FOO ] ] ] bar [ BAZ ]')
+        self.assert_round_trip('FOO ] ] bar ==== BAZ')
+        self.assert_round_trip('=')
+        self.assert_round_trip('#')
+        self.assert_round_trip('# ')
+        self.assert_round_trip('# Foo')
+        self.assert_round_trip('# Foo')
+        self.assert_round_trip('# Foo :')
+        self.assert_round_trip('# Foo : =')
+
+    def test_list_roundtrip(self):
+        self.assert_list_round_trip('')
+        self.assert_list_round_trip('\n')
+        self.assert_list_round_trip('\n\n')
+        self.assert_list_round_trip('bar')
+        self.assert_list_round_trip('bar\n# Qux.')
+        self.assert_list_round_trip('bar\n# Qux.\n')
+
+    def test_reconstitute_only_these(self):
+        lines = []
+        reconstitute_only_these = []
+
+        def add_line(matching_configurations, reconstitute):
+            expectation_line = TestExpectationLine()
+            expectation_line.original_string = "Nay"
+            expectation_line.parsed_bug_modifiers = ['BUGX']
+            expectation_line.name = 'Yay'
+            expectation_line.parsed_expectations = set([IMAGE])
+            expectation_line.matching_configurations = matching_configurations
+            lines.append(expectation_line)
+            if reconstitute:
+                reconstitute_only_these.append(expectation_line)
+
+        add_line(set([TestConfiguration('xp', 'x86', 'release')]), True)
+        add_line(set([TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug')]), False)
+        serialized = TestExpectations.list_to_string(lines, self._converter)
+        self.assertEquals(serialized, "Bug(x) [ XP Release ] Yay [ ImageOnlyFailure ]\nBug(x) [ XP ] Yay [ ImageOnlyFailure ]")
+        serialized = TestExpectations.list_to_string(lines, self._converter, reconstitute_only_these=reconstitute_only_these)
+        self.assertEquals(serialized, "Bug(x) [ XP Release ] Yay [ ImageOnlyFailure ]\nNay")
+
+    def test_string_whitespace_stripping(self):
+        self.assert_round_trip('\n', '')
+        self.assert_round_trip('  [ FOO ] bar [ BAZ ]', '[ FOO ] bar [ BAZ ]')
+        self.assert_round_trip('[ FOO ]    bar [ BAZ ]', '[ FOO ] bar [ BAZ ]')
+        self.assert_round_trip('[ FOO ] bar [ BAZ ]       # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
+        self.assert_round_trip('[ FOO ] bar [        BAZ ]  # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
+        self.assert_round_trip('[ FOO ]       bar [    BAZ ]  # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
+        self.assert_round_trip('[ FOO ]       bar     [    BAZ ]  # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_failures.py b/Tools/Scripts/webkitpy/layout_tests/models/test_failures.py
new file mode 100644
index 0000000..402b30a
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_failures.py
@@ -0,0 +1,224 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import cPickle
+
+from webkitpy.layout_tests.models import test_expectations
+
+
+def is_reftest_failure(failure_list):
+    failure_types = [type(f) for f in failure_list]
+    return set((FailureReftestMismatch, FailureReftestMismatchDidNotOccur, FailureReftestNoImagesGenerated)).intersection(failure_types)
+
+# FIXME: This is backwards.  Each TestFailure subclass should know what
+# test_expectation type it corresponds to.  Then this method just
+# collects them all from the failure list and returns the worst one.
+def determine_result_type(failure_list):
+    """Takes a set of test_failures and returns which result type best fits
+    the list of failures. "Best fits" means we use the worst type of failure.
+
+    Returns:
+      one of the test_expectations result types - PASS, FAIL, CRASH, etc."""
+
+    if not failure_list:
+        return test_expectations.PASS
+
+    failure_types = [type(f) for f in failure_list]
+    if FailureCrash in failure_types:
+        return test_expectations.CRASH
+    elif FailureTimeout in failure_types:
+        return test_expectations.TIMEOUT
+    elif FailureEarlyExit in failure_types:
+        return test_expectations.SKIP
+    elif (FailureMissingResult in failure_types or
+          FailureMissingImage in failure_types or
+          FailureMissingImageHash in failure_types or
+          FailureMissingAudio in failure_types):
+        return test_expectations.MISSING
+    else:
+        is_text_failure = FailureTextMismatch in failure_types
+        is_image_failure = (FailureImageHashIncorrect in failure_types or
+                            FailureImageHashMismatch in failure_types)
+        is_audio_failure = (FailureAudioMismatch in failure_types)
+        if is_text_failure and is_image_failure:
+            return test_expectations.IMAGE_PLUS_TEXT
+        elif is_text_failure:
+            return test_expectations.TEXT
+        elif is_image_failure or is_reftest_failure(failure_list):
+            return test_expectations.IMAGE
+        elif is_audio_failure:
+            return test_expectations.AUDIO
+        else:
+            raise ValueError("unclassifiable set of failures: "
+                             + str(failure_types))
+
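+# For example, a crash outranks every other failure type, and a combined text
+# and image mismatch is reported as IMAGE_PLUS_TEXT:
+#   determine_result_type([])                              -> test_expectations.PASS
+#   determine_result_type([FailureCrash(), FailureTextMismatch()])
+#                                                          -> test_expectations.CRASH
+#   determine_result_type([FailureTextMismatch(), FailureImageHashMismatch()])
+#                                                          -> test_expectations.IMAGE_PLUS_TEXT
+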
+
+class TestFailure(object):
+    """Abstract base class that defines the failure interface."""
+
+    @staticmethod
+    def loads(s):
+        """Creates a TestFailure object from the specified string."""
+        return cPickle.loads(s)
+
+    def message(self):
+        """Returns a string describing the failure in more detail."""
+        raise NotImplementedError
+
+    def __eq__(self, other):
+        return self.__class__.__name__ == other.__class__.__name__
+
+    def __ne__(self, other):
+        return self.__class__.__name__ != other.__class__.__name__
+
+    def __hash__(self):
+        return hash(self.__class__.__name__)
+
+    def dumps(self):
+        """Returns the string/JSON representation of a TestFailure."""
+        return cPickle.dumps(self)
+
+    def driver_needs_restart(self):
+        """Returns True if we should kill DumpRenderTree/WebKitTestRunner before the next test."""
+        return False
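+
+    # A failure can be serialized with dumps() and restored with loads();
+    # equality compares class names, so a round trip compares equal, e.g.:
+    #   s = FailureTimeout().dumps()
+    #   TestFailure.loads(s) == FailureTimeout()   # True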
+
+
+class FailureTimeout(TestFailure):
+    def __init__(self, is_reftest=False):
+        super(FailureTimeout, self).__init__()
+        self.is_reftest = is_reftest
+
+    def message(self):
+        return "test timed out"
+
+    def driver_needs_restart(self):
+        return True
+
+
+class FailureCrash(TestFailure):
+    def __init__(self, is_reftest=False, process_name='DumpRenderTree', pid=None):
+        super(FailureCrash, self).__init__()
+        self.process_name = process_name
+        self.pid = pid
+        self.is_reftest = is_reftest
+
+    def message(self):
+        if self.pid:
+            return "%s crashed [pid=%d]" % (self.process_name, self.pid)
+        return self.process_name + " crashed"
+
+    def driver_needs_restart(self):
+        return True
+
+
+class FailureMissingResult(TestFailure):
+    def message(self):
+        return "-expected.txt was missing"
+
+
+class FailureTextMismatch(TestFailure):
+    def message(self):
+        return "text diff"
+
+
+class FailureMissingImageHash(TestFailure):
+    def message(self):
+        return "-expected.png was missing an embedded checksum"
+
+
+class FailureMissingImage(TestFailure):
+    def message(self):
+        return "-expected.png was missing"
+
+
+class FailureImageHashMismatch(TestFailure):
+    def __init__(self, diff_percent=0):
+        super(FailureImageHashMismatch, self).__init__()
+        self.diff_percent = diff_percent
+
+    def message(self):
+        return "image diff"
+
+
+class FailureImageHashIncorrect(TestFailure):
+    def message(self):
+        return "-expected.png embedded checksum is incorrect"
+
+
+class FailureReftestMismatch(TestFailure):
+    def __init__(self, reference_filename=None):
+        super(FailureReftestMismatch, self).__init__()
+        self.reference_filename = reference_filename
+        self.diff_percent = None
+
+    def message(self):
+        return "reference mismatch"
+
+
+class FailureReftestMismatchDidNotOccur(TestFailure):
+    def __init__(self, reference_filename=None):
+        super(FailureReftestMismatchDidNotOccur, self).__init__()
+        self.reference_filename = reference_filename
+
+    def message(self):
+        return "reference mismatch didn't happen"
+
+
+class FailureReftestNoImagesGenerated(TestFailure):
+    def __init__(self, reference_filename=None):
+        super(FailureReftestNoImagesGenerated, self).__init__()
+        self.reference_filename = reference_filename
+
+    def message(self):
+        return "reference didn't generate pixel results."
+
+
+class FailureMissingAudio(TestFailure):
+    def message(self):
+        return "expected audio result was missing"
+
+
+class FailureAudioMismatch(TestFailure):
+    def message(self):
+        return "audio mismatch"
+
+
+class FailureEarlyExit(TestFailure):
+    def message(self):
+        return "skipped due to early exit"
+
+
+# Convenient collection of all failure classes for anything that might
+# need to enumerate over them all.
+ALL_FAILURE_CLASSES = (FailureTimeout, FailureCrash, FailureMissingResult,
+                       FailureTextMismatch, FailureMissingImageHash,
+                       FailureMissingImage, FailureImageHashMismatch,
+                       FailureImageHashIncorrect, FailureReftestMismatch,
+                       FailureReftestMismatchDidNotOccur, FailureReftestNoImagesGenerated,
+                       FailureMissingAudio, FailureAudioMismatch,
+                       FailureEarlyExit)
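+
+# An illustrative sketch (not part of the API above): the tuple makes it easy
+# to enumerate every failure class, e.g. to build a lookup table by class name:
+#   failure_class_by_name = dict((cls.__name__, cls) for cls in ALL_FAILURE_CLASSES)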
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_failures_unittest.py b/Tools/Scripts/webkitpy/layout_tests/models/test_failures_unittest.py
new file mode 100644
index 0000000..1c8f029
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_failures_unittest.py
@@ -0,0 +1,73 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.layout_tests.models.test_failures import *
+
+
+class TestFailuresTest(unittest.TestCase):
+    def assert_loads(self, cls):
+        failure_obj = cls()
+        s = failure_obj.dumps()
+        new_failure_obj = TestFailure.loads(s)
+        self.assertTrue(isinstance(new_failure_obj, cls))
+
+        self.assertEqual(failure_obj, new_failure_obj)
+
+        # Also test that != is implemented.
+        self.assertFalse(failure_obj != new_failure_obj)
+
+    def test_unknown_failure_type(self):
+        class UnknownFailure(TestFailure):
+            def message(self):
+                return ''
+
+        failure_obj = UnknownFailure()
+        self.assertRaises(ValueError, determine_result_type, [failure_obj])
+
+    def test_message_is_virtual(self):
+        failure_obj = TestFailure()
+        self.assertRaises(NotImplementedError, failure_obj.message)
+
+    def test_loads(self):
+        for c in ALL_FAILURE_CLASSES:
+            self.assert_loads(c)
+
+    def test_equals(self):
+        self.assertEqual(FailureCrash(), FailureCrash())
+        self.assertNotEqual(FailureCrash(), FailureTimeout())
+        crash_set = set([FailureCrash(), FailureCrash()])
+        self.assertEqual(len(crash_set), 1)
+        # The hash happens to be the name of the class, but sets still work:
+        crash_set = set([FailureCrash(), "FailureCrash"])
+        self.assertEqual(len(crash_set), 2)
+
+    def test_crashes(self):
+        self.assertEquals(FailureCrash().message(), 'DumpRenderTree crashed')
+        self.assertEquals(FailureCrash(process_name='foo', pid=1234).message(), 'foo crashed [pid=1234]')
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_input.py b/Tools/Scripts/webkitpy/layout_tests/models/test_input.py
new file mode 100644
index 0000000..56f2d52
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_input.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class TestInput(object):
+    """Groups information about a test for easy passing of data."""
+
+    def __init__(self, test_name, timeout=None, requires_lock=None, reference_files=None, should_run_pixel_tests=None):
+        # TestInput objects are normally constructed by the manager and passed
+        # to the workers, but some fields are set lazily in the workers where
+        # possible, because computing them requires looking at the filesystem
+        # and we want to be able to do that in parallel.
+        self.test_name = test_name
+        self.timeout = timeout  # in msecs; should rename this for consistency
+        self.requires_lock = requires_lock
+        self.reference_files = reference_files
+        self.should_run_pixel_tests = should_run_pixel_tests
+
+    def __repr__(self):
+        return "TestInput('%s', timeout=%s, requires_lock=%s, reference_files=%s, should_run_pixel_tests=%s)" % (self.test_name, self.timeout, self.requires_lock, self.reference_files, self.should_run_pixel_tests)
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_results.py b/Tools/Scripts/webkitpy/layout_tests/models/test_results.py
new file mode 100644
index 0000000..6b9db55
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_results.py
@@ -0,0 +1,65 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import cPickle
+
+from webkitpy.layout_tests.models import test_failures
+
+
+class TestResult(object):
+    """Data object containing the results of a single test."""
+
+    @staticmethod
+    def loads(string):
+        return cPickle.loads(string)
+
+    def __init__(self, test_name, failures=None, test_run_time=None, has_stderr=False, reftest_type=None):
+        self.test_name = test_name
+        self.failures = failures or []
+        self.test_run_time = test_run_time or 0
+        self.has_stderr = has_stderr
+        self.reftest_type = reftest_type or []
+        # FIXME: Setting this in the constructor makes this class hard to mutate.
+        self.type = test_failures.determine_result_type(failures)
+
+    def __eq__(self, other):
+        return (self.test_name == other.test_name and
+                self.failures == other.failures and
+                self.test_run_time == other.test_run_time)
+
+    def __ne__(self, other):
+        return not (self == other)
+
+    def has_failure_matching_types(self, *failure_classes):
+        for failure in self.failures:
+            if type(failure) in failure_classes:
+                return True
+        return False
+
+    def dumps(self):
+        return cPickle.dumps(self)
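+
+# A minimal round-trip sketch (the test name is illustrative): dumps() pickles
+# the whole object so it can be reconstructed elsewhere with TestResult.loads().
+#
+#   result = TestResult('fast/dom/example.html', failures=[], test_run_time=1.1)
+#   assert TestResult.loads(result.dumps()) == result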
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_results_unittest.py b/Tools/Scripts/webkitpy/layout_tests/models/test_results_unittest.py
new file mode 100644
index 0000000..80d8a47
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_results_unittest.py
@@ -0,0 +1,52 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.layout_tests.models.test_results import TestResult
+
+
+class TestResultsTest(unittest.TestCase):
+    def test_defaults(self):
+        result = TestResult("foo")
+        self.assertEqual(result.test_name, 'foo')
+        self.assertEqual(result.failures, [])
+        self.assertEqual(result.test_run_time, 0)
+
+    def test_loads(self):
+        result = TestResult(test_name='foo',
+                            failures=[],
+                            test_run_time=1.1)
+        s = result.dumps()
+        new_result = TestResult.loads(s)
+        self.assertTrue(isinstance(new_result, TestResult))
+
+        self.assertEqual(new_result, result)
+
+        # Also check that != is implemented.
+        self.assertFalse(new_result != result)
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/__init__.py b/Tools/Scripts/webkitpy/layout_tests/port/__init__.py
new file mode 100644
index 0000000..6365b4c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/__init__.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Port-specific entrypoints for the layout tests test infrastructure."""
+
+import builders  # Why is this in port?
+
+from base import Port  # It's possible we don't need to export this virtual baseclass outside the module.
+from driver import Driver, DriverInput, DriverOutput
+from factory import platform_options, configuration_options
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/apple.py b/Tools/Scripts/webkitpy/layout_tests/port/apple.py
new file mode 100644
index 0000000..4b97f41
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/apple.py
@@ -0,0 +1,100 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+
+from webkitpy.layout_tests.port.base import Port
+from webkitpy.layout_tests.models.test_configuration import TestConfiguration
+
+
+_log = logging.getLogger(__name__)
+
+
+class ApplePort(Port):
+    """Shared logic between all of Apple's ports."""
+
+    # This is used to represent the version of an operating system
+    # corresponding to the "mac" or "win" base LayoutTests/platform
+    # directory.  I'm not sure this concept is very useful,
+    # but it gives us a way to refer to fallback paths *only* including
+    # the base directory.
+    # This is mostly done because TestConfiguration assumes that self.version()
+    # will never return None. (None would be another way to represent this concept.)
+    # Apple supposedly has explicit "future" results which are kept in an internal repository.
+    # It's possible that Apple would want to fix this code to work better with those results.
+    FUTURE_VERSION = 'future'  # FIXME: This whole 'future' thing feels like a hack.
+
+    @classmethod
+    def determine_full_port_name(cls, host, options, port_name):
+        # If the port_name matches the (badly named) cls.port_name, that
+        # means that they passed 'mac' or 'win' and didn't specify a version.
+        # That convention means that we're supposed to use the version currently
+        # being run, so this won't work if you're not on mac or win (respectively).
+        # If you're not on the o/s in question, you must specify a full version or -future (cf. above).
+        if port_name == cls.port_name:
+            assert port_name == host.platform.os_name
+            return cls.port_name + '-' + host.platform.os_version
+        if port_name == cls.port_name + '-wk2':
+            assert port_name == host.platform.os_name + '-wk2'
+            return cls.port_name + '-' + host.platform.os_version + '-wk2'
+        return port_name
+
+    def _strip_port_name_prefix(self, port_name):
+        # Callers treat this return value as the "version", which only works
+        # because Apple ports use a simple name-version port_name scheme.
+        # FIXME: This parsing wouldn't be needed if port_name handling was moved to factory.py
+        # instead of the individual port constructors.
+        return port_name[len(self.port_name + '-'):]
+
+    def __init__(self, host, port_name, **kwargs):
+        super(ApplePort, self).__init__(host, port_name, **kwargs)
+
+        allowed_port_names = self.VERSION_FALLBACK_ORDER + [self.operating_system() + "-future"]
+        port_name = port_name.replace('-wk2', '')
+        self._version = self._strip_port_name_prefix(port_name)
+        assert port_name in allowed_port_names, "%s is not in %s" % (port_name, allowed_port_names)
+
+    def _skipped_file_search_paths(self):
+        # We don't have a dedicated Skipped file for the most recent version of the port;
+        # we just use the one in platform/{mac,win}
+        most_recent_name = self.VERSION_FALLBACK_ORDER[-1]
+        return set(filter(lambda name: name != most_recent_name, super(ApplePort, self)._skipped_file_search_paths()))
+
+    # FIXME: A more sophisticated version of this function should move to WebKitPort and replace all calls to name().
+    # This is also a misleading name, since 'mac-future' gets remapped to 'mac'.
+    def _port_name_with_version(self):
+        return self.name().replace('-future', '').replace('-wk2', '')
+
+    def _generate_all_test_configurations(self):
+        configurations = []
+        allowed_port_names = self.VERSION_FALLBACK_ORDER + [self.operating_system() + "-future"]
+        for port_name in allowed_port_names:
+            for build_type in self.ALL_BUILD_TYPES:
+                for architecture in self.ARCHITECTURES:
+                    configurations.append(TestConfiguration(version=self._strip_port_name_prefix(port_name), architecture=architecture, build_type=build_type))
+        return configurations
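+
+# A rough sketch of how determine_full_port_name() resolves a bare port name
+# (the version strings here are illustrative; real values come from
+# host.platform.os_version):
+#
+#   'mac'              -> 'mac-lion'       (on a machine reporting 'lion')
+#   'mac-wk2'          -> 'mac-lion-wk2'   (same machine, WebKit2 variant)
+#   'mac-snowleopard'  -> 'mac-snowleopard'  (already fully specified)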
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/base.py b/Tools/Scripts/webkitpy/layout_tests/port/base.py
new file mode 100755
index 0000000..ea1e9d0
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/base.py
@@ -0,0 +1,1520 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Abstract base class of Port-specific entry points for the layout tests
+test infrastructure (the Port and Driver classes)."""
+
+import cgi
+import difflib
+import errno
+import itertools
+import logging
+import os
+import operator
+import optparse
+import re
+import sys
+
+try:
+    from collections import OrderedDict
+except ImportError:
+    # Needed for Python < 2.7
+    from webkitpy.thirdparty.ordered_dict import OrderedDict
+
+
+from webkitpy.common import find_files
+from webkitpy.common import read_checksum_from_png
+from webkitpy.common.memoized import memoized
+from webkitpy.common.system import path
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.common.system.systemhost import SystemHost
+from webkitpy.layout_tests.models.test_configuration import TestConfiguration
+from webkitpy.layout_tests.port import config as port_config
+from webkitpy.layout_tests.port import driver
+from webkitpy.layout_tests.port import http_lock
+from webkitpy.layout_tests.port import image_diff
+from webkitpy.layout_tests.port import server_process
+from webkitpy.layout_tests.port.factory import PortFactory
+from webkitpy.layout_tests.servers import apache_http_server
+from webkitpy.layout_tests.servers import http_server
+from webkitpy.layout_tests.servers import websocket_server
+
+_log = logging.getLogger(__name__)
+
+
+# FIXME: This class should merge with WebKitPort now that Chromium behaves mostly like other webkit ports.
+class Port(object):
+    """Abstract class for Port-specific hooks for the layout_test package."""
+
+    # Subclasses override this. This should indicate the basic implementation
+    # part of the port name, e.g., 'chromium-mac', 'win', 'gtk'; there is probably (?)
+    # one unique value per class.
+
+    # FIXME: We should probably rename this to something like 'implementation_name'.
+    port_name = None
+
+    # Test names resemble unix relative paths, and use '/' as a directory separator.
+    TEST_PATH_SEPARATOR = '/'
+
+    ALL_BUILD_TYPES = ('debug', 'release')
+
+    @classmethod
+    def determine_full_port_name(cls, host, options, port_name):
+        """Return a fully-specified port name that can be used to construct objects."""
+        # Subclasses will usually override this.
+        return cls.port_name
+
+    def __init__(self, host, port_name=None, options=None, config=None, **kwargs):
+
+        # This value may be different from cls.port_name by having version modifiers
+        # and other fields appended to it (for example, 'qt-arm' or 'mac-wk2').
+
+        # FIXME: port_name should be a required parameter. It isn't yet because lots of tests need to be updated.
+        self._name = port_name or self.port_name
+
+        # These are default values that should be overridden in subclasses.
+        self._version = ''
+        self._architecture = 'x86'
+
+        # FIXME: Ideally we'd have a package-wide way to get a
+        # well-formed options object that had all of the necessary
+        # options defined on it.
+        self._options = options or optparse.Values()
+
+        self.host = host
+        self._executive = host.executive
+        self._filesystem = host.filesystem
+        self._config = config or port_config.Config(self._executive, self._filesystem, self.port_name)
+
+        self._helper = None
+        self._http_server = None
+        self._websocket_server = None
+        self._image_differ = None
+        self._server_process_constructor = server_process.ServerProcess  # overridable for testing
+        self._http_lock = None  # FIXME: Why does this live on the port object?
+
+        # Python's Popen has a bug that causes any pipes opened to a
+        # process that can't be executed to be leaked.  Since this
+        # code is specifically designed to tolerate exec failures
+        # to gracefully handle cases where wdiff is not installed,
+        # the bug results in a massive file descriptor leak. As a
+        # workaround, if an exec failure is ever experienced for
+        # wdiff, assume it's not available.  This will leak one
+        # file descriptor but that's better than leaking each time
+        # wdiff would be run.
+        #
+        # http://mail.python.org/pipermail/python-list/
+        #    2008-August/505753.html
+        # http://bugs.python.org/issue3210
+        self._wdiff_available = None
+
+        # FIXME: prettypatch.py knows this path, why is it copied here?
+        self._pretty_patch_path = self.path_from_webkit_base("Websites", "bugs.webkit.org", "PrettyPatch", "prettify.rb")
+        self._pretty_patch_available = None
+
+        if not hasattr(options, 'configuration') or not options.configuration:
+            self.set_option_default('configuration', self.default_configuration())
+        self._test_configuration = None
+        self._reftest_list = {}
+        self._results_directory = None
+        self._root_was_set = hasattr(options, 'root') and options.root
+
+    def additional_drt_flag(self):
+        return []
+
+    def default_pixel_tests(self):
+        # FIXME: Disable until they are run by default on build.webkit.org.
+        return False
+
+    def default_timeout_ms(self):
+        if self.get_option('webkit_test_runner'):
+            # Add some more time to WebKitTestRunner because it needs to synchronise the state
+            # with the web process, and we want to detect if there is a problem with that in the driver.
+            return 80 * 1000
+        return 35 * 1000
+
+    def driver_stop_timeout(self):
+        """ Returns the amount of time in seconds to wait before killing the process in driver.stop()."""
+        # We want to wait for at least 3 seconds, but if we are really slow, we want to be slow on cleanup as
+        # well (for things like ASAN, Valgrind, etc.)
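+        # For example, with time_out_ms set to 70000 against a 35000 ms
+        # default_timeout_ms(), this returns 3.0 * 70000 / 35000 = 6.0 seconds.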
+        return 3.0 * float(self.get_option('time_out_ms', '0')) / self.default_timeout_ms()
+
+    def wdiff_available(self):
+        if self._wdiff_available is None:
+            self._wdiff_available = self.check_wdiff(logging=False)
+        return self._wdiff_available
+
+    def pretty_patch_available(self):
+        if self._pretty_patch_available is None:
+            self._pretty_patch_available = self.check_pretty_patch(logging=False)
+        return self._pretty_patch_available
+
+    def should_retry_crashes(self):
+        return False
+
+    def default_child_processes(self):
+        """Return the number of DumpRenderTree instances to use for this port."""
+        return self._executive.cpu_count()
+
+    def default_max_locked_shards(self):
+        """Return the number of "locked" shards to run in parallel (like the http tests)."""
+        return 1
+
+    def worker_startup_delay_secs(self):
+        # FIXME: If we start workers up too quickly, DumpRenderTree appears
+        # to thrash on something and time out its first few tests. Until
+        # we can figure out what's going on, sleep a bit in between
+        # workers. See https://bugs.webkit.org/show_bug.cgi?id=79147 .
+        return 0.1
+
+    def baseline_path(self):
+        """Return the absolute path to the directory to store new baselines in for this port."""
+        # FIXME: remove once all callers are calling either baseline_version_dir() or baseline_platform_dir()
+        return self.baseline_version_dir()
+
+    def baseline_platform_dir(self):
+        """Return the absolute path to the default (version-independent) platform-specific results."""
+        return self._filesystem.join(self.layout_tests_dir(), 'platform', self.port_name)
+
+    def baseline_version_dir(self):
+        """Return the absolute path to the platform-and-version-specific results."""
+        baseline_search_paths = self.baseline_search_path()
+        return baseline_search_paths[0]
+
+    def baseline_search_path(self):
+        return self.get_option('additional_platform_directory', []) + self._compare_baseline() + self.default_baseline_search_path()
+
+    def default_baseline_search_path(self):
+        """Return a list of absolute paths to directories to search under for
+        baselines. The directories are searched in order."""
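+        # For example (port names here are illustrative): a port whose name()
+        # is 'mac-lion' and whose port_name is 'mac' searches the 'mac-lion'
+        # baseline directory before falling back to 'mac'; when the
+        # webkit_test_runner option is set, the wk2 variant is consulted first.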
+        search_paths = []
+        if self.get_option('webkit_test_runner'):
+            search_paths.append(self._wk2_port_name())
+        search_paths.append(self.name())
+        if self.name() != self.port_name:
+            search_paths.append(self.port_name)
+        return map(self._webkit_baseline_path, search_paths)
+
+    @memoized
+    def _compare_baseline(self):
+        factory = PortFactory(self.host)
+        target_port = self.get_option('compare_port')
+        if target_port:
+            return factory.get(target_port).default_baseline_search_path()
+        return []
+
+    def check_build(self, needs_http):
+        """This routine is used to ensure that the build is up to date
+        and all the needed binaries are present."""
+        # If we're using a pre-built copy of WebKit (--root), we assume it also includes a build of DRT.
+        if not self._root_was_set and self.get_option('build') and not self._build_driver():
+            return False
+        if not self._check_driver():
+            return False
+        if self.get_option('pixel_tests'):
+            if not self.check_image_diff():
+                return False
+        if not self._check_port_build():
+            return False
+        return True
+
+    def _check_driver(self):
+        driver_path = self._path_to_driver()
+        if not self._filesystem.exists(driver_path):
+            _log.error("%s was not found at %s" % (self.driver_name(), driver_path))
+            return False
+        return True
+
+    def _check_port_build(self):
+        # Ports can override this method to do additional checks.
+        return True
+
+    def check_sys_deps(self, needs_http):
+        """If the port needs to do some runtime checks to ensure that the
+        tests can be run successfully, it should override this routine.
+        This step can be skipped with --nocheck-sys-deps.
+
+        Returns whether the system is properly configured."""
+        if needs_http:
+            return self.check_httpd()
+        return True
+
+    def check_image_diff(self, override_step=None, logging=True):
+        """This routine is used to check whether image_diff binary exists."""
+        image_diff_path = self._path_to_image_diff()
+        if not self._filesystem.exists(image_diff_path):
+            _log.error("ImageDiff was not found at %s" % image_diff_path)
+            return False
+        return True
+
+    def check_pretty_patch(self, logging=True):
+        """Checks whether we can use the PrettyPatch ruby script."""
+        try:
+            _ = self._executive.run_command(['ruby', '--version'])
+        except OSError, e:
+            if e.errno in [errno.ENOENT, errno.EACCES, errno.ECHILD]:
+                if logging:
+                    _log.warning("Ruby is not installed; can't generate pretty patches.")
+                    _log.warning('')
+                return False
+
+        if not self._filesystem.exists(self._pretty_patch_path):
+            if logging:
+                _log.warning("Unable to find %s; can't generate pretty patches." % self._pretty_patch_path)
+                _log.warning('')
+            return False
+
+        return True
+
+    def check_wdiff(self, logging=True):
+        if not self._path_to_wdiff():
+            # Don't need to log here since this is the port choosing not to use wdiff.
+            return False
+
+        try:
+            _ = self._executive.run_command([self._path_to_wdiff(), '--help'])
+        except OSError:
+            if logging:
+                message = self._wdiff_missing_message()
+                if message:
+                    for line in message.splitlines():
+                        _log.warning('    ' + line)
+                        _log.warning('')
+            return False
+
+        return True
+
+    def _wdiff_missing_message(self):
+        return 'wdiff is not installed; please install it to generate word-by-word diffs.'
+
+    def check_httpd(self):
+        if self._uses_apache():
+            httpd_path = self._path_to_apache()
+        else:
+            httpd_path = self._path_to_lighttpd()
+
+        try:
+            server_name = self._filesystem.basename(httpd_path)
+            env = self.setup_environ_for_server(server_name)
+            if self._executive.run_command([httpd_path, "-v"], env=env, return_exit_code=True) != 0:
+                _log.error("httpd seems broken. Cannot run http tests.")
+                return False
+            return True
+        except OSError:
+            _log.error("No httpd found. Cannot run http tests.")
+            return False
+
+    def do_text_results_differ(self, expected_text, actual_text):
+        return expected_text != actual_text
+
+    def do_audio_results_differ(self, expected_audio, actual_audio):
+        return expected_audio != actual_audio
+
+    def diff_image(self, expected_contents, actual_contents, tolerance=None):
+        """Compare two images and return a tuple of an image diff, a percentage difference (0-100), and an error string.
+
+        |tolerance| should be a percentage value (0.0 - 100.0).
+        If it is omitted, the port default tolerance value is used.
+
+        If an error occurs (e.g., ImageDiff isn't found or crashes), we log an error and return True (for a diff).
+        """
+        if not actual_contents and not expected_contents:
+            return (None, 0, None)
+        if not actual_contents or not expected_contents:
+            return (True, 0, None)
+        if not self._image_differ:
+            self._image_differ = image_diff.ImageDiffer(self)
+        self.set_option_default('tolerance', 0.1)
+        if tolerance is None:
+            tolerance = self.get_option('tolerance')
+        return self._image_differ.diff_image(expected_contents, actual_contents, tolerance)
+
+    def diff_text(self, expected_text, actual_text, expected_filename, actual_filename):
+        """Returns a string containing the diff of the two text strings
+        in 'unified diff' format."""
+
+        # The filenames show up in the diff output, make sure they're
+        # raw bytes and not unicode, so that they don't trigger join()
+        # trying to decode the input.
+        def to_raw_bytes(string_value):
+            if isinstance(string_value, unicode):
+                return string_value.encode('utf-8')
+            return string_value
+        expected_filename = to_raw_bytes(expected_filename)
+        actual_filename = to_raw_bytes(actual_filename)
+        diff = difflib.unified_diff(expected_text.splitlines(True),
+                                    actual_text.splitlines(True),
+                                    expected_filename,
+                                    actual_filename)
+        return ''.join(diff)
+
+    def check_for_leaks(self, process_name, process_pid):
+        # Subclasses should check for leaks in the running process
+        # and print any necessary warnings if leaks are found.
+        # FIXME: We should consider moving much of this logic into
+        # Executive and make it platform-specific instead of port-specific.
+        pass
+
+    def print_leaks_summary(self):
+        # Subclasses can override this to print a summary of leaks found
+        # while running the layout tests.
+        pass
+
+    def driver_name(self):
+        if self.get_option('driver_name'):
+            return self.get_option('driver_name')
+        if self.get_option('webkit_test_runner'):
+            return 'WebKitTestRunner'
+        return 'DumpRenderTree'
+
+    def expected_baselines_by_extension(self, test_name):
+        """Returns a dict mapping baseline suffix to relative path for each baseline in
+        a test. For reftests, it returns ".==" or ".!=" instead of the suffix."""
+        # FIXME: The name similarity between this and expected_baselines() below, is unfortunate.
+        # We should probably rename them both.
+        baseline_dict = {}
+        reference_files = self.reference_files(test_name)
+        if reference_files:
+            # FIXME: How should this handle more than one type of reftest?
+            baseline_dict['.' + reference_files[0][0]] = self.relative_test_filename(reference_files[0][1])
+
+        for extension in self.baseline_extensions():
+            path = self.expected_filename(test_name, extension, return_default=False)
+            baseline_dict[extension] = self.relative_test_filename(path) if path else path
+
+        return baseline_dict
+
+    def baseline_extensions(self):
+        """Returns a tuple of all of the non-reftest baseline extensions we use. The extensions include the leading '.'."""
+        return ('.wav', '.webarchive', '.txt', '.png')
+
+    def expected_baselines(self, test_name, suffix, all_baselines=False):
+        """Given a test name, finds where the baseline results are located.
+
+        Args:
+        test_name: name of test file (usually a relative path under LayoutTests/)
+        suffix: file suffix of the expected results, including dot; e.g.
+            '.txt' or '.png'.  This should not be None, but may be an empty
+            string.
+        all_baselines: If True, return an ordered list of all baseline paths
+            for the given platform. If False, return only the first one.
+        Returns
+        a list of ( platform_dir, results_filename ), where
+            platform_dir - abs path to the top of the results tree (or test
+                tree)
+            results_filename - relative path from top of tree to the results
+                file
+            (port.join() of the two gives you the full path to the file,
+                unless None was returned.)
+        Return values will be in the format appropriate for the current
+        platform (e.g., "\\" for path separators on Windows). If the results
+        file is not found, then None will be returned for the directory,
+        but the expected relative pathname will still be returned.
+
+        This routine is generic but lives here since it is used in
+        conjunction with the other baseline and filename routines that are
+        platform specific.
+        """
+        baseline_filename = self._filesystem.splitext(test_name)[0] + '-expected' + suffix
+        baseline_search_path = self.baseline_search_path()
+
+        baselines = []
+        for platform_dir in baseline_search_path:
+            if self._filesystem.exists(self._filesystem.join(platform_dir, baseline_filename)):
+                baselines.append((platform_dir, baseline_filename))
+
+            if not all_baselines and baselines:
+                return baselines
+
+        # If it wasn't found in a platform directory, return the expected
+        # result in the test directory, even if no such file actually exists.
+        platform_dir = self.layout_tests_dir()
+        if self._filesystem.exists(self._filesystem.join(platform_dir, baseline_filename)):
+            baselines.append((platform_dir, baseline_filename))
+
+        if baselines:
+            return baselines
+
+        return [(None, baseline_filename)]
+
+    def expected_filename(self, test_name, suffix, return_default=True):
+        """Given a test name, returns an absolute path to its expected results.
+
+        If no expected results are found in any of the searched directories,
+        the directory in which the test itself is located will be returned.
+        The return value is in the format appropriate for the platform
+        (e.g., "\\" for path separators on windows).
+
+        Args:
+        test_name: name of test file (usually a relative path under LayoutTests/)
+        suffix: file suffix of the expected results, including dot; e.g. '.txt'
+            or '.png'.  This should not be None, but may be an empty string.
+        return_default: if True, returns the path to the generic expectation if nothing
+            else is found; if False, returns None.
+
+        This routine is generic but is implemented here to live alongside
+        the other baseline and filename manipulation routines.
+        """
+        # FIXME: The [0] here is very mysterious, as is the destructured return.
+        platform_dir, baseline_filename = self.expected_baselines(test_name, suffix)[0]
+        if platform_dir:
+            return self._filesystem.join(platform_dir, baseline_filename)
+
+        actual_test_name = self.lookup_virtual_test_base(test_name)
+        if actual_test_name:
+            return self.expected_filename(actual_test_name, suffix)
+
+        if return_default:
+            return self._filesystem.join(self.layout_tests_dir(), baseline_filename)
+        return None
+
+    def expected_checksum(self, test_name):
+        """Returns the checksum of the image we expect the test to produce, or None if it is a text-only test."""
+        png_path = self.expected_filename(test_name, '.png')
+
+        if self._filesystem.exists(png_path):
+            with self._filesystem.open_binary_file_for_reading(png_path) as filehandle:
+                return read_checksum_from_png.read_checksum(filehandle)
+
+        return None
+
+    def expected_image(self, test_name):
+        """Returns the image we expect the test to produce."""
+        baseline_path = self.expected_filename(test_name, '.png')
+        if not self._filesystem.exists(baseline_path):
+            return None
+        return self._filesystem.read_binary_file(baseline_path)
+
+    def expected_audio(self, test_name):
+        baseline_path = self.expected_filename(test_name, '.wav')
+        if not self._filesystem.exists(baseline_path):
+            return None
+        return self._filesystem.read_binary_file(baseline_path)
+
+    def expected_text(self, test_name):
+        """Returns the text output we expect the test to produce, or None
+        if we don't expect there to be any text output.
+        End-of-line characters are normalized to '\n'."""
+        # FIXME: DRT output is actually utf-8, but since we don't decode the
+        # output from DRT (instead treating it as a binary string), we read the
+        # baselines as a binary string, too.
+        baseline_path = self.expected_filename(test_name, '.txt')
+        if not self._filesystem.exists(baseline_path):
+            baseline_path = self.expected_filename(test_name, '.webarchive')
+            if not self._filesystem.exists(baseline_path):
+                return None
+        text = self._filesystem.read_binary_file(baseline_path)
+        return text.replace("\r\n", "\n")
+
+    def _get_reftest_list(self, test_name):
+        dirname = self._filesystem.join(self.layout_tests_dir(), self._filesystem.dirname(test_name))
+        if dirname not in self._reftest_list:
+            self._reftest_list[dirname] = Port._parse_reftest_list(self._filesystem, dirname)
+        return self._reftest_list[dirname]
+
+    @staticmethod
+    def _parse_reftest_list(filesystem, test_dirpath):
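+        # Each non-comment line of a reftest.list file is expected to contain
+        # an expectation type followed by a test file and its reference file,
+        # e.g. (the file names are illustrative):
+        #
+        #   == foo.html foo-ref.html
+        #   != bar.html bar-mismatch.html
+        #
+        # '#' starts a comment, and lines with fewer than three fields are ignored.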
+        reftest_list_path = filesystem.join(test_dirpath, 'reftest.list')
+        if not filesystem.isfile(reftest_list_path):
+            return None
+        reftest_list_file = filesystem.read_text_file(reftest_list_path)
+
+        parsed_list = {}
+        for line in reftest_list_file.split('\n'):
+            line = re.sub('#.+$', '', line)
+            split_line = line.split()
+            if len(split_line) < 3:
+                continue
+            expectation_type, test_file, ref_file = split_line
+            parsed_list.setdefault(filesystem.join(test_dirpath, test_file), []).append((expectation_type, filesystem.join(test_dirpath, ref_file)))
+        return parsed_list
+
+    def reference_files(self, test_name):
+        """Return a list of expectation (== or !=) and filename pairs"""
+
+        reftest_list = self._get_reftest_list(test_name)
+        if not reftest_list:
+            reftest_list = []
+            for expectation, prefix in (('==', ''), ('!=', '-mismatch')):
+                for extension in Port._supported_file_extensions:
+                    path = self.expected_filename(test_name, prefix + extension)
+                    if self._filesystem.exists(path):
+                        reftest_list.append((expectation, path))
+            return reftest_list
+
+        return reftest_list.get(self._filesystem.join(self.layout_tests_dir(), test_name), [])
+
+    def tests(self, paths):
+        """Return the list of tests found. Both generic and platform-specific tests matching paths should be returned."""
+        expanded_paths = self._expanded_paths(paths)
+        return self._real_tests(expanded_paths).union(self._virtual_tests(expanded_paths, self.populated_virtual_test_suites()))
+
+    def _expanded_paths(self, paths):
+        expanded_paths = []
+        fs = self._filesystem
+        all_platform_dirs = [path for path in fs.glob(fs.join(self.layout_tests_dir(), 'platform', '*')) if fs.isdir(path)]
+        for path in paths:
+            expanded_paths.append(path)
+            if self.test_isdir(path) and not path.startswith('platform'):
+                for platform_dir in all_platform_dirs:
+                    if fs.isdir(fs.join(platform_dir, path)) and platform_dir in self.baseline_search_path():
+                        expanded_paths.append(self.relative_test_filename(fs.join(platform_dir, path)))
+
+        return expanded_paths
+
+    def _real_tests(self, paths):
+        # When collecting test cases, skip these directories
+        skipped_directories = set(['.svn', '_svn', 'resources', 'script-tests', 'reference', 'reftest'])
+        files = find_files.find(self._filesystem, self.layout_tests_dir(), paths, skipped_directories, Port._is_test_file)
+        return set([self.relative_test_filename(f) for f in files])
+
+    # When collecting test cases, we include any file with these extensions.
+    _supported_file_extensions = set(['.html', '.shtml', '.xml', '.xhtml', '.pl',
+                                      '.htm', '.php', '.svg', '.mht'])
+
+    @staticmethod
+    def is_reference_html_file(filesystem, dirname, filename):
+        if filename.startswith('ref-') or filename.startswith('notref-'):
+            return True
+        filename_without_ext, unused = filesystem.splitext(filename)
+        for suffix in ['-expected', '-expected-mismatch', '-ref', '-notref']:
+            if filename_without_ext.endswith(suffix):
+                return True
+        return False
+
+    @staticmethod
+    def _has_supported_extension(filesystem, filename):
+        """Return true if filename is one of the file extensions we want to run a test on."""
+        extension = filesystem.splitext(filename)[1]
+        return extension in Port._supported_file_extensions
+
+    @staticmethod
+    def _is_test_file(filesystem, dirname, filename):
+        return Port._has_supported_extension(filesystem, filename) and not Port.is_reference_html_file(filesystem, dirname, filename)
+
+    def test_dirs(self):
+        """Returns the list of top-level test directories."""
+        layout_tests_dir = self.layout_tests_dir()
+        return filter(lambda x: self._filesystem.isdir(self._filesystem.join(layout_tests_dir, x)),
+                      self._filesystem.listdir(layout_tests_dir))
+
+    @memoized
+    def test_isfile(self, test_name):
+        """Return True if the test name refers to a directory of tests."""
+        # Used by test_expectations.py to apply rules to whole directories.
+        if self._filesystem.isfile(self.abspath_for_test(test_name)):
+            return True
+        base = self.lookup_virtual_test_base(test_name)
+        return base and self._filesystem.isfile(self.abspath_for_test(base))
+
+    @memoized
+    def test_isdir(self, test_name):
+        """Return True if the test name refers to a directory of tests."""
+        # Used by test_expectations.py to apply rules to whole directories.
+        if self._filesystem.isdir(self.abspath_for_test(test_name)):
+            return True
+        base = self.lookup_virtual_test_base(test_name)
+        return base and self._filesystem.isdir(self.abspath_for_test(base))
+
+    @memoized
+    def test_exists(self, test_name):
+        """Return True if the test name refers to an existing test or baseline."""
+        # Used by test_expectations.py to determine if an entry refers to a
+        # valid test and by printing.py to determine if baselines exist.
+        return self.test_isfile(test_name) or self.test_isdir(test_name)
+
+    def split_test(self, test_name):
+        """Splits a test name into the 'directory' part and the 'basename' part."""
+        index = test_name.rfind(self.TEST_PATH_SEPARATOR)
+        if index < 1:
+            return ('', test_name)
+        return (test_name[0:index], test_name[index:])
+
+    def normalize_test_name(self, test_name):
+        """Returns a normalized version of the test name or test directory."""
+        if test_name.endswith('/'):
+            return test_name
+        if self.test_isdir(test_name):
+            return test_name + '/'
+        return test_name
+
+    def driver_cmd_line(self):
+        """Prints the DRT command line that will be used."""
+        driver = self.create_driver(0)
+        return driver.cmd_line(self.get_option('pixel_tests'), [])
+
+    def update_baseline(self, baseline_path, data):
+        """Updates the baseline for a test.
+
+        Args:
+            baseline_path: the actual path to use for baseline, not the path to
+              the test. This function is used to update either generic or
+              platform-specific baselines, but we can't infer which here.
+            data: contents of the baseline.
+        """
+        self._filesystem.write_binary_file(baseline_path, data)
+
+    @memoized
+    def layout_tests_dir(self):
+        """Return the absolute path to the top of the LayoutTests directory."""
+        return self._filesystem.normpath(self.path_from_webkit_base('LayoutTests'))
+
+    def perf_tests_dir(self):
+        """Return the absolute path to the top of the PerformanceTests directory."""
+        return self.path_from_webkit_base('PerformanceTests')
+
+    def webkit_base(self):
+        return self._filesystem.abspath(self.path_from_webkit_base('.'))
+
+    def skipped_layout_tests(self, test_list):
+        """Returns tests skipped outside of the TestExpectations files."""
+        return set(self._tests_for_other_platforms()).union(self._skipped_tests_for_unsupported_features(test_list))
+
+    def _tests_from_skipped_file_contents(self, skipped_file_contents):
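+        # A Skipped file is a plain list of test or directory names, one per
+        # line, e.g. (the entries are illustrative):
+        #
+        #   # lines starting with '#' are comments
+        #   fast/dom/some-test.html
+        #   http/tests/websocket/
+        #
+        # Blank lines are ignored, and trailing slashes on directory names are
+        # stripped before the entries are returned.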
+        tests_to_skip = []
+        for line in skipped_file_contents.split('\n'):
+            line = line.strip()
+            line = line.rstrip('/')  # Best to normalize directory names to not include the trailing slash.
+            if line.startswith('#') or not len(line):
+                continue
+            tests_to_skip.append(line)
+        return tests_to_skip
+
+    def _expectations_from_skipped_files(self, skipped_file_paths):
+        tests_to_skip = []
+        for search_path in skipped_file_paths:
+            filename = self._filesystem.join(self._webkit_baseline_path(search_path), "Skipped")
+            if not self._filesystem.exists(filename):
+                _log.debug("Skipped does not exist: %s" % filename)
+                continue
+            _log.debug("Using Skipped file: %s" % filename)
+            skipped_file_contents = self._filesystem.read_text_file(filename)
+            tests_to_skip.extend(self._tests_from_skipped_file_contents(skipped_file_contents))
+        return tests_to_skip
+
+    @memoized
+    def skipped_perf_tests(self):
+        return self._expectations_from_skipped_files([self.perf_tests_dir()])
+
+    def skips_perf_test(self, test_name):
+        for test_or_category in self.skipped_perf_tests():
+            if test_or_category == test_name:
+                return True
+            category = self._filesystem.join(self.perf_tests_dir(), test_or_category)
+            if self._filesystem.isdir(category) and test_name.startswith(test_or_category):
+                return True
+        return False
+
+    def is_chromium(self):
+        return False
+
+    def name(self):
+        """Returns a name that uniquely identifies this particular type of port
+        (e.g., "mac-snowleopard" or "chromium-linux-x86_x64" and can be passed
+        to factory.get() to instantiate the port."""
+        return self._name
+
+    def operating_system(self):
+        # Subclasses should override this default implementation.
+        return 'mac'
+
+    def version(self):
+        """Returns a string indicating the version of a given platform, e.g.
+        'leopard' or 'xp'.
+
+        This is used to help identify the exact port when parsing test
+        expectations, determining search paths, and logging information."""
+        return self._version
+
+    def architecture(self):
+        return self._architecture
+
+    def get_option(self, name, default_value=None):
+        return getattr(self._options, name, default_value)
+
+    def set_option_default(self, name, default_value):
+        return self._options.ensure_value(name, default_value)
+
+    def path_from_webkit_base(self, *comps):
+        """Returns the full path to path made by joining the top of the
+        WebKit source tree and the list of path components in |*comps|."""
+        return self._config.path_from_webkit_base(*comps)
+
+    @memoized
+    def path_to_test_expectations_file(self):
+        """Update the test expectations to the passed-in string.
+
+        This is used by the rebaselining tool. Raises NotImplementedError
+        if the port does not use expectations files."""
+
+        # FIXME: We need to remove this when we make rebaselining work with multiple files and just generalize expectations_files().
+
+        # test_expectations are always in mac/ not mac-leopard/ by convention, hence we use port_name instead of name().
+        port_name = self.port_name
+        if port_name.startswith('chromium'):
+            port_name = 'chromium'
+
+        return self._filesystem.join(self._webkit_baseline_path(port_name), 'TestExpectations')
+
+    def relative_test_filename(self, filename):
+        """Returns a test_name a relative unix-style path for a filename under the LayoutTests
+        directory. Ports may legitimately return abspaths here if no relpath makes sense."""
+        # Ports that run on windows need to override this method to deal with
+        # filenames with backslashes in them.
+        if filename.startswith(self.layout_tests_dir()):
+            return self.host.filesystem.relpath(filename, self.layout_tests_dir())
+        else:
+            return self.host.filesystem.abspath(filename)
+
+    def relative_perf_test_filename(self, filename):
+        if filename.startswith(self.perf_tests_dir()):
+            return self.host.filesystem.relpath(filename, self.perf_tests_dir())
+        else:
+            return self.host.filesystem.abspath(filename)
+
+    @memoized
+    def abspath_for_test(self, test_name):
+        """Returns the full path to the file for a given test name. This is the
+        inverse of relative_test_filename()."""
+        return self._filesystem.join(self.layout_tests_dir(), test_name)
+
+    def results_directory(self):
+        """Absolute path to the place to store the test results (uses --results-directory)."""
+        if not self._results_directory:
+            option_val = self.get_option('results_directory') or self.default_results_directory()
+            self._results_directory = self._filesystem.abspath(option_val)
+        return self._results_directory
+
+    def perf_results_directory(self):
+        return self._build_path()
+
+    def default_results_directory(self):
+        """Absolute path to the default place to store the test results."""
+        # Results are stored relative to the built products to make it easy
+        # to have multiple copies of webkit checked out and built.
+        return self._build_path('layout-test-results')
+
+    def setup_test_run(self):
+        """Perform port-specific work at the beginning of a test run."""
+        pass
+
+    def clean_up_test_run(self):
+        """Perform port-specific work at the end of a test run."""
+        if self._image_differ:
+            self._image_differ.stop()
+            self._image_differ = None
+
+    # FIXME: os.environ access should be moved to onto a common/system class to be more easily mockable.
+    def _value_or_default_from_environ(self, name, default=None):
+        if name in os.environ:
+            return os.environ[name]
+        return default
+
+    def _copy_value_from_environ_if_set(self, clean_env, name):
+        if name in os.environ:
+            clean_env[name] = os.environ[name]
+
+    def setup_environ_for_server(self, server_name=None):
+        # We intentionally copy only a subset of os.environ when
+        # launching subprocesses to ensure consistent test results.
+        clean_env = {}
+        variables_to_copy = [
+            # For Linux:
+            'XAUTHORITY',
+            'HOME',
+            'LANG',
+            'LD_LIBRARY_PATH',
+            'DBUS_SESSION_BUS_ADDRESS',
+            'XDG_DATA_DIRS',
+
+            # Darwin:
+            'DYLD_LIBRARY_PATH',
+            'HOME',
+
+            # CYGWIN:
+            'HOMEDRIVE',
+            'HOMEPATH',
+            '_NT_SYMBOL_PATH',
+
+            # Windows:
+            'PATH',
+
+            # Most ports (?):
+            'WEBKIT_TESTFONTS',
+            'WEBKITOUTPUTDIR',
+        ]
+        for variable in variables_to_copy:
+            self._copy_value_from_environ_if_set(clean_env, variable)
+
+        # For Linux:
+        clean_env['DISPLAY'] = self._value_or_default_from_environ('DISPLAY', ':1')
+
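+        # Entries from the additional_env_var option are 'NAME=VALUE' strings,
+        # e.g. 'WEBKIT_TESTFONTS=/path/to/fonts' (the path is illustrative).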
+        for string_variable in self.get_option('additional_env_var', []):
+            [name, value] = string_variable.split('=', 1)
+            clean_env[name] = value
+
+        return clean_env
+
+    def show_results_html_file(self, results_filename):
+        """This routine should display the HTML file pointed at by
+        results_filename in a users' browser."""
+        return self.host.user.open_url(path.abspath_to_uri(self.host.platform, results_filename))
+
+    def create_driver(self, worker_number, no_timeout=False):
+        """Return a newly created Driver subclass for starting/stopping the test driver."""
+        return driver.DriverProxy(self, worker_number, self._driver_class(), pixel_tests=self.get_option('pixel_tests'), no_timeout=no_timeout)
+
+    def start_helper(self):
+        """If a port needs to reconfigure graphics settings or do other
+        things to ensure a known test configuration, it should override this
+        method."""
+        pass
+
+    def requires_http_server(self):
+        """Does the port require an HTTP server for running tests? This could
+        be the case when the tests aren't run on the host platform."""
+        return False
+
+    def start_http_server(self, additional_dirs=None, number_of_servers=None):
+        """Start a web server. Raise an error if it can't start or is already running.
+
+        Ports can stub this out if they don't need a web server to be running."""
+        assert not self._http_server, 'Already running an http server.'
+
+        if self._uses_apache():
+            server = apache_http_server.LayoutTestApacheHttpd(self, self.results_directory(), additional_dirs=additional_dirs, number_of_servers=number_of_servers)
+        else:
+            server = http_server.Lighttpd(self, self.results_directory(), additional_dirs=additional_dirs, number_of_servers=number_of_servers)
+
+        server.start()
+        self._http_server = server
+
+    def start_websocket_server(self):
+        """Start a web server. Raise an error if it can't start or is already running.
+
+        Ports can stub this out if they don't need a websocket server to be running."""
+        assert not self._websocket_server, 'Already running a websocket server.'
+
+        server = websocket_server.PyWebSocket(self, self.results_directory())
+        server.start()
+        self._websocket_server = server
+
+    def http_server_supports_ipv6(self):
+        # Cygwin is the only platform to still use Apache 1.3, which only supports IPv4.
+        # Once it moves to Apache 2, we can drop this method altogether.
+        if self.host.platform.is_cygwin():
+            return False
+        return True
+
+    def acquire_http_lock(self):
+        self._http_lock = http_lock.HttpLock(None, filesystem=self._filesystem, executive=self._executive)
+        self._http_lock.wait_for_httpd_lock()
+
+    def stop_helper(self):
+        """Shut down the test helper if it is running. Do nothing if
+        it isn't running or isn't available. If a port overrides start_helper()
+        it must override this routine as well."""
+        pass
+
+    def stop_http_server(self):
+        """Shut down the http server if it is running. Do nothing if it isn't."""
+        if self._http_server:
+            self._http_server.stop()
+            self._http_server = None
+
+    def stop_websocket_server(self):
+        """Shut down the websocket server if it is running. Do nothing if it isn't."""
+        if self._websocket_server:
+            self._websocket_server.stop()
+            self._websocket_server = None
+
+    def release_http_lock(self):
+        if self._http_lock:
+            self._http_lock.cleanup_http_lock()
+
+    def exit_code_from_summarized_results(self, unexpected_results):
+        """Given summarized results, compute the exit code to be returned by new-run-webkit-tests.
+        Bots turn red when this function returns a non-zero value. By default, return the number of regressions
+        to avoid turning bots red by flaky failures, unexpected passes, and missing results."""
+        # Don't turn bots red for flaky failures, unexpected passes, and missing results.
+        return unexpected_results['num_regressions']
+
+    #
+    # TEST EXPECTATION-RELATED METHODS
+    #
+
+    def test_configuration(self):
+        """Returns the current TestConfiguration for the port."""
+        if not self._test_configuration:
+            self._test_configuration = TestConfiguration(self._version, self._architecture, self._options.configuration.lower())
+        return self._test_configuration
+
+    # FIXME: Belongs on a Platform object.
+    @memoized
+    def all_test_configurations(self):
+        """Returns a list of TestConfiguration instances, representing all available
+        test configurations for this port."""
+        return self._generate_all_test_configurations()
+
+    # FIXME: Belongs on a Platform object.
+    def configuration_specifier_macros(self):
+        """Ports may provide a way to abbreviate configuration specifiers to conveniently
+        refer to them as one term or alias specific values to more generic ones. For example:
+
+        (xp, vista, win7) -> win # Abbreviate all Windows versions into one namesake.
+        (lucid) -> linux  # Change specific name of the Linux distro to a more generic term.
+
+        Returns a dictionary, each key representing a macro term ('win', for example),
+        and value being a list of valid configuration specifiers (such as ['xp', 'vista', 'win7'])."""
+        return {}
+
+    def all_baseline_variants(self):
+        """Returns a list of platform names sufficient to cover all the baselines.
+
+        The list should be sorted so that a later platform will reuse
+        an earlier platform's baselines if they are the same (e.g.,
+        'snowleopard' should precede 'leopard')."""
+        raise NotImplementedError
+
+    def uses_test_expectations_file(self):
+        # This is different from checking whether test_expectations() is None, because
+        # some ports have Skipped files which are returned as part of test_expectations().
+        return self._filesystem.exists(self.path_to_test_expectations_file())
+
+    def warn_if_bug_missing_in_test_expectations(self):
+        return False
+
+    def expectations_dict(self):
+        """Returns an OrderedDict of name -> expectations strings.
+        The names are expected to be (but not required to be) paths in the filesystem.
+        If the name is a path, the file can be considered updatable for things like rebaselining,
+        so don't use a path-like name unless it actually refers to a file on disk.
+        Generally speaking, the ordering should be files in the filesystem in cascade order
+        (TestExpectations followed by Skipped, if the port honors both formats),
+        then any built-in expectations (e.g., from compile-time exclusions), then --additional-expectations options."""
+        # FIXME: rename this to test_expectations() once all the callers are updated to know about the ordered dict.
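+        # Illustrative shape of the result (paths here are only examples):
+        #   OrderedDict([('/mock-checkout/LayoutTests/platform/foo/TestExpectations', '<contents>'),
+        #                ('/tmp/additional-expectations-1.txt', 'content1\n')])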
+        expectations = OrderedDict()
+
+        for path in self.expectations_files():
+            if self._filesystem.exists(path):
+                expectations[path] = self._filesystem.read_text_file(path)
+
+        for path in self.get_option('additional_expectations', []):
+            expanded_path = self._filesystem.expanduser(path)
+            if self._filesystem.exists(expanded_path):
+                _log.debug("reading additional_expectations from path '%s'" % path)
+                expectations[path] = self._filesystem.read_text_file(expanded_path)
+            else:
+                _log.warning("additional_expectations path '%s' does not exist" % path)
+        return expectations
+
+    def expectations_files(self):
+        # Unlike baseline_search_path, we only want to search [WK2-PORT, PORT-VERSION, PORT] and any directories
+        # included via --additional-platform-directory, not the full cascade.
+        search_paths = [self.port_name]
+        if self.name() != self.port_name:
+            search_paths.append(self.name())
+
+        if self.get_option('webkit_test_runner'):
+            # Because nearly all of the skipped tests for WebKit 2 are due to cross-platform
+            # issues, all wk2 ports share a skipped list under platform/wk2.
+            search_paths.extend([self._wk2_port_name(), "wk2"])
+
+        search_paths.extend(self.get_option("additional_platform_directory", []))
+
+        return [self._filesystem.join(self._webkit_baseline_path(d), 'TestExpectations') for d in search_paths]
+
+    def repository_paths(self):
+        """Returns a list of (repository_name, repository_path) tuples of its depending code base.
+        By default it returns a list that only contains a ('webkit', <webkitRepossitoryPath>) tuple."""
+
+        # We use the LayoutTests directory here because webkit_base isn't part of the webkit
+        # repository in the Chromium port, where trunk isn't checked out as a whole.
+        return [('webkit', self.layout_tests_dir())]
+
+    _WDIFF_DEL = '##WDIFF_DEL##'
+    _WDIFF_ADD = '##WDIFF_ADD##'
+    _WDIFF_END = '##WDIFF_END##'
+
+    def _format_wdiff_output_as_html(self, wdiff):
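+        # For example, 'OUTPUT ##WDIFF_DEL## ##WDIFF_ADD## ##WDIFF_END##' becomes
+        # '<pre>OUTPUT <span class=del> <span class=add> </span></pre>' wrapped in the
+        # styled <head> block built below (see test_format_wdiff_output_as_html).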
+        wdiff = cgi.escape(wdiff)
+        wdiff = wdiff.replace(self._WDIFF_DEL, "<span class=del>")
+        wdiff = wdiff.replace(self._WDIFF_ADD, "<span class=add>")
+        wdiff = wdiff.replace(self._WDIFF_END, "</span>")
+        html = "<head><style>.del { background: #faa; } "
+        html += ".add { background: #afa; }</style></head>"
+        html += "<pre>%s</pre>" % wdiff
+        return html
+
+    def _wdiff_command(self, actual_filename, expected_filename):
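+        # Produces a command such as ['/usr/bin/wdiff', '--start-delete=##WDIFF_DEL##',
+        # '--end-delete=##WDIFF_END##', '--start-insert=##WDIFF_ADD##',
+        # '--end-insert=##WDIFF_END##', actual_filename, expected_filename]
+        # (see test_wdiff_command in base_unittest.py).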
+        executable = self._path_to_wdiff()
+        return [executable,
+                "--start-delete=%s" % self._WDIFF_DEL,
+                "--end-delete=%s" % self._WDIFF_END,
+                "--start-insert=%s" % self._WDIFF_ADD,
+                "--end-insert=%s" % self._WDIFF_END,
+                actual_filename,
+                expected_filename]
+
+    @staticmethod
+    def _handle_wdiff_error(script_error):
+        # Exit 1 means the files differed; any other exit code is an error.
+        if script_error.exit_code != 1:
+            raise script_error
+
+    def _run_wdiff(self, actual_filename, expected_filename):
+        """Runs wdiff and may throw exceptions.
+        This is mostly a hook for unit testing."""
+        # Diffs are treated as binary as they may include multiple files
+        # with conflicting encodings.  Thus we do not decode the output.
+        command = self._wdiff_command(actual_filename, expected_filename)
+        wdiff = self._executive.run_command(command, decode_output=False,
+            error_handler=self._handle_wdiff_error)
+        return self._format_wdiff_output_as_html(wdiff)
+
+    def wdiff_text(self, actual_filename, expected_filename):
+        """Returns a string of HTML indicating the word-level diff of the
+        contents of the two filenames. Returns an empty string if word-level
+        diffing isn't available."""
+        if not self.wdiff_available():
+            return ""
+        try:
+            # It's possible for this to raise a ScriptError if we pass wdiff invalid paths.
+            return self._run_wdiff(actual_filename, expected_filename)
+        except OSError, e:
+            if e.errno in [errno.ENOENT, errno.EACCES, errno.ECHILD]:
+                # Silently ignore cases where wdiff is missing.
+                self._wdiff_available = False
+                return ""
+            raise
+
+    # This is a class variable so we can test error output easily.
+    _pretty_patch_error_html = "Failed to run PrettyPatch, see error log."
+
+    def pretty_patch_text(self, diff_path):
+        if self._pretty_patch_available is None:
+            self._pretty_patch_available = self.check_pretty_patch(logging=False)
+        if not self._pretty_patch_available:
+            return self._pretty_patch_error_html
+        command = ("ruby", "-I", self._filesystem.dirname(self._pretty_patch_path),
+                   self._pretty_patch_path, diff_path)
+        try:
+            # Diffs are treated as binary (we pass decode_output=False) as they
+            # may contain multiple files of conflicting encodings.
+            return self._executive.run_command(command, decode_output=False)
+        except OSError, e:
+            # If the system is missing ruby, log the error and stop trying.
+            self._pretty_patch_available = False
+            _log.error("Failed to run PrettyPatch (%s): %s" % (command, e))
+            return self._pretty_patch_error_html
+        except ScriptError, e:
+            # If ruby failed to run for some reason, log the command
+            # output and stop trying.
+            self._pretty_patch_available = False
+            _log.error("Failed to run PrettyPatch (%s):\n%s" % (command, e.message_with_output()))
+            return self._pretty_patch_error_html
+
+    def default_configuration(self):
+        return self._config.default_configuration()
+
+    #
+    # PROTECTED ROUTINES
+    #
+    # The routines below should only be called by routines in this class
+    # or any of its subclasses.
+    #
+
+    def _uses_apache(self):
+        return True
+
+    # FIXME: This does not belong on the port object.
+    @memoized
+    def _path_to_apache(self):
+        """Returns the full path to the apache binary.
+
+        This is needed only by ports that use the apache_http_server module."""
+        # The Apache binary path can vary depending on OS and distribution
+        # See http://wiki.apache.org/httpd/DistrosDefaultLayout
+        for path in ["/usr/sbin/httpd", "/usr/sbin/apache2"]:
+            if self._filesystem.exists(path):
+                return path
+        _log.error("Could not find apache. Not installed or unknown path.")
+        return None
+
+    # FIXME: This belongs on some platform abstraction instead of Port.
+    def _is_redhat_based(self):
+        return self._filesystem.exists('/etc/redhat-release')
+
+    def _is_debian_based(self):
+        return self._filesystem.exists('/etc/debian_version')
+
+    # We pass sys_platform into this method to make it easy to unit test.
+    def _apache_config_file_name_for_platform(self, sys_platform):
+        if sys_platform == 'cygwin':
+            return 'cygwin-httpd.conf'  # CYGWIN is the only platform to still use Apache 1.3.
+        if sys_platform.startswith('linux'):
+            if self._is_redhat_based():
+                return 'fedora-httpd.conf'  # This is an Apache 2.x config file despite the naming.
+            if self._is_debian_based():
+                return 'apache2-debian-httpd.conf'
+        # All platforms use apache2 except for CYGWIN (and Mac OS X Tiger and prior, which we no longer support).
+        return "apache2-httpd.conf"
+
+    def _path_to_apache_config_file(self):
+        """Returns the full path to the apache configuration file.
+
+        If the WEBKIT_HTTP_SERVER_CONF_PATH environment variable is set, its
+        contents will be used instead.
+
+        This is needed only by ports that use the apache_http_server module."""
+        config_file_from_env = os.environ.get('WEBKIT_HTTP_SERVER_CONF_PATH')
+        if config_file_from_env:
+            if not self._filesystem.exists(config_file_from_env):
+                raise IOError('%s was not found on the system' % config_file_from_env)
+            return config_file_from_env
+
+        config_file_name = self._apache_config_file_name_for_platform(sys.platform)
+        return self._filesystem.join(self.layout_tests_dir(), 'http', 'conf', config_file_name)
+
+    def _build_path(self, *comps):
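+        # For example, with --build-directory=/my-build-directory/ and a Release
+        # configuration, _build_path() returns '/my-build-directory/Release' and
+        # _build_path('layout-test-results') returns
+        # '/my-build-directory/Release/layout-test-results' (compare test_build_path).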
+        root_directory = self.get_option('root')
+        if not root_directory:
+            build_directory = self.get_option('build_directory')
+            if build_directory:
+                root_directory = self._filesystem.join(build_directory, self.get_option('configuration'))
+            else:
+                root_directory = self._config.build_directory(self.get_option('configuration'))
+            # Set --root so that we can pass this to subprocesses and avoid making the
+            # slow call to config.build_directory() N times in each worker.
+            # FIXME: This is like @memoized, but more annoying and fragile; there should be another
+            # way to propagate values without mutating the options list.
+            self.set_option_default('root', root_directory)
+        return self._filesystem.join(self._filesystem.abspath(root_directory), *comps)
+
+    def _path_to_driver(self, configuration=None):
+        """Returns the full path to the test driver (DumpRenderTree)."""
+        return self._build_path(self.driver_name())
+
+    def _path_to_webcore_library(self):
+        """Returns the full path to a built copy of WebCore."""
+        return None
+
+    def _path_to_helper(self):
+        """Returns the full path to the layout_test_helper binary, which
+        is used to help configure the system for the test run, or None
+        if no helper is needed.
+
+        This is likely only used by start/stop_helper()."""
+        return None
+
+    def _path_to_image_diff(self):
+        """Returns the full path to the image_diff binary, or None if it is not available.
+
+        This is likely used only by diff_image()."""
+        return self._build_path('ImageDiff')
+
+    def _path_to_lighttpd(self):
+        """Returns the path to the LigHTTPd binary.
+
+        This is needed only by ports that use the http_server.py module."""
+        raise NotImplementedError('Port._path_to_lighttpd')
+
+    def _path_to_lighttpd_modules(self):
+        """Returns the path to the LigHTTPd modules directory.
+
+        This is needed only by ports that use the http_server.py module."""
+        raise NotImplementedError('Port._path_to_lighttpd_modules')
+
+    def _path_to_lighttpd_php(self):
+        """Returns the path to the LigHTTPd PHP executable.
+
+        This is needed only by ports that use the http_server.py module."""
+        raise NotImplementedError('Port._path_to_lighttpd_php')
+
+    @memoized
+    def _path_to_wdiff(self):
+        """Returns the full path to the wdiff binary, or None if it is not available.
+
+        This is likely used only by wdiff_text()."""
+        for path in ("/usr/bin/wdiff", "/usr/bin/dwdiff"):
+            if self._filesystem.exists(path):
+                return path
+        return None
+
+    def _webkit_baseline_path(self, platform):
+        """Return the  full path to the top of the baseline tree for a
+        given platform."""
+        return self._filesystem.join(self.layout_tests_dir(), 'platform', platform)
+
+    # FIXME: Belongs on a Platform object.
+    def _generate_all_test_configurations(self):
+        """Generates a list of TestConfiguration instances, representing configurations
+        for a platform across all OSes, architectures, build and graphics types."""
+        raise NotImplementedError('Port._generate_all_test_configurations')
+
+    def _driver_class(self):
+        """Returns the port's driver implementation."""
+        return driver.Driver
+
+    def _get_crash_log(self, name, pid, stdout, stderr, newer_than):
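+        # Returns (stderr, log_text), where log_text looks roughly like:
+        #   crash log for DumpRenderTree (pid 1234):
+        #   STDOUT: <first line of stdout>
+        #   ...
+        #   STDERR: <first line of stderr>
+        # Unknown values fall back to '<unknown process name>' and '<unknown>'.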
+        name_str = name or '<unknown process name>'
+        pid_str = str(pid or '<unknown>')
+        stdout_lines = (stdout or '<empty>').decode('utf8', 'replace').splitlines()
+        stderr_lines = (stderr or '<empty>').decode('utf8', 'replace').splitlines()
+        return (stderr, 'crash log for %s (pid %s):\n%s\n%s\n' % (name_str, pid_str,
+            '\n'.join(('STDOUT: ' + l) for l in stdout_lines),
+            '\n'.join(('STDERR: ' + l) for l in stderr_lines)))
+
+    def look_for_new_crash_logs(self, crashed_processes, start_time):
+        pass
+
+    def sample_process(self, name, pid):
+        pass
+
+    def virtual_test_suites(self):
+        return []
+
+    @memoized
+    def populated_virtual_test_suites(self):
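+        # Example (from the TestPort used in base_unittest.py): a suite named
+        # 'virtual/passes' with base 'passes' exposes 'passes/text.html' as the
+        # virtual test 'virtual/passes/text.html'.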
+        suites = self.virtual_test_suites()
+
+        # Sanity-check the suites to make sure they don't point to other suites.
+        suite_dirs = [suite.name for suite in suites]
+        for suite in suites:
+            assert suite.base not in suite_dirs
+
+        for suite in suites:
+            base_tests = self._real_tests([suite.base])
+            suite.tests = {}
+            for test in base_tests:
+                suite.tests[test.replace(suite.base, suite.name, 1)] = test
+        return suites
+
+    def _virtual_tests(self, paths, suites):
+        virtual_tests = set()
+        for suite in suites:
+            if paths:
+                for test in suite.tests:
+                    if any(test.startswith(p) for p in paths):
+                        virtual_tests.add(test)
+            else:
+                virtual_tests.update(set(suite.tests.keys()))
+        return virtual_tests
+
+    def lookup_virtual_test_base(self, test_name):
+        for suite in self.populated_virtual_test_suites():
+            if test_name.startswith(suite.name):
+                return test_name.replace(suite.name, suite.base, 1)
+        return None
+
+    def lookup_virtual_test_args(self, test_name):
+        for suite in self.populated_virtual_test_suites():
+            if test_name.startswith(suite.name):
+                return suite.args
+        return []
+
+    def should_run_as_pixel_test(self, test_input):
+        if not self._options.pixel_tests:
+            return False
+        if self._options.pixel_test_directories:
+            return any(test_input.test_name.startswith(directory) for directory in self._options.pixel_test_directories)
+        return self._should_run_as_pixel_test(test_input)
+
+    def _should_run_as_pixel_test(self, test_input):
+        # Default behavior is to allow all tests to run as pixel tests if --pixel-tests is on and
+        # --pixel-test-directory is not specified.
+        return True
+
+    # FIXME: Eventually we should standardize port naming, and make this method smart enough
+    # to use for all port configurations (including architectures, graphics types, etc).
+    def _port_flag_for_scripts(self):
+        # This is overridden by ports which need a flag passed to scripts to distinguish the use of that port.
+        # For example --qt on linux, since a user might have both Gtk and Qt libraries installed.
+        # FIXME: Chromium should override this once ChromiumPort is a WebKitPort.
+        return None
+
+    # This is modeled after webkitdirs.pm argumentsForConfiguration() from old-run-webkit-tests
+    def _arguments_for_configuration(self):
+        config_args = []
+        config_args.append(self._config.flag_for_configuration(self.get_option('configuration')))
+        # FIXME: We may need to add support for passing --32-bit like old-run-webkit-tests had.
+        port_flag = self._port_flag_for_scripts()
+        if port_flag:
+            config_args.append(port_flag)
+        return config_args
+
+    def _run_script(self, script_name, args=None, include_configuration_arguments=True, decode_output=True, env=None):
+        run_script_command = [self._config.script_path(script_name)]
+        if include_configuration_arguments:
+            run_script_command.extend(self._arguments_for_configuration())
+        if args:
+            run_script_command.extend(args)
+        output = self._executive.run_command(run_script_command, cwd=self._config.webkit_base_dir(), decode_output=decode_output, env=env)
+        _log.debug('Output of %s:\n%s' % (run_script_command, output))
+        return output
+
+    def _build_driver(self):
+        environment = self.host.copy_current_environment()
+        environment.disable_gcc_smartquotes()
+        env = environment.to_dictionary()
+
+        # FIXME: We build both DumpRenderTree and WebKitTestRunner for
+        # WebKitTestRunner runs because DumpRenderTree still includes
+        # the DumpRenderTreeSupport module and the TestNetscapePlugin.
+        # These two projects should be factored out into their own
+        # projects.
+        try:
+            self._run_script("build-dumprendertree", args=self._build_driver_flags(), env=env)
+            if self.get_option('webkit_test_runner'):
+                self._run_script("build-webkittestrunner", args=self._build_driver_flags(), env=env)
+        except ScriptError, e:
+            _log.error(e.message_with_output(output_limit=None))
+            return False
+        return True
+
+    def _build_driver_flags(self):
+        return []
+
+    def _tests_for_other_platforms(self):
+        # By default we will skip any directory under LayoutTests/platform
+        # that isn't in our baseline search path (this mirrors what
+        # old-run-webkit-tests does in findTestsToRun()).
+        # Note this returns LayoutTests/platform/*, not platform/*/*.
+        entries = self._filesystem.glob(self._webkit_baseline_path('*'))
+        dirs_to_skip = []
+        for entry in entries:
+            if self._filesystem.isdir(entry) and entry not in self.baseline_search_path():
+                basename = self._filesystem.basename(entry)
+                dirs_to_skip.append('platform/%s' % basename)
+        return dirs_to_skip
+
+    def _runtime_feature_list(self):
+        """If a port makes certain features available only through runtime flags, it can override this routine to indicate which ones are available."""
+        return None
+
+    def nm_command(self):
+        return 'nm'
+
+    def _modules_to_search_for_symbols(self):
+        path = self._path_to_webcore_library()
+        if path:
+            return [path]
+        return []
+
+    def _symbols_string(self):
+        symbols = ''
+        for path_to_module in self._modules_to_search_for_symbols():
+            try:
+                symbols += self._executive.run_command([self.nm_command(), path_to_module], error_handler=self._executive.ignore_error)
+            except OSError, e:
+                _log.warn("Failed to run nm: %s.  Can't determine supported features correctly." % e)
+        return symbols
+
+    # Ports which use run-time feature detection should define this method and return
+    # a dictionary mapping from Feature Names to skipped directories.  NRWT will
+    # run DumpRenderTree --print-supported-features and parse the output.
+    # If the Feature Names are not found in the output, the corresponding directories
+    # will be skipped.
+    def _missing_feature_to_skipped_tests(self):
+        """Return the supported feature dictionary. Keys are feature names and values
+        are the lists of directories to skip if the feature name is not matched."""
+        # FIXME: This list matches WebKitWin and should be moved onto the Win port.
+        return {
+            "Accelerated Compositing": ["compositing"],
+            "3D Rendering": ["animations/3d", "transforms/3d"],
+        }
+
+    # Ports which use compile-time feature detection should define this method and return
+    # a dictionary mapping from symbol substrings to possibly disabled test directories.
+    # When the symbol substrings are not matched, the directories will be skipped.
+    # If ports don't ever enable certain features, then those directories can just be
+    # in the Skipped list instead of being compile-time-checked here.
+    def _missing_symbol_to_skipped_tests(self):
+        """Return the supported feature dictionary. The keys are symbol-substrings
+        and the values are the lists of directories to skip if that symbol is missing."""
+        return {
+            "MathMLElement": ["mathml"],
+            "GraphicsLayer": ["compositing"],
+            "WebCoreHas3DRendering": ["animations/3d", "transforms/3d"],
+            "WebGLShader": ["fast/canvas/webgl", "compositing/webgl", "http/tests/canvas/webgl"],
+            "MHTMLArchive": ["mhtml"],
+            "CSSVariableValue": ["fast/css/variables", "inspector/styles/variables"],
+        }
+
+    def _has_test_in_directories(self, directory_lists, test_list):
+        if not test_list:
+            return False
+
+        directories = itertools.chain.from_iterable(directory_lists)
+        for directory, test in itertools.product(directories, test_list):
+            if test.startswith(directory):
+                return True
+        return False
+
+    def _skipped_tests_for_unsupported_features(self, test_list):
+        # Only check the runtime feature list if there are tests in the test_list that might get skipped.
+        # This is a performance optimization to avoid the subprocess call to DRT.
+        # If the port supports runtime feature detection, disable any tests
+        # for features missing from the runtime feature list.
+        # If _runtime_feature_list returns a non-None value, then prefer
+        # runtime feature detection over static feature detection.
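+        # For example, if "3D Rendering" is missing from the runtime feature list,
+        # tests under animations/3d and transforms/3d are skipped (see
+        # _missing_feature_to_skipped_tests above).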
+        if self._has_test_in_directories(self._missing_feature_to_skipped_tests().values(), test_list):
+            supported_feature_list = self._runtime_feature_list()
+            if supported_feature_list is not None:
+                return reduce(operator.add, [directories for feature, directories in self._missing_feature_to_skipped_tests().items() if feature not in supported_feature_list])
+
+        # Only check the symbols if there are tests in the test_list that might get skipped.
+        # This is a performance optimization to avoid calling nm.
+        # Runtime feature detection is not supported, so fall back to static detection:
+        # Disable any tests for symbols missing from the executable or libraries.
+        if self._has_test_in_directories(self._missing_symbol_to_skipped_tests().values(), test_list):
+            symbols_string = self._symbols_string()
+            if symbols_string is not None:
+                return reduce(operator.add, [directories for symbol_substring, directories in self._missing_symbol_to_skipped_tests().items() if symbol_substring not in symbols_string], [])
+
+        return []
+
+    def _wk2_port_name(self):
+        # By current convention, the WebKit2 name is always mac-wk2, win-wk2, not mac-leopard-wk2, etc.,
+        # except for Qt because WebKit2 is only supported by Qt 5.0 (therefore: qt-5.0-wk2).
+        return "%s-wk2" % self.port_name
+
+
+class VirtualTestSuite(object):
+    def __init__(self, name, base, args, tests=None):
+        self.name = name
+        self.base = base
+        self.args = args
+        self.tests = tests or set()
+
+    def __repr__(self):
+        return "VirtualTestSuite('%s', '%s', %s)" % (self.name, self.base, self.args)
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py
new file mode 100644
index 0000000..1fe75cc
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py
@@ -0,0 +1,467 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import optparse
+import sys
+import tempfile
+import unittest
+
+from webkitpy.common.system.executive import Executive, ScriptError
+from webkitpy.common.system import executive_mock
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.system.path import abspath_to_uri
+from webkitpy.thirdparty.mock import Mock
+from webkitpy.tool.mocktool import MockOptions
+from webkitpy.common.system.executive_mock import MockExecutive, MockExecutive2
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+
+from webkitpy.layout_tests.port import Port, Driver, DriverOutput
+from webkitpy.layout_tests.port.test import add_unit_tests_to_mock_filesystem, TestPort
+
+import config
+import config_mock
+
+class PortTest(unittest.TestCase):
+    def make_port(self, executive=None, with_tests=False, **kwargs):
+        host = MockSystemHost()
+        if executive:
+            host.executive = executive
+        if with_tests:
+            add_unit_tests_to_mock_filesystem(host.filesystem)
+            return TestPort(host, **kwargs)
+        return Port(host, **kwargs)
+
+    def test_default_child_processes(self):
+        port = self.make_port()
+        self.assertNotEquals(port.default_child_processes(), None)
+
+    def test_format_wdiff_output_as_html(self):
+        output = "OUTPUT %s %s %s" % (Port._WDIFF_DEL, Port._WDIFF_ADD, Port._WDIFF_END)
+        html = self.make_port()._format_wdiff_output_as_html(output)
+        expected_html = "<head><style>.del { background: #faa; } .add { background: #afa; }</style></head><pre>OUTPUT <span class=del> <span class=add> </span></pre>"
+        self.assertEqual(html, expected_html)
+
+    def test_wdiff_command(self):
+        port = self.make_port()
+        port._path_to_wdiff = lambda: "/path/to/wdiff"
+        command = port._wdiff_command("/actual/path", "/expected/path")
+        expected_command = [
+            "/path/to/wdiff",
+            "--start-delete=##WDIFF_DEL##",
+            "--end-delete=##WDIFF_END##",
+            "--start-insert=##WDIFF_ADD##",
+            "--end-insert=##WDIFF_END##",
+            "/actual/path",
+            "/expected/path",
+        ]
+        self.assertEqual(command, expected_command)
+
+    def _file_with_contents(self, contents, encoding="utf-8"):
+        new_file = tempfile.NamedTemporaryFile()
+        new_file.write(contents.encode(encoding))
+        new_file.flush()
+        return new_file
+
+    def test_pretty_patch_os_error(self):
+        port = self.make_port(executive=executive_mock.MockExecutive2(exception=OSError))
+        oc = OutputCapture()
+        oc.capture_output()
+        self.assertEqual(port.pretty_patch_text("patch.txt"),
+                         port._pretty_patch_error_html)
+
+        # This tests repeated calls to make sure we cache the result.
+        self.assertEqual(port.pretty_patch_text("patch.txt"),
+                         port._pretty_patch_error_html)
+        oc.restore_output()
+
+    def test_pretty_patch_script_error(self):
+        # FIXME: This is some ugly white-box test hacking ...
+        port = self.make_port(executive=executive_mock.MockExecutive2(exception=ScriptError))
+        port._pretty_patch_available = True
+        self.assertEqual(port.pretty_patch_text("patch.txt"),
+                         port._pretty_patch_error_html)
+
+        # This tests repeated calls to make sure we cache the result.
+        self.assertEqual(port.pretty_patch_text("patch.txt"),
+                         port._pretty_patch_error_html)
+
+    def integration_test_run_wdiff(self):
+        executive = Executive()
+        # This may fail on some systems.  We could ask the port
+        # object for the wdiff path, but since we don't know what
+        # port object to use, this is sufficient for now.
+        try:
+            wdiff_path = executive.run_command(["which", "wdiff"]).rstrip()
+        except Exception, e:
+            wdiff_path = None
+
+        port = self.make_port(executive=executive)
+        port._path_to_wdiff = lambda: wdiff_path
+
+        if wdiff_path:
+            # "with tempfile.NamedTemporaryFile() as actual" does not seem to work in Python 2.5
+            actual = self._file_with_contents(u"foo")
+            expected = self._file_with_contents(u"bar")
+            wdiff = port._run_wdiff(actual.name, expected.name)
+            expected_wdiff = "<head><style>.del { background: #faa; } .add { background: #afa; }</style></head><pre><span class=del>foo</span><span class=add>bar</span></pre>"
+            self.assertEqual(wdiff, expected_wdiff)
+            # Running the full wdiff_text method should give the same result.
+            port._wdiff_available = True  # In case it's somehow already disabled.
+            wdiff = port.wdiff_text(actual.name, expected.name)
+            self.assertEqual(wdiff, expected_wdiff)
+            # wdiff should still be available after running wdiff_text with a valid diff.
+            self.assertTrue(port._wdiff_available)
+            actual.close()
+            expected.close()
+
+            # Bogus paths should raise a script error.
+            self.assertRaises(ScriptError, port._run_wdiff, "/does/not/exist", "/does/not/exist2")
+            self.assertRaises(ScriptError, port.wdiff_text, "/does/not/exist", "/does/not/exist2")
+            # wdiff will still be available after running wdiff_text with invalid paths.
+            self.assertTrue(port._wdiff_available)
+
+        # If wdiff does not exist _run_wdiff should throw an OSError.
+        port._path_to_wdiff = lambda: "/invalid/path/to/wdiff"
+        self.assertRaises(OSError, port._run_wdiff, "foo", "bar")
+
+        # wdiff_text should not throw an error if wdiff does not exist.
+        self.assertEqual(port.wdiff_text("foo", "bar"), "")
+        # However wdiff should not be available after running wdiff_text if wdiff is missing.
+        self.assertFalse(port._wdiff_available)
+
+    def test_wdiff_text(self):
+        port = self.make_port()
+        port.wdiff_available = lambda: True
+        port._run_wdiff = lambda a, b: 'PASS'
+        self.assertEqual('PASS', port.wdiff_text(None, None))
+
+    def test_diff_text(self):
+        port = self.make_port()
+        # Make sure that we don't run into decoding exceptions when the
+        # filenames are unicode, with regular or malformed input (expected or
+        # actual input is always raw bytes, not unicode).
+        port.diff_text('exp', 'act', 'exp.txt', 'act.txt')
+        port.diff_text('exp', 'act', u'exp.txt', 'act.txt')
+        port.diff_text('exp', 'act', u'a\xac\u1234\u20ac\U00008000', 'act.txt')
+
+        port.diff_text('exp' + chr(255), 'act', 'exp.txt', 'act.txt')
+        port.diff_text('exp' + chr(255), 'act', u'exp.txt', 'act.txt')
+
+        # Though expected and actual files should always be read in with no
+        # encoding (and be stored as str objects), test unicode inputs just to
+        # be safe.
+        port.diff_text(u'exp', 'act', 'exp.txt', 'act.txt')
+        port.diff_text(
+            u'a\xac\u1234\u20ac\U00008000', 'act', 'exp.txt', 'act.txt')
+
+        # And make sure we actually get diff output.
+        diff = port.diff_text('foo', 'bar', 'exp.txt', 'act.txt')
+        self.assertTrue('foo' in diff)
+        self.assertTrue('bar' in diff)
+        self.assertTrue('exp.txt' in diff)
+        self.assertTrue('act.txt' in diff)
+        self.assertFalse('nosuchthing' in diff)
+
+    def test_default_configuration_notfound(self):
+        # Test that we delegate to the config object properly.
+        port = self.make_port(config=config_mock.MockConfig(default_configuration='default'))
+        self.assertEqual(port.default_configuration(), 'default')
+
+    def test_setup_test_run(self):
+        port = self.make_port()
+        # This routine is a no-op. We just test it for coverage.
+        port.setup_test_run()
+
+    def test_test_dirs(self):
+        port = self.make_port()
+        port.host.filesystem.write_text_file(port.layout_tests_dir() + '/canvas/test', '')
+        port.host.filesystem.write_text_file(port.layout_tests_dir() + '/css2.1/test', '')
+        dirs = port.test_dirs()
+        self.assertTrue('canvas' in dirs)
+        self.assertTrue('css2.1' in dirs)
+
+    def test_skipped_perf_tests(self):
+        port = self.make_port()
+
+        def add_text_file(dirname, filename, content='some content'):
+            dirname = port.host.filesystem.join(port.perf_tests_dir(), dirname)
+            port.host.filesystem.maybe_make_directory(dirname)
+            port.host.filesystem.write_text_file(port.host.filesystem.join(dirname, filename), content)
+
+        add_text_file('inspector', 'test1.html')
+        add_text_file('inspector', 'unsupported_test1.html')
+        add_text_file('inspector', 'test2.html')
+        add_text_file('inspector/resources', 'resource_file.html')
+        add_text_file('unsupported', 'unsupported_test2.html')
+        add_text_file('', 'Skipped', '\n'.join(['Layout', '', 'SunSpider', 'Supported/some-test.html']))
+        self.assertEqual(port.skipped_perf_tests(), ['Layout', 'SunSpider', 'Supported/some-test.html'])
+
+    def test_get_option__set(self):
+        options, args = optparse.OptionParser().parse_args([])
+        options.foo = 'bar'
+        port = self.make_port(options=options)
+        self.assertEqual(port.get_option('foo'), 'bar')
+
+    def test_get_option__unset(self):
+        port = self.make_port()
+        self.assertEqual(port.get_option('foo'), None)
+
+    def test_get_option__default(self):
+        port = self.make_port()
+        self.assertEqual(port.get_option('foo', 'bar'), 'bar')
+
+    def test_additional_platform_directory(self):
+        port = self.make_port(port_name='foo')
+        port.default_baseline_search_path = lambda: ['LayoutTests/platform/foo']
+        layout_test_dir = port.layout_tests_dir()
+        test_file = 'fast/test.html'
+
+        # No additional platform directory
+        self.assertEqual(
+            port.expected_baselines(test_file, '.txt'),
+            [(None, 'fast/test-expected.txt')])
+        self.assertEqual(port.baseline_path(), 'LayoutTests/platform/foo')
+
+        # Simple additional platform directory
+        port._options.additional_platform_directory = ['/tmp/local-baselines']
+        port._filesystem.write_text_file('/tmp/local-baselines/fast/test-expected.txt', 'foo')
+        self.assertEqual(
+            port.expected_baselines(test_file, '.txt'),
+            [('/tmp/local-baselines', 'fast/test-expected.txt')])
+        self.assertEqual(port.baseline_path(), '/tmp/local-baselines')
+
+        # Multiple additional platform directories
+        port._options.additional_platform_directory = ['/foo', '/tmp/local-baselines']
+        self.assertEqual(
+            port.expected_baselines(test_file, '.txt'),
+            [('/tmp/local-baselines', 'fast/test-expected.txt')])
+        self.assertEqual(port.baseline_path(), '/foo')
+
+    def test_nonexistant_expectations(self):
+        port = self.make_port(port_name='foo')
+        port.expectations_files = lambda: ['/mock-checkout/LayoutTests/platform/exists/TestExpectations', '/mock-checkout/LayoutTests/platform/nonexistant/TestExpectations']
+        port._filesystem.write_text_file('/mock-checkout/LayoutTests/platform/exists/TestExpectations', '')
+        self.assertEquals('\n'.join(port.expectations_dict().keys()), '/mock-checkout/LayoutTests/platform/exists/TestExpectations')
+
+    def test_additional_expectations(self):
+        port = self.make_port(port_name='foo')
+        port.port_name = 'foo'
+        port._filesystem.write_text_file('/mock-checkout/LayoutTests/platform/foo/TestExpectations', '')
+        port._filesystem.write_text_file(
+            '/tmp/additional-expectations-1.txt', 'content1\n')
+        port._filesystem.write_text_file(
+            '/tmp/additional-expectations-2.txt', 'content2\n')
+
+        self.assertEquals('\n'.join(port.expectations_dict().values()), '')
+
+        port._options.additional_expectations = [
+            '/tmp/additional-expectations-1.txt']
+        self.assertEquals('\n'.join(port.expectations_dict().values()), '\ncontent1\n')
+
+        port._options.additional_expectations = [
+            '/tmp/nonexistent-file', '/tmp/additional-expectations-1.txt']
+        self.assertEquals('\n'.join(port.expectations_dict().values()), '\ncontent1\n')
+
+        port._options.additional_expectations = [
+            '/tmp/additional-expectations-1.txt', '/tmp/additional-expectations-2.txt']
+        self.assertEquals('\n'.join(port.expectations_dict().values()), '\ncontent1\n\ncontent2\n')
+
+    def test_additional_env_var(self):
+        port = self.make_port(options=optparse.Values({'additional_env_var': ['FOO=BAR', 'BAR=FOO']}))
+        self.assertEqual(port.get_option('additional_env_var'), ['FOO=BAR', 'BAR=FOO'])
+        environment = port.setup_environ_for_server()
+        self.assertTrue(('FOO' in environment) & ('BAR' in environment))
+        self.assertEqual(environment['FOO'], 'BAR')
+        self.assertEqual(environment['BAR'], 'FOO')
+
+    def test_uses_test_expectations_file(self):
+        port = self.make_port(port_name='foo')
+        port.port_name = 'foo'
+        port.path_to_test_expectations_file = lambda: '/mock-results/TestExpectations'
+        self.assertFalse(port.uses_test_expectations_file())
+        port._filesystem = MockFileSystem({'/mock-results/TestExpectations': ''})
+        self.assertTrue(port.uses_test_expectations_file())
+
+    def test_find_no_paths_specified(self):
+        port = self.make_port(with_tests=True)
+        layout_tests_dir = port.layout_tests_dir()
+        tests = port.tests([])
+        self.assertNotEqual(len(tests), 0)
+
+    def test_find_one_test(self):
+        port = self.make_port(with_tests=True)
+        tests = port.tests(['failures/expected/image.html'])
+        self.assertEqual(len(tests), 1)
+
+    def test_find_glob(self):
+        port = self.make_port(with_tests=True)
+        tests = port.tests(['failures/expected/im*'])
+        self.assertEqual(len(tests), 2)
+
+    def test_find_with_skipped_directories(self):
+        port = self.make_port(with_tests=True)
+        tests = port.tests(['userscripts'])
+        self.assertTrue('userscripts/resources/iframe.html' not in tests)
+
+    def test_find_with_skipped_directories_2(self):
+        port = self.make_port(with_tests=True)
+        tests = port.tests(['userscripts/resources'])
+        self.assertEqual(tests, set([]))
+
+    def test_is_test_file(self):
+        filesystem = MockFileSystem()
+        self.assertTrue(Port._is_test_file(filesystem, '', 'foo.html'))
+        self.assertTrue(Port._is_test_file(filesystem, '', 'foo.shtml'))
+        self.assertTrue(Port._is_test_file(filesystem, '', 'foo.svg'))
+        self.assertTrue(Port._is_test_file(filesystem, '', 'test-ref-test.html'))
+        self.assertFalse(Port._is_test_file(filesystem, '', 'foo.png'))
+        self.assertFalse(Port._is_test_file(filesystem, '', 'foo-expected.html'))
+        self.assertFalse(Port._is_test_file(filesystem, '', 'foo-expected.svg'))
+        self.assertFalse(Port._is_test_file(filesystem, '', 'foo-expected.xht'))
+        self.assertFalse(Port._is_test_file(filesystem, '', 'foo-expected-mismatch.html'))
+        self.assertFalse(Port._is_test_file(filesystem, '', 'foo-expected-mismatch.svg'))
+        self.assertFalse(Port._is_test_file(filesystem, '', 'foo-expected-mismatch.xhtml'))
+        self.assertFalse(Port._is_test_file(filesystem, '', 'foo-ref.html'))
+        self.assertFalse(Port._is_test_file(filesystem, '', 'foo-notref.html'))
+        self.assertFalse(Port._is_test_file(filesystem, '', 'foo-notref.xht'))
+        self.assertFalse(Port._is_test_file(filesystem, '', 'foo-ref.xhtml'))
+        self.assertFalse(Port._is_test_file(filesystem, '', 'ref-foo.html'))
+        self.assertFalse(Port._is_test_file(filesystem, '', 'notref-foo.xhr'))
+
+    def test_parse_reftest_list(self):
+        port = self.make_port(with_tests=True)
+        port.host.filesystem.files['bar/reftest.list'] = "\n".join(["== test.html test-ref.html",
+        "",
+        "# some comment",
+        "!= test-2.html test-notref.html # more comments",
+        "== test-3.html test-ref.html",
+        "== test-3.html test-ref2.html",
+        "!= test-3.html test-notref.html"])
+
+        reftest_list = Port._parse_reftest_list(port.host.filesystem, 'bar')
+        self.assertEqual(reftest_list, {'bar/test.html': [('==', 'bar/test-ref.html')],
+            'bar/test-2.html': [('!=', 'bar/test-notref.html')],
+            'bar/test-3.html': [('==', 'bar/test-ref.html'), ('==', 'bar/test-ref2.html'), ('!=', 'bar/test-notref.html')]})
+
+    def test_reference_files(self):
+        port = self.make_port(with_tests=True)
+        self.assertEqual(port.reference_files('passes/svgreftest.svg'), [('==', port.layout_tests_dir() + '/passes/svgreftest-expected.svg')])
+        self.assertEqual(port.reference_files('passes/xhtreftest.svg'), [('==', port.layout_tests_dir() + '/passes/xhtreftest-expected.html')])
+        self.assertEqual(port.reference_files('passes/phpreftest.php'), [('!=', port.layout_tests_dir() + '/passes/phpreftest-expected-mismatch.svg')])
+
+    def test_operating_system(self):
+        self.assertEqual('mac', self.make_port().operating_system())
+
+    def test_http_server_supports_ipv6(self):
+        port = self.make_port()
+        self.assertTrue(port.http_server_supports_ipv6())
+        port.host.platform.os_name = 'cygwin'
+        self.assertFalse(port.http_server_supports_ipv6())
+        port.host.platform.os_name = 'win'
+        self.assertTrue(port.http_server_supports_ipv6())
+
+    def test_check_httpd_success(self):
+        port = self.make_port(executive=MockExecutive2())
+        port._path_to_apache = lambda: '/usr/sbin/httpd'
+        capture = OutputCapture()
+        capture.capture_output()
+        self.assertTrue(port.check_httpd())
+        _, _, logs = capture.restore_output()
+        self.assertEqual('', logs)
+
+    def test_httpd_returns_error_code(self):
+        port = self.make_port(executive=MockExecutive2(exit_code=1))
+        port._path_to_apache = lambda: '/usr/sbin/httpd'
+        capture = OutputCapture()
+        capture.capture_output()
+        self.assertFalse(port.check_httpd())
+        _, _, logs = capture.restore_output()
+        self.assertEqual('httpd seems broken. Cannot run http tests.\n', logs)
+
+    def test_test_exists(self):
+        port = self.make_port(with_tests=True)
+        self.assertTrue(port.test_exists('passes'))
+        self.assertTrue(port.test_exists('passes/text.html'))
+        self.assertFalse(port.test_exists('passes/does_not_exist.html'))
+
+        self.assertTrue(port.test_exists('virtual'))
+        self.assertFalse(port.test_exists('virtual/does_not_exist.html'))
+        self.assertTrue(port.test_exists('virtual/passes/text.html'))
+
+    def test_test_isfile(self):
+        port = self.make_port(with_tests=True)
+        self.assertFalse(port.test_isfile('passes'))
+        self.assertTrue(port.test_isfile('passes/text.html'))
+        self.assertFalse(port.test_isfile('passes/does_not_exist.html'))
+
+        self.assertFalse(port.test_isfile('virtual'))
+        self.assertTrue(port.test_isfile('virtual/passes/text.html'))
+        self.assertFalse(port.test_isfile('virtual/does_not_exist.html'))
+
+    def test_test_isdir(self):
+        port = self.make_port(with_tests=True)
+        self.assertTrue(port.test_isdir('passes'))
+        self.assertFalse(port.test_isdir('passes/text.html'))
+        self.assertFalse(port.test_isdir('passes/does_not_exist.html'))
+        self.assertFalse(port.test_isdir('passes/does_not_exist/'))
+
+        self.assertTrue(port.test_isdir('virtual'))
+        self.assertFalse(port.test_isdir('virtual/does_not_exist.html'))
+        self.assertFalse(port.test_isdir('virtual/does_not_exist/'))
+        self.assertFalse(port.test_isdir('virtual/passes/text.html'))
+
+    def test_tests(self):
+        port = self.make_port(with_tests=True)
+        tests = port.tests([])
+        self.assertTrue('passes/text.html' in tests)
+        self.assertTrue('virtual/passes/text.html' in tests)
+
+        tests = port.tests(['passes'])
+        self.assertTrue('passes/text.html' in tests)
+        self.assertTrue('passes/passes/test-virtual-passes.html' in tests)
+        self.assertFalse('virtual/passes/text.html' in tests)
+
+        tests = port.tests(['virtual/passes'])
+        self.assertFalse('passes/text.html' in tests)
+        self.assertTrue('virtual/passes/test-virtual-passes.html' in tests)
+        self.assertTrue('virtual/passes/passes/test-virtual-passes.html' in tests)
+        self.assertFalse('virtual/passes/test-virtual-virtual/passes.html' in tests)
+        self.assertFalse('virtual/passes/virtual/passes/test-virtual-passes.html' in tests)
+
+    def test_build_path(self):
+        port = self.make_port(options=optparse.Values({'build_directory': '/my-build-directory/'}))
+        self.assertEqual(port._build_path(), '/my-build-directory/Release')
+
+    def test_dont_require_http_server(self):
+        port = self.make_port()
+        self.assertEqual(port.requires_http_server(), False)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/builders.py b/Tools/Scripts/webkitpy/layout_tests/port/builders.py
new file mode 100644
index 0000000..155ac89
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/builders.py
@@ -0,0 +1,154 @@
+#!/usr/bin/env python
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import re
+
+from webkitpy.common.memoized import memoized
+
+
+# In this dictionary, each item stores:
+# * port_name -- a fully qualified port name
+# * specifiers -- a set of specifiers, representing configurations covered by this builder.
+# * move_overwritten_baselines_to -- (optional) list of platform directories that we will copy an existing
+#      baseline to before pulling down a new baseline during rebaselining. This is useful
+#      for bringing up a new port; for example, when Lion was the most recent Mac version and
+#      we wanted to bring up Mountain Lion, we would want to copy an existing baseline in platform/mac
+#      to platform/mac-mountainlion before updating the platform/mac entry.
+# * rebaseline_override_dir -- (optional) directory to put baselines in instead of where you would normally put them.
+#      This is useful when we don't have bots that cover particular configurations; so, e.g., you might
+#      support mac-mountainlion but not have a mac-mountainlion bot yet, so you'd want to put the mac-lion
+#      results into platform/mac temporarily.
+
+_exact_matches = {
+    # These builders are on build.chromium.org.
+    "WebKit XP": {"port_name": "chromium-win-xp", "specifiers": set(["xp", "release"])},
+    "WebKit Win7": {"port_name": "chromium-win-win7", "specifiers": set(["win7", "release"])},
+    "WebKit Win7 (dbg)(1)": {"port_name": "chromium-win-win7", "specifiers": set(["win7", "debug"])},
+    "WebKit Win7 (dbg)(2)": {"port_name": "chromium-win-win7", "specifiers": set(["win7", "debug"])},
+    "WebKit Linux": {"port_name": "chromium-linux-x86_64", "specifiers": set(["linux", "x86_64", "release"])},
+    "WebKit Linux 32": {"port_name": "chromium-linux-x86", "specifiers": set(["linux", "x86"])},
+    "WebKit Linux (dbg)": {"port_name": "chromium-linux-x86_64", "specifiers": set(["linux", "debug"])},
+    "WebKit Mac10.6": {"port_name": "chromium-mac-snowleopard", "specifiers": set(["snowleopard"])},
+    "WebKit Mac10.6 (dbg)": {"port_name": "chromium-mac-snowleopard", "specifiers": set(["snowleopard", "debug"])},
+    "WebKit Mac10.7": {"port_name": "chromium-mac-lion", "specifiers": set(["lion", "release"])},
+    "WebKit Mac10.7 (dbg)": {"port_name": "chromium-mac-lion", "specifiers": set(["lion", "debug"])},
+    "WebKit Mac10.8": {"port_name": "chromium-mac-mountainlion", "specifiers": set(["mountainlion", "release"]),
+                       "move_overwritten_baselines_to": ["chromium-mac-lion"]},
+
+    # These builders are on build.webkit.org.
+    "Apple MountainLion Release WK1 (Tests)": {"port_name": "mac-mountainlion", "specifiers": set(["mountainlion"]), "rebaseline_override_dir": "mac"},
+    "Apple MountainLion Debug WK1 (Tests)": {"port_name": "mac-mountainlion", "specifiers": set(["mountainlion", "debug"]), "rebaseline_override_dir": "mac"},
+    "Apple MountainLion Release WK2 (Tests)": {"port_name": "mac-mountainlion", "specifiers": set(["mountainlion", "wk2"]), "rebaseline_override_dir": "mac"},
+    "Apple MountainLion Debug WK2 (Tests)": {"port_name": "mac-mountainlion", "specifiers": set(["mountainlion", "wk2", "debug"]), "rebaseline_override_dir": "mac"},
+    "Apple Lion Release WK1 (Tests)": {"port_name": "mac-lion", "specifiers": set(["lion"])},
+    "Apple Lion Debug WK1 (Tests)": {"port_name": "mac-lion", "specifiers": set(["lion", "debug"])},
+    "Apple Lion Release WK2 (Tests)": {"port_name": "mac-lion", "specifiers": set(["lion", "wk2"])},
+    "Apple Lion Debug WK2 (Tests)": {"port_name": "mac-lion", "specifiers": set(["lion", "wk2", "debug"])},
+
+    "Apple Win XP Debug (Tests)": {"port_name": "win-xp", "specifiers": set(["win", "debug"])},
+    # FIXME: Remove rebaseline_override_dir once there is an Apple buildbot that corresponds to platform/win.
+    "Apple Win 7 Release (Tests)": {"port_name": "win-7sp0", "specifiers": set(["win"]), "rebaseline_override_dir": "win"},
+
+    "GTK Linux 32-bit Release": {"port_name": "gtk", "specifiers": set(["gtk", "x86", "release"])},
+    "GTK Linux 64-bit Debug": {"port_name": "gtk", "specifiers": set(["gtk", "x86_64", "debug"])},
+    "GTK Linux 64-bit Release": {"port_name": "gtk", "specifiers": set(["gtk", "x86_64", "release"])},
+    "GTK Linux 64-bit Release WK2 (Tests)": {"port_name": "gtk", "specifiers": set(["gtk", "x86_64", "wk2", "release"])},
+
+    # FIXME: Remove rebaseline_override_dir once there are Qt bots for all the platform/qt-* directories.
+    "Qt Linux Release": {"port_name": "qt-linux", "specifiers": set(["win", "linux", "mac"]), "rebaseline_override_dir": "qt"},
+
+    "EFL Linux 64-bit Debug": {"port_name": "efl", "specifiers": set(["efl", "debug"])},
+    "EFL Linux 64-bit Release": {"port_name": "efl", "specifiers": set(["efl", "release"])},
+}
+
+
+_fuzzy_matches = {
+    # These builders are on build.webkit.org.
+    r"SnowLeopard": "mac-snowleopard",
+    r"Apple Lion": "mac-lion",
+    r"Windows": "win",
+    r"GTK": "gtk",
+    r"Qt": "qt",
+    r"Chromium Mac": "chromium-mac",
+    r"Chromium Linux": "chromium-linux",
+    r"Chromium Win": "chromium-win",
+}
+
+
+_ports_without_builders = [
+    "qt-mac",
+    "qt-win",
+    "qt-wk2",
+    # FIXME: Move to _exact_matches.
+    "chromium-android",
+]
+
+
+def builder_path_from_name(builder_name):
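+    # For example, this maps a builder name such as "WebKit Win7 (dbg)(1)" to
+    # "WebKit_Win7__dbg__1_", presumably so it can be used as a path or URL
+    # component (compare the cases in builders_unittest.py).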
+    return re.sub(r'[\s().]', '_', builder_name)
+
+
+def all_builder_names():
+    return sorted(set(_exact_matches.keys()))
+
+
+def all_port_names():
+    return sorted(set(map(lambda x: x["port_name"], _exact_matches.values()) + _ports_without_builders))
+
+
+def coverage_specifiers_for_builder_name(builder_name):
+    return _exact_matches[builder_name].get("specifiers", set())
+
+
+def rebaseline_override_dir(builder_name):
+    return _exact_matches[builder_name].get("rebaseline_override_dir", None)
+
+
+def move_overwritten_baselines_to(builder_name):
+    return _exact_matches[builder_name].get("move_overwritten_baselines_to", [])
+
+
+def port_name_for_builder_name(builder_name):
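+    # For example, "WebKit Linux" resolves via _exact_matches to
+    # "chromium-linux-x86_64", while a hypothetical name like "GTK Linux 64-bit Future"
+    # has no exact entry and falls back to the r"GTK" fuzzy match, yielding "gtk".
+    # If neither table matches, this implicitly returns None.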
+    if builder_name in _exact_matches:
+        return _exact_matches[builder_name]["port_name"]
+
+    for regexp, port_name in _fuzzy_matches.items():
+        if re.match(regexp, builder_name):
+            return port_name
+
+
+def builder_name_for_port_name(target_port_name):
+    for builder_name, builder_info in _exact_matches.items():
+        if builder_info['port_name'] == target_port_name and 'debug' not in builder_info['specifiers']:
+            return builder_name
+    return None
+
+
+def builder_path_for_port_name(port_name):
+    return builder_path_from_name(builder_name_for_port_name(port_name))
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/builders_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/builders_unittest.py
new file mode 100644
index 0000000..1550df4
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/builders_unittest.py
@@ -0,0 +1,44 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+import builders
+
+
+class BuildersTest(unittest.TestCase):
+    def test_path_from_name(self):
+        tests = {
+            'test': 'test',
+            'Mac 10.6 (dbg)(1)': 'Mac_10_6__dbg__1_',
+            '(.) ': '____',
+        }
+        for name, expected in tests.items():
+            self.assertEquals(expected, builders.builder_path_from_name(name))
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
new file mode 100755
index 0000000..a69f5a8
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
@@ -0,0 +1,452 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Chromium implementations of the Port interface."""
+
+import base64
+import errno
+import logging
+import re
+import signal
+import subprocess
+import sys
+import time
+
+from webkitpy.common.system import executive
+from webkitpy.common.system.path import cygpath
+from webkitpy.layout_tests.models.test_configuration import TestConfiguration
+from webkitpy.layout_tests.port.base import Port, VirtualTestSuite
+
+
+_log = logging.getLogger(__name__)
+
+
+class ChromiumPort(Port):
+    """Abstract base class for Chromium implementations of the Port class."""
+
+    ALL_SYSTEMS = (
+        ('snowleopard', 'x86'),
+        ('lion', 'x86'),
+        ('mountainlion', 'x86'),
+        ('xp', 'x86'),
+        ('win7', 'x86'),
+        ('lucid', 'x86'),
+        ('lucid', 'x86_64'),
+        # FIXME: Technically this should be 'arm', but adding a third architecture type breaks TestConfigurationConverter.
+        # If we need this to be 'arm' in the future, then we first have to fix TestConfigurationConverter.
+        ('icecreamsandwich', 'x86'))
+
+    ALL_BASELINE_VARIANTS = [
+        'chromium-mac-mountainlion', 'chromium-mac-lion', 'chromium-mac-snowleopard',
+        'chromium-win-win7', 'chromium-win-xp',
+        'chromium-linux-x86_64', 'chromium-linux-x86',
+    ]
+
+    CONFIGURATION_SPECIFIER_MACROS = {
+        'mac': ['snowleopard', 'lion', 'mountainlion'],
+        'win': ['xp', 'win7'],
+        'linux': ['lucid'],
+        'android': ['icecreamsandwich'],
+    }
+
+    DEFAULT_BUILD_DIRECTORIES = ('out',)
+
+    @classmethod
+    def _static_build_path(cls, filesystem, build_directory, chromium_base, webkit_base, configuration, comps):
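+        # Search order: an explicit build_directory wins; otherwise look for
+        # out/<configuration> under the Chromium checkout, then under the
+        # WebKit checkout, and finally fall back to the last candidate even if
+        # it does not exist.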
+        if build_directory:
+            return filesystem.join(build_directory, configuration, *comps)
+
+        for directory in cls.DEFAULT_BUILD_DIRECTORIES:
+            base_dir = filesystem.join(chromium_base, directory, configuration)
+            if filesystem.exists(base_dir):
+                return filesystem.join(base_dir, *comps)
+
+        for directory in cls.DEFAULT_BUILD_DIRECTORIES:
+            base_dir = filesystem.join(webkit_base, directory, configuration)
+            if filesystem.exists(base_dir):
+                return filesystem.join(base_dir, *comps)
+
+        # We have to default to something, so pick the last one.
+        return filesystem.join(base_dir, *comps)
+
+    @classmethod
+    def _chromium_base_dir(cls, filesystem):
+        module_path = filesystem.path_to_module(cls.__module__)
+        offset = module_path.find('third_party')
+        if offset == -1:
+            return filesystem.join(module_path[0:module_path.find('Tools')], 'Source', 'WebKit', 'chromium')
+        else:
+            return module_path[0:offset]
+
+    def __init__(self, host, port_name, **kwargs):
+        super(ChromiumPort, self).__init__(host, port_name, **kwargs)
+        # All sub-classes override this, but we need an initial value for testing.
+        self._chromium_base_dir_path = None
+
+    def is_chromium(self):
+        return True
+
+    def default_max_locked_shards(self):
+        """Return the number of "locked" shards to run in parallel (like the http tests)."""
+        max_locked_shards = int(self.default_child_processes()) / 4
+        if not max_locked_shards:
+            return 1
+        return max_locked_shards
+
+    def default_pixel_tests(self):
+        return True
+
+    def default_baseline_search_path(self):
+        return map(self._webkit_baseline_path, self.FALLBACK_PATHS[self.version()])
+
+    def default_timeout_ms(self):
+        if self.get_option('configuration') == 'Debug':
+            return 12 * 1000
+        return 6 * 1000
+
+    def _check_file_exists(self, path_to_file, file_description,
+                           override_step=None, logging=True):
+        """Verify the file is present where expected or log an error.
+
+        Args:
+            path_to_file: The path to the file whose existence is being checked.
+            file_description: The (human friendly) name or description of the file
+                you're looking for (e.g., "HTTP Server"). Used for error logging.
+            override_step: An optional string to be logged if the check fails.
+            logging: Whether or not to log the error messages."""
+        if not self._filesystem.exists(path_to_file):
+            if logging:
+                _log.error('Unable to find %s' % file_description)
+                _log.error('    at %s' % path_to_file)
+                if override_step:
+                    _log.error('    %s' % override_step)
+                    _log.error('')
+            return False
+        return True
+
+    def check_build(self, needs_http):
+        result = True
+
+        dump_render_tree_binary_path = self._path_to_driver()
+        result = self._check_file_exists(dump_render_tree_binary_path,
+                                         'test driver') and result
+        if result and self.get_option('build'):
+            result = self._check_driver_build_up_to_date(
+                self.get_option('configuration'))
+        else:
+            _log.error('')
+
+        helper_path = self._path_to_helper()
+        if helper_path:
+            result = self._check_file_exists(helper_path,
+                                             'layout test helper') and result
+
+        if self.get_option('pixel_tests'):
+            result = self.check_image_diff(
+                'To override, invoke with --no-pixel-tests') and result
+
+        # It's okay if pretty patch and wdiff aren't available, but we will at least log messages.
+        self._pretty_patch_available = self.check_pretty_patch()
+        self._wdiff_available = self.check_wdiff()
+
+        return result
+
+    def check_sys_deps(self, needs_http):
+        result = super(ChromiumPort, self).check_sys_deps(needs_http)
+
+        cmd = [self._path_to_driver(), '--check-layout-test-sys-deps']
+
+        local_error = executive.ScriptError()
+
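+        # The error handler keeps run_command from raising on a non-zero exit
+        # code; we record the code here so the driver's own output can be
+        # logged below instead.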
+        def error_handler(script_error):
+            local_error.exit_code = script_error.exit_code
+
+        output = self._executive.run_command(cmd, error_handler=error_handler)
+        if local_error.exit_code:
+            _log.error('System dependencies check failed.')
+            _log.error('To override, invoke with --nocheck-sys-deps')
+            _log.error('')
+            _log.error(output)
+            return False
+        return result
+
+    def check_image_diff(self, override_step=None, logging=True):
+        image_diff_path = self._path_to_image_diff()
+        return self._check_file_exists(image_diff_path, 'image diff exe',
+                                       override_step, logging)
+
+    def diff_image(self, expected_contents, actual_contents, tolerance=None):
+        # tolerance is not used in chromium. Make sure caller doesn't pass tolerance other than zero or None.
+        assert (tolerance is None) or tolerance == 0
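+        # The return value is a (diff_image, percent_difference, error_string)
+        # tuple; this port always reports 0 for the percentage (see the FIXME
+        # at the bottom of this method).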
+
+        # If only one of them exists, return that one.
+        if not actual_contents and not expected_contents:
+            return (None, 0, None)
+        if not actual_contents:
+            return (expected_contents, 0, None)
+        if not expected_contents:
+            return (actual_contents, 0, None)
+
+        tempdir = self._filesystem.mkdtemp()
+
+        expected_filename = self._filesystem.join(str(tempdir), "expected.png")
+        self._filesystem.write_binary_file(expected_filename, expected_contents)
+
+        actual_filename = self._filesystem.join(str(tempdir), "actual.png")
+        self._filesystem.write_binary_file(actual_filename, actual_contents)
+
+        diff_filename = self._filesystem.join(str(tempdir), "diff.png")
+
+        native_expected_filename = self._convert_path(expected_filename)
+        native_actual_filename = self._convert_path(actual_filename)
+        native_diff_filename = self._convert_path(diff_filename)
+
+        executable = self._path_to_image_diff()
+        # Note that although we are handed 'old', 'new', image_diff wants 'new', 'old'.
+        command = [executable, '--diff', native_actual_filename, native_expected_filename, native_diff_filename]
+
+        result = None
+        err_str = None
+        try:
+            exit_code = self._executive.run_command(command, return_exit_code=True)
+            if exit_code == 0:
+                # The images are the same.
+                result = None
+            elif exit_code == 1:
+                result = self._filesystem.read_binary_file(native_diff_filename)
+            else:
+                err_str = "image diff returned an exit code of %s" % exit_code
+        except OSError, e:
+            err_str = 'error running image diff: %s' % str(e)
+        finally:
+            self._filesystem.rmtree(str(tempdir))
+
+        return (result, 0, err_str or None)  # FIXME: how to get % diff?
+
+    def path_from_chromium_base(self, *comps):
+        """Returns the full path to path made by joining the top of the
+        Chromium source tree and the list of path components in |*comps|."""
+        if self._chromium_base_dir_path is None:
+            self._chromium_base_dir_path = self._chromium_base_dir(self._filesystem)
+        return self._filesystem.join(self._chromium_base_dir_path, *comps)
+
+    def setup_environ_for_server(self, server_name=None):
+        clean_env = super(ChromiumPort, self).setup_environ_for_server(server_name)
+        # Webkit Linux (valgrind layout) bot needs these envvars.
+        self._copy_value_from_environ_if_set(clean_env, 'VALGRIND_LIB')
+        self._copy_value_from_environ_if_set(clean_env, 'VALGRIND_LIB_INNER')
+        return clean_env
+
+    def default_results_directory(self):
+        try:
+            return self.path_from_chromium_base('webkit', self.get_option('configuration'), 'layout-test-results')
+        except AssertionError:
+            return self._build_path('layout-test-results')
+
+    def _missing_symbol_to_skipped_tests(self):
+        # FIXME: Should WebKitPort have these definitions also?
+        return {
+            "ff_mp3_decoder": ["webaudio/codec-tests/mp3"],
+            "ff_aac_decoder": ["webaudio/codec-tests/aac"],
+        }
+
+    def skipped_layout_tests(self, test_list):
+        # FIXME: Merge w/ WebKitPort.skipped_layout_tests()
+        return set(self._skipped_tests_for_unsupported_features(test_list))
+
+    def setup_test_run(self):
+        # Delete the disk cache, if any, to ensure a clean test run.
+        dump_render_tree_binary_path = self._path_to_driver()
+        cachedir = self._filesystem.dirname(dump_render_tree_binary_path)
+        cachedir = self._filesystem.join(cachedir, "cache")
+        if self._filesystem.exists(cachedir):
+            self._filesystem.rmtree(cachedir)
+
+    def start_helper(self):
+        helper_path = self._path_to_helper()
+        if helper_path:
+            _log.debug("Starting layout helper %s" % helper_path)
+            # Note: Not thread safe: http://bugs.python.org/issue2320
+            self._helper = subprocess.Popen([helper_path],
+                stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None)
+            is_ready = self._helper.stdout.readline()
+            if not is_ready.startswith('ready'):
+                _log.error("layout_test_helper failed to be ready")
+
+    def stop_helper(self):
+        if self._helper:
+            _log.debug("Stopping layout test helper")
+            try:
+                self._helper.stdin.write("x\n")
+                self._helper.stdin.close()
+                self._helper.wait()
+            except IOError, e:
+                pass
+            finally:
+                self._helper = None
+
+    def exit_code_from_summarized_results(self, unexpected_results):
+        # Turn bots red for missing results.
+        return unexpected_results['num_regressions'] + unexpected_results['num_missing']
+
+    def configuration_specifier_macros(self):
+        return self.CONFIGURATION_SPECIFIER_MACROS
+
+    def all_baseline_variants(self):
+        return self.ALL_BASELINE_VARIANTS
+
+    def _generate_all_test_configurations(self):
+        """Returns a sequence of the TestConfigurations the port supports."""
+        # By default, we assume we want to test every graphics type in
+        # every configuration on every system.
+        test_configurations = []
+        for version, architecture in self.ALL_SYSTEMS:
+            for build_type in self.ALL_BUILD_TYPES:
+                test_configurations.append(TestConfiguration(version, architecture, build_type))
+        return test_configurations
+
+    try_builder_names = frozenset([
+        'linux_layout',
+        'mac_layout',
+        'win_layout',
+        'linux_layout_rel',
+        'mac_layout_rel',
+        'win_layout_rel',
+    ])
+
+    def warn_if_bug_missing_in_test_expectations(self):
+        return True
+
+    def expectations_files(self):
+        paths = [self.path_to_test_expectations_file()]
+        skia_expectations_path = self.path_from_chromium_base('skia', 'skia_test_expectations.txt')
+        # FIXME: we should probably warn if this file is missing in some situations.
+        # See the discussion in webkit.org/b/97699.
+        if self._filesystem.exists(skia_expectations_path):
+            paths.append(skia_expectations_path)
+
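+        # Builders whose names contain '(deps)', the try builders listed below,
+        # and local runs (which fall back to 'DUMMY_BUILDER_NAME') also consult
+        # Chromium's downstream test_expectations.txt.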
+        builder_name = self.get_option('builder_name', 'DUMMY_BUILDER_NAME')
+        if builder_name == 'DUMMY_BUILDER_NAME' or '(deps)' in builder_name or builder_name in self.try_builder_names:
+            paths.append(self.path_from_chromium_base('webkit', 'tools', 'layout_tests', 'test_expectations.txt'))
+        return paths
+
+    def repository_paths(self):
+        repos = super(ChromiumPort, self).repository_paths()
+        repos.append(('chromium', self.path_from_chromium_base('build')))
+        return repos
+
+    def _get_crash_log(self, name, pid, stdout, stderr, newer_than):
+        if stderr and 'AddressSanitizer' in stderr:
+            asan_filter_path = self.path_from_chromium_base('tools', 'valgrind', 'asan', 'asan_symbolize.py')
+            if self._filesystem.exists(asan_filter_path):
+                output = self._executive.run_command([asan_filter_path], input=stderr, decode_output=False)
+                stderr = self._executive.run_command(['c++filt'], input=output, decode_output=False)
+
+        return super(ChromiumPort, self)._get_crash_log(name, pid, stdout, stderr, newer_than)
+
+    def virtual_test_suites(self):
+        return [
+            VirtualTestSuite('platform/chromium/virtual/gpu/fast/canvas',
+                             'fast/canvas',
+                             ['--enable-accelerated-2d-canvas']),
+            VirtualTestSuite('platform/chromium/virtual/gpu/canvas/philip',
+                             'canvas/philip',
+                             ['--enable-accelerated-2d-canvas']),
+            VirtualTestSuite('platform/chromium/virtual/threaded/compositing/visibility',
+                             'compositing/visibility',
+                             ['--enable-threaded-compositing']),
+            VirtualTestSuite('platform/chromium/virtual/threaded/compositing/webgl',
+                             'compositing/webgl',
+                             ['--enable-threaded-compositing']),
+            VirtualTestSuite('platform/chromium/virtual/gpu/fast/hidpi',
+                             'fast/hidpi',
+                             ['--force-compositing-mode']),
+            VirtualTestSuite('platform/chromium/virtual/softwarecompositing',
+                             'compositing',
+                             ['--enable-software-compositing']),
+            VirtualTestSuite('platform/chromium/virtual/deferred/fast/images',
+                             'fast/images',
+                             ['--enable-deferred-image-decoding', '--enable-per-tile-painting', '--force-compositing-mode']),
+        ]
+
+    #
+    # PROTECTED METHODS
+    #
+    # These routines should only be called by other methods in this file
+    # or any subclasses.
+    #
+
+    def _build_path(self, *comps):
+        return self._build_path_with_configuration(None, *comps)
+
+    def _build_path_with_configuration(self, configuration, *comps):
+        # Note that we don't implement --root or do the option caching that the
+        # base class does, because chromium doesn't use 'webkit-build-directory' and
+        # hence finding the right directory is relatively fast.
+        configuration = configuration or self.get_option('configuration')
+        return self._static_build_path(self._filesystem, self.get_option('build_directory'),
+            self.path_from_chromium_base(), self.path_from_webkit_base(), configuration, comps)
+
+    def _path_to_image_diff(self):
+        binary_name = 'ImageDiff'
+        return self._build_path(binary_name)
+
+    def _check_driver_build_up_to_date(self, configuration):
+        if configuration in ('Debug', 'Release'):
+            try:
+                debug_path = self._path_to_driver('Debug')
+                release_path = self._path_to_driver('Release')
+
+                debug_mtime = self._filesystem.mtime(debug_path)
+                release_mtime = self._filesystem.mtime(release_path)
+
+                if (debug_mtime > release_mtime and configuration == 'Release' or
+                    release_mtime > debug_mtime and configuration == 'Debug'):
+                    most_recent_binary = 'Release' if configuration == 'Debug' else 'Debug'
+                    _log.warning('You are running the %s binary. However the %s binary appears to be more recent. '
+                                 'Please pass --%s.', configuration, most_recent_binary, most_recent_binary.lower())
+                    _log.warning('')
+            # This will fail if we don't have both a debug and release binary.
+            # That's fine because, in this case, we must already be running the
+            # most up-to-date one.
+            except OSError:
+                pass
+        return True
+
+    def _chromium_baseline_path(self, platform):
+        if platform is None:
+            platform = self.name()
+        return self.path_from_webkit_base('LayoutTests', 'platform', platform)
+
+    def _convert_path(self, path):
+        """Handles filename conversion for subprocess command line args."""
+        # See note above in diff_image() for why we need this.
+        if sys.platform == 'cygwin':
+            return cygpath(path)
+        return path
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py
new file mode 100644
index 0000000..b8ac55a
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py
@@ -0,0 +1,675 @@
+#!/usr/bin/env python
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import copy
+import logging
+import os
+import re
+import subprocess
+import threading
+import time
+
+from webkitpy.layout_tests.port import chromium
+from webkitpy.layout_tests.port import driver
+from webkitpy.layout_tests.port import factory
+from webkitpy.layout_tests.port import server_process
+
+
+_log = logging.getLogger(__name__)
+
+
+# The root directory for test resources, which has the same structure as the
+# source root directory of Chromium.
+# This path is defined in Chromium's base/test/test_support_android.cc.
+DEVICE_SOURCE_ROOT_DIR = '/data/local/tmp/'
+COMMAND_LINE_FILE = DEVICE_SOURCE_ROOT_DIR + 'chrome-native-tests-command-line'
+
+# The directory to put tools and resources of DumpRenderTree.
+# If you change this, you must also change Tools/DumpRenderTree/chromium/TestShellAndroid.cpp
+# and Chromium's webkit/support/platform_support_android.cc.
+DEVICE_DRT_DIR = DEVICE_SOURCE_ROOT_DIR + 'drt/'
+DEVICE_FORWARDER_PATH = DEVICE_DRT_DIR + 'forwarder'
+
+# Path on the device where the test framework will create the fifo pipes.
+DEVICE_FIFO_PATH = '/data/data/org.chromium.native_test/files/'
+
+DRT_APP_PACKAGE = 'org.chromium.native_test'
+DRT_ACTIVITY_FULL_NAME = DRT_APP_PACKAGE + '/.ChromeNativeTestActivity'
+DRT_APP_CACHE_DIR = DEVICE_DRT_DIR + 'cache/'
+
+SCALING_GOVERNORS_PATTERN = "/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor"
+
+# All the test cases are still served to DumpRenderTree through the file
+# protocol, but we use a file-to-http feature to bridge the file requests to
+# the host's http server to get the real test files and corresponding resources.
+TEST_PATH_PREFIX = '/all-tests'
+
+# All ports for the Android forwarder to forward.
+# 8000, 8080 and 8443 are for http/https tests.
+# 8880 and 9323 are for websocket tests
+# (see http_server.py, apache_http_server.py and websocket_server.py).
+FORWARD_PORTS = '8000 8080 8443 8880 9323'
+
+MS_TRUETYPE_FONTS_DIR = '/usr/share/fonts/truetype/msttcorefonts/'
+MS_TRUETYPE_FONTS_PACKAGE = 'ttf-mscorefonts-installer'
+
+# Timeout in seconds to wait for start/stop of DumpRenderTree.
+DRT_START_STOP_TIMEOUT_SECS = 10
+
+# List of fonts that layout tests expect, copied from DumpRenderTree/chromium/TestShellX11.cpp.
+HOST_FONT_FILES = [
+    [[MS_TRUETYPE_FONTS_DIR], 'Arial.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+    [[MS_TRUETYPE_FONTS_DIR], 'Arial_Bold.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+    [[MS_TRUETYPE_FONTS_DIR], 'Arial_Bold_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+    [[MS_TRUETYPE_FONTS_DIR], 'Arial_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+    [[MS_TRUETYPE_FONTS_DIR], 'Comic_Sans_MS.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+    [[MS_TRUETYPE_FONTS_DIR], 'Comic_Sans_MS_Bold.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+    [[MS_TRUETYPE_FONTS_DIR], 'Courier_New.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+    [[MS_TRUETYPE_FONTS_DIR], 'Courier_New_Bold.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+    [[MS_TRUETYPE_FONTS_DIR], 'Courier_New_Bold_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+    [[MS_TRUETYPE_FONTS_DIR], 'Courier_New_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+    [[MS_TRUETYPE_FONTS_DIR], 'Georgia.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+    [[MS_TRUETYPE_FONTS_DIR], 'Georgia_Bold.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+    [[MS_TRUETYPE_FONTS_DIR], 'Georgia_Bold_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+    [[MS_TRUETYPE_FONTS_DIR], 'Georgia_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+    [[MS_TRUETYPE_FONTS_DIR], 'Impact.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+    [[MS_TRUETYPE_FONTS_DIR], 'Trebuchet_MS.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+    [[MS_TRUETYPE_FONTS_DIR], 'Trebuchet_MS_Bold.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+    [[MS_TRUETYPE_FONTS_DIR], 'Trebuchet_MS_Bold_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+    [[MS_TRUETYPE_FONTS_DIR], 'Trebuchet_MS_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+    [[MS_TRUETYPE_FONTS_DIR], 'Times_New_Roman.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+    [[MS_TRUETYPE_FONTS_DIR], 'Times_New_Roman_Bold.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+    [[MS_TRUETYPE_FONTS_DIR], 'Times_New_Roman_Bold_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+    [[MS_TRUETYPE_FONTS_DIR], 'Times_New_Roman_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+    [[MS_TRUETYPE_FONTS_DIR], 'Verdana.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+    [[MS_TRUETYPE_FONTS_DIR], 'Verdana_Bold.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+    [[MS_TRUETYPE_FONTS_DIR], 'Verdana_Bold_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+    [[MS_TRUETYPE_FONTS_DIR], 'Verdana_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+    # The Microsoft font EULA
+    [['/usr/share/doc/ttf-mscorefonts-installer/'], 'READ_ME!.gz', MS_TRUETYPE_FONTS_PACKAGE],
+    # Other fonts: Arabic, CJK, Indic, Thai, etc.
+    [['/usr/share/fonts/truetype/ttf-dejavu/'], 'DejaVuSans.ttf', 'ttf-dejavu'],
+    [['/usr/share/fonts/truetype/kochi/'], 'kochi-mincho.ttf', 'ttf-kochi-mincho'],
+    [['/usr/share/fonts/truetype/ttf-indic-fonts-core/'], 'lohit_hi.ttf', 'ttf-indic-fonts-core'],
+    [['/usr/share/fonts/truetype/ttf-indic-fonts-core/'], 'lohit_ta.ttf', 'ttf-indic-fonts-core'],
+    [['/usr/share/fonts/truetype/ttf-indic-fonts-core/'], 'MuktiNarrow.ttf', 'ttf-indic-fonts-core'],
+    [['/usr/share/fonts/truetype/thai/', '/usr/share/fonts/truetype/tlwg/'], 'Garuda.ttf', 'fonts-tlwg-garuda'],
+    [['/usr/share/fonts/truetype/ttf-indic-fonts-core/', '/usr/share/fonts/truetype/ttf-punjabi-fonts/'], 'lohit_pa.ttf', 'ttf-indic-fonts-core'],
+]
+
+DEVICE_FONTS_DIR = DEVICE_DRT_DIR + 'fonts/'
+
+# The layout tests directory on the device, which has two usages:
+# 1. as a virtual path in file URLs that will be bridged to HTTP.
+# 2. pointing to files that are pushed to the device for tests that
+# don't work over file-to-http (e.g. blob protocol tests).
+DEVICE_LAYOUT_TESTS_DIR = DEVICE_SOURCE_ROOT_DIR + 'third_party/WebKit/LayoutTests/'
+
+# Test resources that need to be accessed as files directly.
+# Each item can be the relative path of a directory or a file.
+TEST_RESOURCES_TO_PUSH = [
+    # Blob tests need to access files directly.
+    'editing/pasteboard/resources',
+    'fast/files/resources',
+    'http/tests/local/resources',
+    'http/tests/local/formdata/resources',
+    # User style URLs are accessed as local files in webkit_support.
+    'http/tests/security/resources/cssStyle.css',
+    # Media tests need to access audio/video as files.
+    'media/content',
+    'compositing/resources/video.mp4',
+]
+
+MD5SUM_DEVICE_FILE_NAME = 'md5sum_bin'
+MD5SUM_DEVICE_PATH = '/data/local/tmp/' + MD5SUM_DEVICE_FILE_NAME
+
+
+class ChromiumAndroidPort(chromium.ChromiumPort):
+    port_name = 'chromium-android'
+
+    FALLBACK_PATHS = [
+        'chromium-android',
+        'chromium-linux',
+        'chromium-win',
+        'chromium',
+    ]
+
+    def __init__(self, host, port_name, **kwargs):
+        super(ChromiumAndroidPort, self).__init__(host, port_name, **kwargs)
+
+        self._operating_system = 'android'
+        self._version = 'icecreamsandwich'
+
+        self._host_port = factory.PortFactory(host).get('chromium', **kwargs)
+        self._server_process_constructor = self._android_server_process_constructor
+
+        if hasattr(self._options, 'adb_device'):
+            self._devices = self._options.adb_device
+        else:
+            self._devices = []
+
+    @staticmethod
+    def _android_server_process_constructor(port, server_name, cmd_line, env=None):
+        return server_process.ServerProcess(port, server_name, cmd_line, env,
+                                            universal_newlines=True, treat_no_data_as_crash=True)
+
+    def additional_drt_flag(self):
+        # The Chromium port for Android always uses the hardware GPU path.
+        return ['--encode-binary', '--enable-hardware-gpu',
+                '--force-compositing-mode',
+                '--enable-accelerated-fixed-position']
+
+    def default_timeout_ms(self):
+        # The Android platform has less computing power than desktop platforms.
+        # Using 10 seconds allows us to pass most slow tests which are not
+        # marked as slow tests on desktop platforms.
+        return 10 * 1000
+
+    def driver_stop_timeout(self):
+        # DRT doesn't respond to closing stdin, so we might as well stop the driver immediately.
+        return 0.0
+
+    def default_child_processes(self):
+        return len(self._get_devices())
+
+    def default_baseline_search_path(self):
+        return map(self._webkit_baseline_path, self.FALLBACK_PATHS)
+
+    def check_wdiff(self, logging=True):
+        return self._host_port.check_wdiff(logging)
+
+    def check_build(self, needs_http):
+        result = super(ChromiumAndroidPort, self).check_build(needs_http)
+        result = self._check_file_exists(self._path_to_md5sum(), 'md5sum utility') and result
+        result = self._check_file_exists(self._path_to_forwarder(), 'forwarder utility') and result
+        if not result:
+            _log.error('For complete Android build requirements, please see:')
+            _log.error('')
+            _log.error('    http://code.google.com/p/chromium/wiki/AndroidBuildInstructions')
+
+        return result
+
+    def check_sys_deps(self, needs_http):
+        for (font_dirs, font_file, package) in HOST_FONT_FILES:
+            exists = False
+            for font_dir in font_dirs:
+                font_path = font_dir + font_file
+                if self._check_file_exists(font_path, '', logging=False):
+                    exists = True
+                    break
+            if not exists:
+                _log.error('You are missing %s under %s. Try installing %s. See build instructions.' % (font_file, font_dirs, package))
+                return False
+        return True
+
+    def expectations_files(self):
+        # LayoutTests/platform/chromium-android/TestExpectations should contain only the rules to
+        # skip tests for the features not supported or not testable on Android.
+        # Other rules should be in LayoutTests/platform/chromium/TestExpectations.
+        android_expectations_file = self.path_from_webkit_base('LayoutTests', 'platform', 'chromium-android', 'TestExpectations')
+        return super(ChromiumAndroidPort, self).expectations_files() + [android_expectations_file]
+
+    def requires_http_server(self):
+        """Chromium Android runs tests on devices, and uses the HTTP server to
+        serve the actual layout tests to DumpRenderTree."""
+        return True
+
+    def start_http_server(self, additional_dirs=None, number_of_servers=0):
+        if not additional_dirs:
+            additional_dirs = {}
+        additional_dirs[TEST_PATH_PREFIX] = self.layout_tests_dir()
+        super(ChromiumAndroidPort, self).start_http_server(additional_dirs, number_of_servers)
+
+    def create_driver(self, worker_number, no_timeout=False):
+        # We don't want the default DriverProxy which is not compatible with our driver.
+        # See comments in ChromiumAndroidDriver.start().
+        return ChromiumAndroidDriver(self, worker_number, pixel_tests=self.get_option('pixel_tests'),
+                                     # Force no timeout to avoid DumpRenderTree timeouts before NRWT.
+                                     no_timeout=True)
+
+    def driver_cmd_line(self):
+        # Override to return the actual DumpRenderTree command line.
+        return self.create_driver(0)._drt_cmd_line(self.get_option('pixel_tests'), [])
+
+    # Overridden private functions.
+
+    def _build_path(self, *comps):
+        return self._host_port._build_path(*comps)
+
+    def _build_path_with_configuration(self, configuration, *comps):
+        return self._host_port._build_path_with_configuration(configuration, *comps)
+
+    def _path_to_apache(self):
+        return self._host_port._path_to_apache()
+
+    def _path_to_apache_config_file(self):
+        return self._host_port._path_to_apache_config_file()
+
+    def _path_to_driver(self, configuration=None):
+        return self._build_path_with_configuration(configuration, 'DumpRenderTree_apk/DumpRenderTree-debug.apk')
+
+    def _path_to_helper(self):
+        return None
+
+    def _path_to_forwarder(self):
+        return self._build_path('forwarder')
+
+    def _path_to_md5sum(self):
+        return self._build_path(MD5SUM_DEVICE_FILE_NAME)
+
+    def _path_to_image_diff(self):
+        return self._host_port._path_to_image_diff()
+
+    def _path_to_lighttpd(self):
+        return self._host_port._path_to_lighttpd()
+
+    def _path_to_lighttpd_modules(self):
+        return self._host_port._path_to_lighttpd_modules()
+
+    def _path_to_lighttpd_php(self):
+        return self._host_port._path_to_lighttpd_php()
+
+    def _path_to_wdiff(self):
+        return self._host_port._path_to_wdiff()
+
+    def _shut_down_http_server(self, pid):
+        return self._host_port._shut_down_http_server(pid)
+
+    def _driver_class(self):
+        return ChromiumAndroidDriver
+
+    # Local private functions.
+
+    def _get_devices(self):
+        if not self._devices:
+            re_device = re.compile('^([a-zA-Z0-9_:.-]+)\tdevice$', re.MULTILINE)
+            result = self._executive.run_command(['adb', 'devices'], error_handler=self._executive.ignore_error)
+            self._devices = re_device.findall(result)
+            if not self._devices:
+                raise AssertionError('No devices attached. Result of "adb devices": %s' % result)
+        return self._devices
+
+    def _get_device_serial(self, worker_number):
+        devices = self._get_devices()
+        if worker_number >= len(devices):
+            raise AssertionError('Worker number exceeds available number of devices')
+        return devices[worker_number]
+
+
+class ChromiumAndroidDriver(driver.Driver):
+    def __init__(self, port, worker_number, pixel_tests, no_timeout=False):
+        super(ChromiumAndroidDriver, self).__init__(port, worker_number, pixel_tests, no_timeout)
+        self._cmd_line = None
+        self._in_fifo_path = DEVICE_FIFO_PATH + 'stdin.fifo'
+        self._out_fifo_path = DEVICE_FIFO_PATH + 'test.fifo'
+        self._err_fifo_path = DEVICE_FIFO_PATH + 'stderr.fifo'
+        self._read_stdout_process = None
+        self._read_stderr_process = None
+        self._forwarder_process = None
+        self._has_setup = False
+        self._original_governors = {}
+        self._device_serial = port._get_device_serial(worker_number)
+        self._adb_command = ['adb', '-s', self._device_serial]
+
+    def __del__(self):
+        self._teardown_performance()
+        super(ChromiumAndroidDriver, self).__del__()
+
+    def _setup_md5sum_and_push_data_if_needed(self):
+        self._md5sum_path = self._port._path_to_md5sum()
+        if not self._file_exists_on_device(MD5SUM_DEVICE_PATH):
+            if not self._push_to_device(self._md5sum_path, MD5SUM_DEVICE_PATH):
+                raise AssertionError('Could not push md5sum to device')
+
+        self._push_executable()
+        self._push_fonts()
+        self._push_test_resources()
+
+    def _setup_test(self):
+        if self._has_setup:
+            return
+
+        self._setup_md5sum_and_push_data_if_needed()
+        self._has_setup = True
+        self._run_adb_command(['root'])
+        self._setup_performance()
+        # Required by webkit_support::GetWebKitRootDirFilePath().
+        # Other directories will be created automatically by adb push.
+        self._run_adb_command(['shell', 'mkdir', '-p', DEVICE_SOURCE_ROOT_DIR + 'chrome'])
+
+        # Allow the DumpRenderTree app to fully access the directory.
+        # The native code needs the permission to write temporary files and create pipes here.
+        self._run_adb_command(['shell', 'mkdir', '-p', DEVICE_DRT_DIR])
+        self._run_adb_command(['shell', 'chmod', '777', DEVICE_DRT_DIR])
+
+        # Delete the disk cache, if any, to ensure a clean test run.
+        # This is like what's done in ChromiumPort.setup_test_run but on the device.
+        self._run_adb_command(['shell', 'rm', '-r', DRT_APP_CACHE_DIR])
+
+    def _log_error(self, message):
+        _log.error('[%s] %s' % (self._device_serial, message))
+
+    def _log_debug(self, message):
+        _log.debug('[%s] %s' % (self._device_serial, message))
+
+    def _abort(self, message):
+        raise AssertionError('[%s] %s' % (self._device_serial, message))
+
+    @staticmethod
+    def _extract_hashes_from_md5sum_output(md5sum_output):
+        assert md5sum_output
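+        # md5sum prints "<hash>  <path>" per line; keep only the hash so host
+        # and device outputs can be compared even though the paths differ.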
+        return [line.split('  ')[0] for line in md5sum_output]
+
+    def _push_file_if_needed(self, host_file, device_file):
+        assert os.path.exists(host_file)
+        device_hashes = self._extract_hashes_from_md5sum_output(
+                self._port.host.executive.popen(self._adb_command + ['shell', MD5SUM_DEVICE_PATH, device_file],
+                                                stdout=subprocess.PIPE).stdout)
+        host_hashes = self._extract_hashes_from_md5sum_output(
+                self._port.host.executive.popen(args=['%s_host' % self._md5sum_path, host_file],
+                                                stdout=subprocess.PIPE).stdout)
+        if host_hashes and device_hashes == host_hashes:
+            return
+        self._push_to_device(host_file, device_file)
+
+    def _push_executable(self):
+        self._push_file_if_needed(self._port._path_to_forwarder(), DEVICE_FORWARDER_PATH)
+        self._push_file_if_needed(self._port._build_path('DumpRenderTree.pak'), DEVICE_DRT_DIR + 'DumpRenderTree.pak')
+        self._push_file_if_needed(self._port._build_path('DumpRenderTree_resources'), DEVICE_DRT_DIR + 'DumpRenderTree_resources')
+        self._push_file_if_needed(self._port._build_path('android_main_fonts.xml'), DEVICE_DRT_DIR + 'android_main_fonts.xml')
+        self._push_file_if_needed(self._port._build_path('android_fallback_fonts.xml'), DEVICE_DRT_DIR + 'android_fallback_fonts.xml')
+        self._run_adb_command(['uninstall', DRT_APP_PACKAGE])
+        drt_host_path = self._port._path_to_driver()
+        install_result = self._run_adb_command(['install', drt_host_path])
+        if install_result.find('Success') == -1:
+            self._abort('Failed to install %s onto device: %s' % (drt_host_path, install_result))
+
+    def _push_fonts(self):
+        self._log_debug('Pushing fonts')
+        path_to_ahem_font = self._port._build_path('AHEM____.TTF')
+        self._push_file_if_needed(path_to_ahem_font, DEVICE_FONTS_DIR + 'AHEM____.TTF')
+        for (host_dirs, font_file, package) in HOST_FONT_FILES:
+            for host_dir in host_dirs:
+                host_font_path = host_dir + font_file
+                if self._port._check_file_exists(host_font_path, '', logging=False):
+                    self._push_file_if_needed(host_font_path, DEVICE_FONTS_DIR + font_file)
+
+    def _push_test_resources(self):
+        self._log_debug('Pushing test resources')
+        for resource in TEST_RESOURCES_TO_PUSH:
+            self._push_file_if_needed(self._port.layout_tests_dir() + '/' + resource, DEVICE_LAYOUT_TESTS_DIR + resource)
+
+    def _run_adb_command(self, cmd, ignore_error=False):
+        self._log_debug('Run adb command: ' + str(cmd))
+        if ignore_error:
+            error_handler = self._port._executive.ignore_error
+        else:
+            error_handler = None
+        result = self._port._executive.run_command(self._adb_command + cmd, error_handler=error_handler)
+        # Limit the length to avoid overly verbose output of commands like 'adb logcat' and 'cat /data/tombstones/tombstone01',
+        # whose outputs are normally printed in later logs.
+        self._log_debug('Run adb result: ' + result[:80])
+        return result
+
+    def _link_device_file(self, from_file, to_file, ignore_error=False):
+        # rm to_file first to make sure that ln succeeds.
+        self._run_adb_command(['shell', 'rm', to_file], ignore_error)
+        return self._run_adb_command(['shell', 'ln', '-s', from_file, to_file], ignore_error)
+
+    def _push_to_device(self, host_path, device_path, ignore_error=False):
+        return self._run_adb_command(['push', host_path, device_path], ignore_error)
+
+    def _pull_from_device(self, device_path, host_path, ignore_error=False):
+        return self._run_adb_command(['pull', device_path, host_path], ignore_error)
+
+    def _get_last_stacktrace(self):
+        tombstones = self._run_adb_command(['shell', 'ls', '-n', '/data/tombstones'])
+        if not tombstones or tombstones.startswith('/data/tombstones: No such file or directory'):
+            self._log_error('DRT crashed, but no tombstone found!')
+            return ''
+        tombstones = tombstones.rstrip().split('\n')
+        last_tombstone = tombstones[0].split()
+        for tombstone in tombstones[1:]:
+            # Format of fields:
+            # 0          1      2      3     4          5     6
+            # permission uid    gid    size  date       time  filename
+            # -rw------- 1000   1000   45859 2011-04-13 06:00 tombstone_00
+            fields = tombstone.split()
+            if (fields[4] + fields[5] >= last_tombstone[4] + last_tombstone[5]):
+                last_tombstone = fields
+            else:
+                break
+
+        # Use Android tool vendor/google/tools/stack to convert the raw
+        # stack trace into a human readable format, if needed.
+        # It takes a long time, so don't do it here.
+        return '%s\n%s' % (' '.join(last_tombstone),
+                           self._run_adb_command(['shell', 'cat', '/data/tombstones/' + last_tombstone[6]]))
+
+    def _get_logcat(self):
+        return self._run_adb_command(['logcat', '-d', '-v', 'threadtime'])
+
+    def _setup_performance(self):
+        # Disable CPU scaling to reduce noise in tests.
+        if not self._original_governors:
+            governor_files = self._run_adb_command(['shell', 'ls', SCALING_GOVERNORS_PATTERN])
+            if governor_files.find('No such file or directory') == -1:
+                for file in governor_files.split():
+                    self._original_governors[file] = self._run_adb_command(['shell', 'cat', file]).strip()
+                    self._run_adb_command(['shell', 'echo', 'performance', '>', file])
+
+    def _teardown_performance(self):
+        for file, original_content in self._original_governors.items():
+            self._run_adb_command(['shell', 'echo', original_content, '>', file])
+        self._original_governors = {}
+
+    def _command_wrapper(cls, wrapper_option):
+        # Ignore command wrapper which is not applicable on Android.
+        return []
+
+    def _get_crash_log(self, stdout, stderr, newer_than):
+        if not stdout:
+            stdout = ''
+        stdout += '********* [%s] Logcat:\n%s' % (self._device_serial, self._get_logcat())
+        if not stderr:
+            stderr = ''
+        stderr += '********* [%s] Tombstone file:\n%s' % (self._device_serial, self._get_last_stacktrace())
+        return super(ChromiumAndroidDriver, self)._get_crash_log(stdout, stderr, newer_than)
+
+    def cmd_line(self, pixel_tests, per_test_args):
+        # The returned command line is used to start _server_process. In our case, it's an interactive 'adb shell'.
+        # The command line passed to the DRT process is returned by _drt_cmd_line() instead.
+        return self._adb_command + ['shell']
+
+    def _file_exists_on_device(self, full_file_path):
+        assert full_file_path.startswith('/')
+        return self._run_adb_command(['shell', 'ls', full_file_path]).strip() == full_file_path
+
+    def _drt_cmd_line(self, pixel_tests, per_test_args):
+        return driver.Driver.cmd_line(self, pixel_tests, per_test_args) + ['--create-stdin-fifo', '--separate-stderr-fifo']
+
+    @staticmethod
+    def _loop_with_timeout(condition, timeout_secs):
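+        # Busy-waits (without sleeping) until condition() returns True or the
+        # timeout elapses; returns whether the condition was met in time.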
+        deadline = time.time() + timeout_secs
+        while time.time() < deadline:
+            if condition():
+                return True
+        return False
+
+    def _all_pipes_created(self):
+        return (self._file_exists_on_device(self._in_fifo_path) and
+                self._file_exists_on_device(self._out_fifo_path) and
+                self._file_exists_on_device(self._err_fifo_path))
+
+    def _remove_all_pipes(self):
+        for file in [self._in_fifo_path, self._out_fifo_path, self._err_fifo_path]:
+            self._run_adb_command(['shell', 'rm', file])
+
+        return (not self._file_exists_on_device(self._in_fifo_path) and
+                not self._file_exists_on_device(self._out_fifo_path) and
+                not self._file_exists_on_device(self._err_fifo_path))
+
+    def run_test(self, driver_input, stop_when_done):
+        base = self._port.lookup_virtual_test_base(driver_input.test_name)
+        if base:
+            driver_input = copy.copy(driver_input)
+            driver_input.args = self._port.lookup_virtual_test_args(driver_input.test_name)
+            driver_input.test_name = base
+        return super(ChromiumAndroidDriver, self).run_test(driver_input, stop_when_done)
+
+    def start(self, pixel_tests, per_test_args):
+        # Only one driver instance is allowed because of the nature of Android activity.
+        # The single driver needs to restart DumpRenderTree when the command line changes.
+        cmd_line = self._drt_cmd_line(pixel_tests, per_test_args)
+        if cmd_line != self._cmd_line:
+            self.stop()
+            self._cmd_line = cmd_line
+        super(ChromiumAndroidDriver, self).start(pixel_tests, per_test_args)
+
+    def _start(self, pixel_tests, per_test_args):
+        self._setup_test()
+
+        for retries in range(3):
+            if self._start_once(pixel_tests, per_test_args):
+                return
+            self._log_error('Failed to start DumpRenderTree application. Retries=%d. Log:%s' % (retries, self._get_logcat()))
+            self.stop()
+            time.sleep(2)
+        self._abort('Failed to start DumpRenderTree application multiple times. Giving up.')
+
+    def _start_once(self, pixel_tests, per_test_args):
+        super(ChromiumAndroidDriver, self)._start(pixel_tests, per_test_args)
+
+        self._log_debug('Starting forwarder')
+        self._forwarder_process = self._port._server_process_constructor(
+            self._port, 'Forwarder', self._adb_command + ['shell', '%s -D %s' % (DEVICE_FORWARDER_PATH, FORWARD_PORTS)])
+        self._forwarder_process.start()
+
+        self._run_adb_command(['logcat', '-c'])
+        self._run_adb_command(['shell', 'echo'] + self._cmd_line + ['>', COMMAND_LINE_FILE])
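+        # The '>' is passed through adb shell, so the device shell performs the
+        # redirection and writes the DRT command line to COMMAND_LINE_FILE.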
+        start_result = self._run_adb_command(['shell', 'am', 'start', '-e', 'RunInSubThread', '-n', DRT_ACTIVITY_FULL_NAME])
+        if start_result.find('Exception') != -1:
+            self._log_error('Failed to start DumpRenderTree application. Exception:\n' + start_result)
+            return False
+
+        if not ChromiumAndroidDriver._loop_with_timeout(self._all_pipes_created, DRT_START_STOP_TIMEOUT_SECS):
+            return False
+
+        # Read back the shell prompt to ensure the adb shell is ready.
+        deadline = time.time() + DRT_START_STOP_TIMEOUT_SECS
+        self._server_process.start()
+        self._read_prompt(deadline)
+        self._log_debug('Interactive shell started')
+
+        # Start a process to read from the stdout fifo of the DumpRenderTree app and print to stdout.
+        self._log_debug('Redirecting stdout to ' + self._out_fifo_path)
+        self._read_stdout_process = self._port._server_process_constructor(
+            self._port, 'ReadStdout', self._adb_command + ['shell', 'cat', self._out_fifo_path])
+        self._read_stdout_process.start()
+
+        # Start a process to read from the stderr fifo of the DumpRenderTree app and print to stdout.
+        self._log_debug('Redirecting stderr to ' + self._err_fifo_path)
+        self._read_stderr_process = self._port._server_process_constructor(
+            self._port, 'ReadStderr', self._adb_command + ['shell', 'cat', self._err_fifo_path])
+        self._read_stderr_process.start()
+
+        self._log_debug('Redirecting stdin to ' + self._in_fifo_path)
+        self._server_process.write('cat >%s\n' % self._in_fifo_path)
+
+        # Combine the stdout and stderr pipes into self._server_process.
+        self._server_process.replace_outputs(self._read_stdout_process._proc.stdout, self._read_stderr_process._proc.stdout)
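+        # From this point on, output read from self._server_process comes from the
+        # DumpRenderTree fifos (via the 'cat' reader processes above) rather than
+        # from the interactive adb shell.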
+
+        def deadlock_detector(processes, normal_startup_event):
+            if not ChromiumAndroidDriver._loop_with_timeout(lambda: normal_startup_event.is_set(), DRT_START_STOP_TIMEOUT_SECS):
+                # If normal_startup_event is not set in time, the main thread must be blocked at
+                # reading/writing the fifo. Kill the fifo reading/writing processes to let the
+                # main thread escape from the deadlocked state. After that, the main thread will
+                # treat this as a crash.
+                self._log_error('Deadlock detected. Processes killed.')
+                for i in processes:
+                    i.kill()
+
+        # Start a thread to kill the pipe reading/writing processes on deadlock of the fifos during startup.
+        normal_startup_event = threading.Event()
+        threading.Thread(name='DeadlockDetector', target=deadlock_detector,
+                         args=([self._server_process, self._read_stdout_process, self._read_stderr_process], normal_startup_event)).start()
+
+        output = ''
+        line = self._server_process.read_stdout_line(deadline)
+        while not self._server_process.timed_out and not self.has_crashed() and line.rstrip() != '#READY':
+            output += line
+            line = self._server_process.read_stdout_line(deadline)
+
+        if self._server_process.timed_out and not self.has_crashed():
+            # Either DumpRenderTree crashed during startup, or the deadlock detector
+            # detected a deadlock and killed the fifo reading/writing processes.
+            _log.error('Failed to start DumpRenderTree: \n%s' % output)
+            return False
+        else:
+            # Inform the deadlock detector that the startup is successful without deadlock.
+            normal_startup_event.set()
+            return True
+
+    def stop(self):
+        self._run_adb_command(['shell', 'am', 'force-stop', DRT_APP_PACKAGE])
+
+        if self._read_stdout_process:
+            self._read_stdout_process.kill()
+            self._read_stdout_process = None
+
+        if self._read_stderr_process:
+            self._read_stderr_process.kill()
+            self._read_stderr_process = None
+
+        super(ChromiumAndroidDriver, self).stop()
+
+        if self._forwarder_process:
+            self._forwarder_process.kill()
+            self._forwarder_process = None
+
+        if self._has_setup:
+            if not ChromiumAndroidDriver._loop_with_timeout(self._remove_all_pipes, DRT_START_STOP_TIMEOUT_SECS):
+                raise AssertionError('Failed to remove fifo files. May be locked.')
+
+    def _command_from_driver_input(self, driver_input):
+        command = super(ChromiumAndroidDriver, self)._command_from_driver_input(driver_input)
+        if command.startswith('/'):
+            # Convert the host file path to a device file path. See the comment on
+            # DEVICE_LAYOUT_TESTS_DIR for details.
+            # FIXME: what happens if command lies outside of the layout_tests_dir on the host?
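+            # For example (see chromium_android_unittest.py), a host path ending in
+            # 'foo/bar/test.html' becomes
+            # '/data/local/tmp/third_party/WebKit/LayoutTests/foo/bar/test.html'.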
+            command = DEVICE_LAYOUT_TESTS_DIR + self._port.relative_test_filename(command)
+        return command
+
+    def _read_prompt(self, deadline):
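+        # Consume output until an adb shell prompt is seen: either a rooted prompt
+        # ending in '# ' (e.g. 'root@android:/ # ') or an unrooted prompt ending in '$ '.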
+        last_char = ''
+        while True:
+            current_char = self._server_process.read_stdout(deadline, 1)
+            if current_char == ' ':
+                if last_char in ('#', '$'):
+                    return
+            last_char = current_char
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py
new file mode 100644
index 0000000..fce69c6
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py
@@ -0,0 +1,294 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import optparse
+import StringIO
+import time
+import unittest
+import sys
+
+from webkitpy.common.system import executive_mock
+from webkitpy.common.system.executive_mock import MockExecutive2
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+
+from webkitpy.layout_tests.port import chromium_android
+from webkitpy.layout_tests.port import chromium_port_testcase
+from webkitpy.layout_tests.port import driver
+from webkitpy.layout_tests.port import driver_unittest
+from webkitpy.tool.mocktool import MockOptions
+
+class MockRunCommand(object):
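+    """Simulates the output of adb commands; intended to be plugged into
+    MockExecutive2 via its run_command_fn argument."""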
+    def __init__(self):
+        self._mock_logcat = ''
+        self._mock_devices_output = ''
+        self._mock_devices = []
+        self._mock_ls_tombstones = ''
+
+    def mock_run_command_fn(self, args):
+        if args[0] != 'adb':
+            return ''
+        if args[1] == 'devices':
+            return self._mock_devices_output
+
+        assert len(args) > 3
+        assert args[1] == '-s'
+        assert args[2] in self._mock_devices
+        if args[3] == 'shell':
+            if args[4:] == ['ls', '-n', '/data/tombstones']:
+                return self._mock_ls_tombstones
+            elif args[4] == 'cat':
+                return args[5] + '\nmock_contents\n'
+        elif args[3] == 'logcat':
+            return self._mock_logcat
+        return ''
+
+    def mock_no_device(self):
+        self._mock_devices = []
+        self._mock_devices_output = 'List of devices attached'
+
+    def mock_one_device(self):
+        self._mock_devices = ['123456789ABCDEF0']
+        self._mock_devices_output = ('List of devices attached\n'
+                                     '%s\tdevice\n' % self._mock_devices[0])
+
+    def mock_two_devices(self):
+        self._mock_devices = ['123456789ABCDEF0', '23456789ABCDEF01']
+        self._mock_devices_output = ('* daemon not running. starting it now on port 5037 *'
+                                     '* daemon started successfully *'
+                                     'List of devices attached\n'
+                                     '%s\tdevice\n'
+                                     '%s\tdevice\n' % (self._mock_devices[0], self._mock_devices[1]))
+
+    def mock_no_tombstone_dir(self):
+        self._mock_ls_tombstones = '/data/tombstones: No such file or directory'
+
+    def mock_no_tombstone_file(self):
+        self._mock_ls_tombstones = ''
+
+    def mock_ten_tombstones(self):
+        self._mock_ls_tombstones = ('-rw------- 1000     1000       218643 2012-04-26 18:15 tombstone_00\n'
+                                    '-rw------- 1000     1000       241695 2012-04-26 18:15 tombstone_01\n'
+                                    '-rw------- 1000     1000       219472 2012-04-26 18:16 tombstone_02\n'
+                                    '-rw------- 1000     1000        45316 2012-04-27 16:33 tombstone_03\n'
+                                    '-rw------- 1000     1000        82022 2012-04-23 16:57 tombstone_04\n'
+                                    '-rw------- 1000     1000        82015 2012-04-23 16:57 tombstone_05\n'
+                                    '-rw------- 1000     1000        81974 2012-04-23 16:58 tombstone_06\n'
+                                    '-rw------- 1000     1000       237409 2012-04-26 17:41 tombstone_07\n'
+                                    '-rw------- 1000     1000       276089 2012-04-26 18:15 tombstone_08\n'
+                                    '-rw------- 1000     1000       219618 2012-04-26 18:15 tombstone_09\n')
+
+    def mock_logcat(self, content):
+        self._mock_logcat = content
+
+
+class ChromiumAndroidPortTest(chromium_port_testcase.ChromiumPortTestCase):
+    port_name = 'chromium-android'
+    port_maker = chromium_android.ChromiumAndroidPort
+
+    def make_port(self, **kwargs):
+        port = super(ChromiumAndroidPortTest, self).make_port(**kwargs)
+        self.mock_run_command = MockRunCommand()
+        self.mock_run_command.mock_one_device()
+        port._executive = MockExecutive2(run_command_fn=self.mock_run_command.mock_run_command_fn)
+        return port
+
+    def test_attributes(self):
+        port = self.make_port()
+        self.assertEquals(port.baseline_path(), port._webkit_baseline_path('chromium-android'))
+
+    def test_default_timeout_ms(self):
+        self.assertEquals(self.make_port(options=optparse.Values({'configuration': 'Release'})).default_timeout_ms(), 10000)
+        self.assertEquals(self.make_port(options=optparse.Values({'configuration': 'Debug'})).default_timeout_ms(), 10000)
+
+    def test_expectations_files(self):
+        # FIXME: override this test temporarily while we're still upstreaming the android port and
+        # using a custom expectations file.
+        pass
+
+    def test_get_devices_no_device(self):
+        port = self.make_port()
+        self.mock_run_command.mock_no_device()
+        self.assertRaises(AssertionError, port._get_devices)
+
+    def test_get_devices_one_device(self):
+        port = self.make_port()
+        self.mock_run_command.mock_one_device()
+        self.assertEquals(self.mock_run_command._mock_devices, port._get_devices())
+        self.assertEquals(1, port.default_child_processes())
+
+    def test_get_devices_two_devices(self):
+        port = self.make_port()
+        self.mock_run_command.mock_two_devices()
+        self.assertEquals(self.mock_run_command._mock_devices, port._get_devices())
+        self.assertEquals(2, port.default_child_processes())
+
+    def test_get_device_serial_no_device(self):
+        port = self.make_port()
+        self.mock_run_command.mock_no_device()
+        self.assertRaises(AssertionError, port._get_device_serial, 0)
+
+    def test_get_device_serial_one_device(self):
+        port = self.make_port()
+        self.mock_run_command.mock_one_device()
+        self.assertEquals(self.mock_run_command._mock_devices[0], port._get_device_serial(0))
+        self.assertRaises(AssertionError, port._get_device_serial, 1)
+
+    def test_get_device_serial_two_devices(self):
+        port = self.make_port()
+        self.mock_run_command.mock_two_devices()
+        self.assertEquals(self.mock_run_command._mock_devices[0], port._get_device_serial(0))
+        self.assertEquals(self.mock_run_command._mock_devices[1], port._get_device_serial(1))
+        self.assertRaises(AssertionError, port._get_device_serial, 2)
+
+    def test_must_require_http_server(self):
+        port = self.make_port()
+        self.assertEquals(port.requires_http_server(), True)
+
+
+class ChromiumAndroidDriverTest(unittest.TestCase):
+    def setUp(self):
+        self.mock_run_command = MockRunCommand()
+        self.mock_run_command.mock_one_device()
+        self.port = chromium_android.ChromiumAndroidPort(
+                MockSystemHost(executive=MockExecutive2(run_command_fn=self.mock_run_command.mock_run_command_fn)),
+                'chromium-android')
+        self.driver = chromium_android.ChromiumAndroidDriver(self.port, worker_number=0, pixel_tests=True)
+
+    def test_get_last_stacktrace(self):
+        self.mock_run_command.mock_no_tombstone_dir()
+        self.assertEquals(self.driver._get_last_stacktrace(), '')
+
+        self.mock_run_command.mock_no_tombstone_file()
+        self.assertEquals(self.driver._get_last_stacktrace(), '')
+
+        self.mock_run_command.mock_ten_tombstones()
+        self.assertEquals(self.driver._get_last_stacktrace(),
+                          '-rw------- 1000 1000 45316 2012-04-27 16:33 tombstone_03\n'
+                          '/data/tombstones/tombstone_03\nmock_contents\n')
+
+    def test_get_crash_log(self):
+        self.mock_run_command.mock_logcat('logcat contents\n')
+        self.mock_run_command.mock_ten_tombstones()
+        self.driver._crashed_process_name = 'foo'
+        self.driver._crashed_pid = 1234
+        self.assertEquals(self.driver._get_crash_log('out bar\nout baz\n', 'err bar\nerr baz\n', newer_than=None),
+            ('err bar\n'
+             'err baz\n'
+             '********* [123456789ABCDEF0] Tombstone file:\n'
+             '-rw------- 1000 1000 45316 2012-04-27 16:33 tombstone_03\n'
+             '/data/tombstones/tombstone_03\n'
+             'mock_contents\n',
+             u'crash log for foo (pid 1234):\n'
+             u'STDOUT: out bar\n'
+             u'STDOUT: out baz\n'
+             u'STDOUT: ********* [123456789ABCDEF0] Logcat:\n'
+             u'STDOUT: logcat contents\n'
+             u'STDERR: err bar\n'
+             u'STDERR: err baz\n'
+             u'STDERR: ********* [123456789ABCDEF0] Tombstone file:\n'
+             u'STDERR: -rw------- 1000 1000 45316 2012-04-27 16:33 tombstone_03\n'
+             u'STDERR: /data/tombstones/tombstone_03\n'
+             u'STDERR: mock_contents\n'))
+
+        self.driver._crashed_process_name = None
+        self.driver._crashed_pid = None
+        self.assertEquals(self.driver._get_crash_log(None, None, newer_than=None),
+            ('********* [123456789ABCDEF0] Tombstone file:\n'
+             '-rw------- 1000 1000 45316 2012-04-27 16:33 tombstone_03\n'
+             '/data/tombstones/tombstone_03\n'
+             'mock_contents\n',
+             u'crash log for <unknown process name> (pid <unknown>):\n'
+             u'STDOUT: ********* [123456789ABCDEF0] Logcat:\n'
+             u'STDOUT: logcat contents\n'
+             u'STDERR: ********* [123456789ABCDEF0] Tombstone file:\n'
+             u'STDERR: -rw------- 1000 1000 45316 2012-04-27 16:33 tombstone_03\n'
+             u'STDERR: /data/tombstones/tombstone_03\n'
+             u'STDERR: mock_contents\n'))
+
+    def test_cmd_line(self):
+        cmd_line = self.driver.cmd_line(True, ['anything'])
+        self.assertEquals(['adb', '-s', self.mock_run_command._mock_devices[0], 'shell'], cmd_line)
+
+    def test_drt_cmd_line(self):
+        cmd_line = self.driver._drt_cmd_line(True, ['--a'])
+        self.assertTrue('--a' in cmd_line)
+        self.assertTrue('--create-stdin-fifo' in cmd_line)
+        self.assertTrue('--separate-stderr-fifo' in cmd_line)
+
+    def test_read_prompt(self):
+        self.driver._server_process = driver_unittest.MockServerProcess(lines=['root@android:/ # '])
+        self.assertEquals(self.driver._read_prompt(time.time() + 1), None)
+        self.driver._server_process = driver_unittest.MockServerProcess(lines=['$ '])
+        self.assertEquals(self.driver._read_prompt(time.time() + 1), None)
+
+    def test_command_from_driver_input(self):
+        driver_input = driver.DriverInput('foo/bar/test.html', 10, 'checksum', True)
+        expected_command = "/data/local/tmp/third_party/WebKit/LayoutTests/foo/bar/test.html'--pixel-test'checksum\n"
+        if (sys.platform != "cygwin"):
+            self.assertEquals(self.driver._command_from_driver_input(driver_input), expected_command)
+
+        driver_input = driver.DriverInput('http/tests/foo/bar/test.html', 10, 'checksum', True)
+        expected_command = "http://127.0.0.1:8000/foo/bar/test.html'--pixel-test'checksum\n"
+        self.assertEquals(self.driver._command_from_driver_input(driver_input), expected_command)
+
+
+class ChromiumAndroidDriverTwoDriversTest(unittest.TestCase):
+    def test_two_drivers(self):
+        mock_run_command = MockRunCommand()
+        mock_run_command.mock_two_devices()
+        port = chromium_android.ChromiumAndroidPort(
+                MockSystemHost(executive=MockExecutive2(run_command_fn=mock_run_command.mock_run_command_fn)),
+                'chromium-android')
+        driver0 = chromium_android.ChromiumAndroidDriver(port, worker_number=0, pixel_tests=True)
+        driver1 = chromium_android.ChromiumAndroidDriver(port, worker_number=1, pixel_tests=True)
+
+        cmd_line0 = driver0.cmd_line(True, ['anything'])
+        self.assertEquals(['adb', '-s', mock_run_command._mock_devices[0], 'shell'], cmd_line0)
+
+        cmd_line1 = driver1.cmd_line(True, ['anything'])
+        self.assertEquals(['adb', '-s', mock_run_command._mock_devices[1], 'shell'], cmd_line1)
+
+
+class ChromiumAndroidTwoPortsTest(unittest.TestCase):
+    def test_options_with_two_ports(self):
+        options = MockOptions(additional_drt_flag=['--foo=bar', '--foo=baz'])
+        mock_run_command = MockRunCommand()
+        mock_run_command.mock_two_devices()
+        port0 = chromium_android.ChromiumAndroidPort(
+                MockSystemHost(executive=MockExecutive2(run_command_fn=mock_run_command.mock_run_command_fn)),
+                'chromium-android', options=options)
+        port1 = chromium_android.ChromiumAndroidPort(
+                MockSystemHost(executive=MockExecutive2(run_command_fn=mock_run_command.mock_run_command_fn)),
+                'chromium-android', options=options)
+        cmd_line = port1.driver_cmd_line()
+        self.assertEquals(cmd_line.count('--encode-binary'), 1)
+        self.assertEquals(cmd_line.count('--enable-hardware-gpu'), 1)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux.py
new file mode 100644
index 0000000..7c37fd1
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux.py
@@ -0,0 +1,170 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+
+from webkitpy.layout_tests.port import chromium
+from webkitpy.layout_tests.port import config
+
+
+_log = logging.getLogger(__name__)
+
+
+class ChromiumLinuxPort(chromium.ChromiumPort):
+    port_name = 'chromium-linux'
+
+    SUPPORTED_ARCHITECTURES = ('x86', 'x86_64')
+
+    FALLBACK_PATHS = {
+        'x86_64': [
+            'chromium-linux',
+            'chromium-win',
+            'chromium',
+        ],
+        'x86': [
+            'chromium-linux-x86',
+            'chromium-linux',
+            'chromium-win',
+            'chromium',
+        ],
+    }
+
+    DEFAULT_BUILD_DIRECTORIES = ('sconsbuild', 'out')
+
+    @classmethod
+    def _determine_driver_path_statically(cls, host, options):
+        config_object = config.Config(host.executive, host.filesystem)
+        build_directory = getattr(options, 'build_directory', None)
+        webkit_base = config_object.path_from_webkit_base()
+        chromium_base = cls._chromium_base_dir(host.filesystem)
+        if hasattr(options, 'configuration') and options.configuration:
+            configuration = options.configuration
+        else:
+            configuration = config_object.default_configuration()
+        return cls._static_build_path(host.filesystem, build_directory, chromium_base, webkit_base, configuration, ['DumpRenderTree'])
+
+    @staticmethod
+    def _determine_architecture(filesystem, executive, driver_path):
+        file_output = ''
+        if filesystem.exists(driver_path):
+            # The --dereference flag tells file to follow symlinks
+            file_output = executive.run_command(['file', '--dereference', driver_path], return_stderr=True)
+
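+        # Typical 'file' output looks like 'ELF 32-bit LSB executable, Intel 80386, ...'
+        # or 'ELF 64-bit LSB executable, x86-64, ...'.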
+        if 'ELF 32-bit LSB executable' in file_output:
+            return 'x86'
+        if 'ELF 64-bit LSB executable' in file_output:
+            return 'x86_64'
+        if file_output:
+            _log.warning('Could not determine architecture from "file" output: %s' % file_output)
+
+        # We don't know what the architecture is; default to 'x86_64' because
+        # maybe we're rebaselining and the binary doesn't actually exist,
+        # or something else weird is going on. It's okay to do this because
+        # if we actually try to use the binary, check_build() should fail.
+        return 'x86_64'
+
+    @classmethod
+    def determine_full_port_name(cls, host, options, port_name):
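+        # e.g. 'chromium-linux' becomes 'chromium-linux-x86' or 'chromium-linux-x86_64',
+        # depending on the architecture of the already-built DumpRenderTree binary.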
+        if port_name.endswith('-linux'):
+            return port_name + '-' + cls._determine_architecture(host.filesystem, host.executive, cls._determine_driver_path_statically(host, options))
+        return port_name
+
+    def __init__(self, host, port_name, **kwargs):
+        chromium.ChromiumPort.__init__(self, host, port_name, **kwargs)
+        (base, arch) = port_name.rsplit('-', 1)
+        assert base == 'chromium-linux'
+        assert arch in self.SUPPORTED_ARCHITECTURES
+        assert port_name in ('chromium-linux', 'chromium-linux-x86', 'chromium-linux-x86_64')
+        self._version = 'lucid'  # We only support lucid right now.
+        self._architecture = arch
+
+    def default_baseline_search_path(self):
+        port_names = self.FALLBACK_PATHS[self._architecture]
+        return map(self._webkit_baseline_path, port_names)
+
+    def _modules_to_search_for_symbols(self):
+        return [self._build_path('libffmpegsumo.so')]
+
+    def check_build(self, needs_http):
+        result = chromium.ChromiumPort.check_build(self, needs_http)
+        if not result:
+            _log.error('For complete Linux build requirements, please see:')
+            _log.error('')
+            _log.error('    http://code.google.com/p/chromium/wiki/LinuxBuildInstructions')
+        return result
+
+    def operating_system(self):
+        return 'linux'
+
+    #
+    # PROTECTED METHODS
+    #
+
+    def _check_apache_install(self):
+        result = self._check_file_exists(self._path_to_apache(), "apache2")
+        result = self._check_file_exists(self._path_to_apache_config_file(), "apache2 config file") and result
+        if not result:
+            _log.error('    Please install using: "sudo apt-get install apache2 libapache2-mod-php5"')
+            _log.error('')
+        return result
+
+    def _check_lighttpd_install(self):
+        result = self._check_file_exists(
+            self._path_to_lighttpd(), "LigHTTPd executable")
+        result = self._check_file_exists(self._path_to_lighttpd_php(), "PHP CGI executable") and result
+        result = self._check_file_exists(self._path_to_lighttpd_modules(), "LigHTTPd modules") and result
+        if not result:
+            _log.error('    Please install using: "sudo apt-get install lighttpd php5-cgi"')
+            _log.error('')
+        return result
+
+    def _wdiff_missing_message(self):
+        return 'wdiff is not installed; please install using "sudo apt-get install wdiff"'
+
+    def _path_to_apache(self):
+        if self._is_redhat_based():
+            return '/usr/sbin/httpd'
+        else:
+            return '/usr/sbin/apache2'
+
+    def _path_to_lighttpd(self):
+        return "/usr/sbin/lighttpd"
+
+    def _path_to_lighttpd_modules(self):
+        return "/usr/lib/lighttpd"
+
+    def _path_to_lighttpd_php(self):
+        return "/usr/bin/php-cgi"
+
+    def _path_to_driver(self, configuration=None):
+        binary_name = self.driver_name()
+        return self._build_path_with_configuration(configuration, binary_name)
+
+    def _path_to_helper(self):
+        return None
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux_unittest.py
new file mode 100644
index 0000000..169c2f4
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux_unittest.py
@@ -0,0 +1,118 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system import executive_mock
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+from webkitpy.tool.mocktool import MockOptions
+
+from webkitpy.layout_tests.port import chromium_linux
+from webkitpy.layout_tests.port import chromium_port_testcase
+
+
+class ChromiumLinuxPortTest(chromium_port_testcase.ChromiumPortTestCase):
+    port_name = 'chromium-linux'
+    port_maker = chromium_linux.ChromiumLinuxPort
+
+    def assert_architecture(self, port_name=None, file_output=None, expected_architecture=None):
+        host = MockSystemHost()
+        host.filesystem.exists = lambda x: 'DumpRenderTree' in x
+        if file_output:
+            host.executive = executive_mock.MockExecutive2(file_output)
+
+        port = self.make_port(host, port_name=port_name)
+        self.assertEquals(port.architecture(), expected_architecture)
+        if expected_architecture == 'x86':
+            self.assertTrue(port.baseline_path().endswith('chromium-linux-x86'))
+            self.assertTrue(port.baseline_search_path()[0].endswith('chromium-linux-x86'))
+            self.assertTrue(port.baseline_search_path()[1].endswith('chromium-linux'))
+        else:
+            self.assertTrue(port.baseline_path().endswith('chromium-linux'))
+            self.assertTrue(port.baseline_search_path()[0].endswith('chromium-linux'))
+
+    def test_architectures(self):
+        self.assert_architecture(port_name='chromium-linux-x86',
+                                 expected_architecture='x86')
+        self.assert_architecture(port_name='chromium-linux-x86_64',
+                                 expected_architecture='x86_64')
+        self.assert_architecture(file_output='ELF 32-bit LSB executable',
+                                 expected_architecture='x86')
+        self.assert_architecture(file_output='ELF 64-bit LSB executable',
+                                 expected_architecture='x86_64')
+
+    def test_check_illegal_port_names(self):
+        # FIXME: Check that, for now, these are illegal port names.
+        # Eventually we should be able to do the right thing here.
+        self.assertRaises(AssertionError, chromium_linux.ChromiumLinuxPort, MockSystemHost(), port_name='chromium-x86-linux')
+
+    def test_determine_architecture_fails(self):
+        # Test that we default to 'x86_64' if the driver doesn't exist.
+        port = self.make_port()
+        self.assertEquals(port.architecture(), 'x86_64')
+
+        # Test that we default to 'x86_64' on an unknown architecture.
+        host = MockSystemHost()
+        host.filesystem.exists = lambda x: True
+        host.executive = executive_mock.MockExecutive2('win32')
+        port = self.make_port(host=host)
+        self.assertEquals(port.architecture(), 'x86_64')
+
+        # Test that we raise errors if something weird happens.
+        host.executive = executive_mock.MockExecutive2(exception=AssertionError)
+        self.assertRaises(AssertionError, chromium_linux.ChromiumLinuxPort, host, self.port_name)
+
+    def test_operating_system(self):
+        self.assertEqual('linux', self.make_port().operating_system())
+
+    def test_build_path(self):
+        # Test that optional paths are used regardless of whether they exist.
+        options = MockOptions(configuration='Release', build_directory='/foo')
+        self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out/Release'], '/foo/Release')
+
+        # Test that optional relative paths are returned unmodified.
+        options = MockOptions(configuration='Release', build_directory='foo')
+        self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out/Release'], 'foo/Release')
+
+        # Test that we look in a chromium directory before the webkit directory.
+        options = MockOptions(configuration='Release', build_directory=None)
+        self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out/Release', '/mock-checkout/out/Release'], '/mock-checkout/Source/WebKit/chromium/out/Release')
+
+        # Test that we prefer the legacy dir over the new dir.
+        options = MockOptions(configuration='Release', build_directory=None)
+        self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/sconsbuild/Release', '/mock-checkout/Source/WebKit/chromium/out/Release'], '/mock-checkout/Source/WebKit/chromium/sconsbuild/Release')
+
+    def test_driver_name_option(self):
+        self.assertTrue(self.make_port()._path_to_driver().endswith('DumpRenderTree'))
+        self.assertTrue(self.make_port(options=MockOptions(driver_name='OtherDriver'))._path_to_driver().endswith('OtherDriver'))
+
+    def test_path_to_image_diff(self):
+        self.assertEquals(self.make_port()._path_to_image_diff(), '/mock-checkout/out/Release/ImageDiff')
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac.py
new file mode 100644
index 0000000..21b7e31
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Chromium Mac implementation of the Port interface."""
+
+import logging
+import signal
+
+from webkitpy.layout_tests.port import chromium
+
+
+_log = logging.getLogger(__name__)
+
+
+class ChromiumMacPort(chromium.ChromiumPort):
+    SUPPORTED_OS_VERSIONS = ('snowleopard', 'lion', 'mountainlion', 'future')
+    port_name = 'chromium-mac'
+
+    FALLBACK_PATHS = {
+        'snowleopard': [
+            'chromium-mac-snowleopard',
+            'chromium-mac-lion',
+            'chromium-mac',
+            'chromium',
+        ],
+        'lion': [
+            'chromium-mac-lion',
+            'chromium-mac',
+            'chromium',
+        ],
+        'mountainlion': [
+            'chromium-mac',
+            'chromium',
+        ],
+        'future': [
+            'chromium-mac',
+            'chromium',
+        ],
+    }
+
+    DEFAULT_BUILD_DIRECTORIES = ('xcodebuild', 'out')
+
+    @classmethod
+    def determine_full_port_name(cls, host, options, port_name):
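+        # e.g. 'chromium-mac' on a Lion host becomes 'chromium-mac-lion'
+        # (see chromium_mac_unittest.ChromiumMacPortTest.test_versions).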
+        if port_name.endswith('-mac'):
+            return port_name + '-' + host.platform.os_version
+        return port_name
+
+    def __init__(self, host, port_name, **kwargs):
+        chromium.ChromiumPort.__init__(self, host, port_name, **kwargs)
+        self._version = port_name[port_name.index('chromium-mac-') + len('chromium-mac-'):]
+        assert self._version in self.SUPPORTED_OS_VERSIONS
+
+    def _modules_to_search_for_symbols(self):
+        return [self._build_path('ffmpegsumo.so')]
+
+    def check_build(self, needs_http):
+        result = chromium.ChromiumPort.check_build(self, needs_http)
+        if not result:
+            _log.error('For complete Mac build requirements, please see:')
+            _log.error('')
+            _log.error('    http://code.google.com/p/chromium/wiki/MacBuildInstructions')
+
+        return result
+
+    def operating_system(self):
+        return 'mac'
+
+    def expectations_files(self):
+        # FIXME: This is a temporary hack while getting the 10.8 baselines up to date.
+        # See https://bugs.webkit.org/show_bug.cgi?id=99505
+        files = super(ChromiumMacPort, self).expectations_files()
+        if self.name() == 'chromium-mac-mountainlion':
+            files.append(self._filesystem.join(self._webkit_baseline_path(self.name()), 'TestExpectations'))
+        return files
+
+    #
+    # PROTECTED METHODS
+    #
+
+    def _lighttpd_path(self, *comps):
+        return self.path_from_chromium_base('third_party', 'lighttpd', 'mac', *comps)
+
+    def _wdiff_missing_message(self):
+        return 'wdiff is not installed; please install from MacPorts or elsewhere'
+
+    def _path_to_apache(self):
+        return '/usr/sbin/httpd'
+
+    def _path_to_apache_config_file(self):
+        return self._filesystem.join(self.layout_tests_dir(), 'http', 'conf', 'apache2-httpd.conf')
+
+    def _path_to_lighttpd(self):
+        return self._lighttpd_path('bin', 'lighttpd')
+
+    def _path_to_lighttpd_modules(self):
+        return self._lighttpd_path('lib')
+
+    def _path_to_lighttpd_php(self):
+        return self._lighttpd_path('bin', 'php-cgi')
+
+    def _path_to_driver(self, configuration=None):
+        # FIXME: make |configuration| happy with case-sensitive file systems.
+        return self._build_path_with_configuration(configuration, self.driver_name() + '.app', 'Contents', 'MacOS', self.driver_name())
+
+    def _path_to_helper(self):
+        binary_name = 'LayoutTestHelper'
+        return self._build_path(binary_name)
+
+    def _path_to_wdiff(self):
+        return 'wdiff'
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py
new file mode 100644
index 0000000..b02af5c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py
@@ -0,0 +1,110 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.layout_tests.port import chromium_mac
+from webkitpy.layout_tests.port import chromium_port_testcase
+from webkitpy.tool.mocktool import MockOptions
+
+
+class ChromiumMacPortTest(chromium_port_testcase.ChromiumPortTestCase):
+    os_name = 'mac'
+    os_version = 'snowleopard'
+    port_name = 'chromium-mac'
+    port_maker = chromium_mac.ChromiumMacPort
+
+    def assert_name(self, port_name, os_version_string, expected):
+        port = self.make_port(os_version=os_version_string, port_name=port_name)
+        self.assertEquals(expected, port.name())
+
+    def test_versions(self):
+        self.assertTrue(self.make_port().name() in ('chromium-mac-snowleopard', 'chromium-mac-lion', 'chromium-mac-mountainlion', 'chromium-mac-future'))
+
+        self.assert_name(None, 'snowleopard', 'chromium-mac-snowleopard')
+        self.assert_name('chromium-mac', 'snowleopard', 'chromium-mac-snowleopard')
+        self.assert_name('chromium-mac-snowleopard', 'leopard', 'chromium-mac-snowleopard')
+        self.assert_name('chromium-mac-snowleopard', 'snowleopard', 'chromium-mac-snowleopard')
+
+        self.assert_name(None, 'lion', 'chromium-mac-lion')
+        self.assert_name(None, 'mountainlion', 'chromium-mac-mountainlion')
+        self.assert_name(None, 'future', 'chromium-mac-future')
+
+        self.assert_name('chromium-mac', 'lion', 'chromium-mac-lion')
+        self.assert_name('chromium-mac-future', 'snowleopard', 'chromium-mac-future')
+        self.assert_name('chromium-mac-future', 'lion', 'chromium-mac-future')
+        self.assert_name('chromium-mac-future', 'mountainlion', 'chromium-mac-future')
+
+        self.assertRaises(AssertionError, self.assert_name, None, 'tiger', 'should-raise-assertion-so-this-value-does-not-matter')
+
+    def test_baseline_path(self):
+        port = self.make_port(port_name='chromium-mac-snowleopard')
+        self.assertEquals(port.baseline_path(), port._webkit_baseline_path('chromium-mac-snowleopard'))
+
+        port = self.make_port(port_name='chromium-mac-lion')
+        self.assertEquals(port.baseline_path(), port._webkit_baseline_path('chromium-mac-lion'))
+
+        port = self.make_port(port_name='chromium-mac-mountainlion')
+        self.assertEquals(port.baseline_path(), port._webkit_baseline_path('chromium-mac'))
+
+        port = self.make_port(port_name='chromium-mac-future')
+        self.assertEquals(port.baseline_path(), port._webkit_baseline_path('chromium-mac'))
+
+    def test_operating_system(self):
+        self.assertEqual('mac', self.make_port().operating_system())
+
+    def test_build_path(self):
+        # Test that optional paths are used regardless of whether they exist.
+        options = MockOptions(configuration='Release', build_directory='/foo')
+        self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out/Release'], '/foo/Release')
+
+        # Test that optional relative paths are returned unmodified.
+        options = MockOptions(configuration='Release', build_directory='foo')
+        self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out/Release'], 'foo/Release')
+
+        # Test that we look in a chromium directory before the webkit directory.
+        options = MockOptions(configuration='Release', build_directory=None)
+        self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out/Release', '/mock-checkout/out/Release'], '/mock-checkout/Source/WebKit/chromium/out/Release')
+
+        # Test that we prefer the legacy dir over the new dir.
+        options = MockOptions(configuration='Release', build_directory=None)
+        self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/xcodebuild/Release', '/mock-checkout/Source/WebKit/chromium/out/Release'], '/mock-checkout/Source/WebKit/chromium/xcodebuild/Release')
+
+    def test_driver_name_option(self):
+        self.assertTrue(self.make_port()._path_to_driver().endswith('DumpRenderTree'))
+        self.assertTrue(self.make_port(options=MockOptions(driver_name='OtherDriver'))._path_to_driver().endswith('OtherDriver'))
+
+    def test_path_to_image_diff(self):
+        self.assertEquals(self.make_port()._path_to_image_diff(), '/mock-checkout/out/Release/ImageDiff')
+
+    def test_ml_expectations(self):
+        self.assertTrue(self.make_port(port_name='chromium-mac-mountainlion').expectations_files()[-1].endswith('-mountainlion/TestExpectations'))
+        self.assertFalse(self.make_port(port_name='chromium-mac-lion').expectations_files()[-1].endswith('-mountainlion/TestExpectations'))
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_port_testcase.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_port_testcase.py
new file mode 100644
index 0000000..a082a13
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_port_testcase.py
@@ -0,0 +1,223 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system import logtesting
+from webkitpy.common.system.executive_mock import MockExecutive2
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+from webkitpy.tool.mocktool import MockOptions
+
+import chromium_android
+import chromium_linux
+import chromium_mac
+import chromium_win
+
+from webkitpy.layout_tests.models.test_configuration import TestConfiguration
+from webkitpy.layout_tests.port import port_testcase
+
+
+class ChromiumPortTestCase(port_testcase.PortTestCase):
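+    """Test cases shared by the Chromium port unit tests; the per-platform
+    *_unittest.py files subclass this to exercise their specific ports."""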
+
+    def test_check_build(self):
+        port = self.make_port()
+        port.check_build(needs_http=True)
+
+    def test_default_max_locked_shards(self):
+        port = self.make_port()
+        port.default_child_processes = lambda: 16
+        self.assertEquals(port.default_max_locked_shards(), 4)
+        port.default_child_processes = lambda: 2
+        self.assertEquals(port.default_max_locked_shards(), 1)
+
+    def test_default_timeout_ms(self):
+        self.assertEquals(self.make_port(options=MockOptions(configuration='Release')).default_timeout_ms(), 6000)
+        self.assertEquals(self.make_port(options=MockOptions(configuration='Debug')).default_timeout_ms(), 12000)
+
+    def test_default_pixel_tests(self):
+        self.assertEquals(self.make_port().default_pixel_tests(), True)
+
+    def test_missing_symbol_to_skipped_tests(self):
+        # Test that we get the chromium skips and not the webkit default skips
+        port = self.make_port()
+        skip_dict = port._missing_symbol_to_skipped_tests()
+        self.assertTrue('ff_mp3_decoder' in skip_dict)
+        self.assertFalse('WebGLShader' in skip_dict)
+
+    def test_all_test_configurations(self):
+        """Validate the complete set of configurations this port knows about."""
+        port = self.make_port()
+        self.assertEquals(set(port.all_test_configurations()), set([
+            TestConfiguration('icecreamsandwich', 'x86', 'debug'),
+            TestConfiguration('icecreamsandwich', 'x86', 'release'),
+            TestConfiguration('snowleopard', 'x86', 'debug'),
+            TestConfiguration('snowleopard', 'x86', 'release'),
+            TestConfiguration('lion', 'x86', 'debug'),
+            TestConfiguration('lion', 'x86', 'release'),
+            TestConfiguration('mountainlion', 'x86', 'debug'),
+            TestConfiguration('mountainlion', 'x86', 'release'),
+            TestConfiguration('xp', 'x86', 'debug'),
+            TestConfiguration('xp', 'x86', 'release'),
+            TestConfiguration('win7', 'x86', 'debug'),
+            TestConfiguration('win7', 'x86', 'release'),
+            TestConfiguration('lucid', 'x86', 'debug'),
+            TestConfiguration('lucid', 'x86', 'release'),
+            TestConfiguration('lucid', 'x86_64', 'debug'),
+            TestConfiguration('lucid', 'x86_64', 'release'),
+        ]))
+
+    class TestMacPort(chromium_mac.ChromiumMacPort):
+        def __init__(self, options=None):
+            options = options or MockOptions()
+            chromium_mac.ChromiumMacPort.__init__(self, MockSystemHost(os_name='mac', os_version='leopard'), 'chromium-mac-leopard', options=options)
+
+        def default_configuration(self):
+            self.default_configuration_called = True
+            return 'default'
+
+    class TestAndroidPort(chromium_android.ChromiumAndroidPort):
+        def __init__(self, options=None):
+            options = options or MockOptions()
+            chromium_android.ChromiumAndroidPort.__init__(self, MockSystemHost(os_name='android', os_version='icecreamsandwich'), 'chromium-android', options=options)
+
+        def default_configuration(self):
+            self.default_configuration_called = True
+            return 'default'
+
+    class TestLinuxPort(chromium_linux.ChromiumLinuxPort):
+        def __init__(self, options=None):
+            options = options or MockOptions()
+            chromium_linux.ChromiumLinuxPort.__init__(self, MockSystemHost(os_name='linux', os_version='lucid'), 'chromium-linux-x86', options=options)
+
+        def default_configuration(self):
+            self.default_configuration_called = True
+            return 'default'
+
+    class TestWinPort(chromium_win.ChromiumWinPort):
+        def __init__(self, options=None):
+            options = options or MockOptions()
+            chromium_win.ChromiumWinPort.__init__(self, MockSystemHost(os_name='win', os_version='xp'), 'chromium-win-xp', options=options)
+
+        def default_configuration(self):
+            self.default_configuration_called = True
+            return 'default'
+
+    def test_default_configuration(self):
+        mock_options = MockOptions()
+        port = ChromiumPortTestCase.TestLinuxPort(options=mock_options)
+        self.assertEquals(mock_options.configuration, 'default')
+        self.assertTrue(port.default_configuration_called)
+
+        mock_options = MockOptions(configuration=None)
+        port = ChromiumPortTestCase.TestLinuxPort(mock_options)
+        self.assertEquals(mock_options.configuration, 'default')
+        self.assertTrue(port.default_configuration_called)
+
+    def test_diff_image(self):
+        class TestPort(ChromiumPortTestCase.TestLinuxPort):
+            def _path_to_image_diff(self):
+                return "/path/to/image_diff"
+
+        port = ChromiumPortTestCase.TestLinuxPort()
+        mock_image_diff = "MOCK Image Diff"
+
+        def mock_run_command(args):
+            port._filesystem.write_binary_file(args[4], mock_image_diff)
+            return 1
+
+        # Images are different.
+        port._executive = MockExecutive2(run_command_fn=mock_run_command)
+        self.assertEquals(mock_image_diff, port.diff_image("EXPECTED", "ACTUAL")[0])
+
+        # Images are the same.
+        port._executive = MockExecutive2(exit_code=0)
+        self.assertEquals(None, port.diff_image("EXPECTED", "ACTUAL")[0])
+
+        # There was some error running image_diff.
+        port._executive = MockExecutive2(exit_code=2)
+        exception_raised = False
+        try:
+            port.diff_image("EXPECTED", "ACTUAL")
+        except ValueError, e:
+            exception_raised = True
+        self.assertFalse(exception_raised)
+
+    def test_diff_image_crashed(self):
+        port = ChromiumPortTestCase.TestLinuxPort()
+        port._executive = MockExecutive2(exit_code=2)
+        self.assertEquals(port.diff_image("EXPECTED", "ACTUAL"), (None, 0, 'image diff returned an exit code of 2'))
+
+    def test_expectations_files(self):
+        port = self.make_port()
+        port.port_name = 'chromium'
+
+        expectations_path = port.path_to_test_expectations_file()
+        chromium_overrides_path = port.path_from_chromium_base(
+            'webkit', 'tools', 'layout_tests', 'test_expectations.txt')
+        skia_overrides_path = port.path_from_chromium_base(
+            'skia', 'skia_test_expectations.txt')
+
+        port._filesystem.write_text_file(skia_overrides_path, 'dummy text')
+
+        port._options.builder_name = 'DUMMY_BUILDER_NAME'
+        self.assertEquals(port.expectations_files(), [expectations_path, skia_overrides_path, chromium_overrides_path])
+
+        port._options.builder_name = 'builder (deps)'
+        self.assertEquals(port.expectations_files(), [expectations_path, skia_overrides_path, chromium_overrides_path])
+
+        # A builder which does NOT observe the Chromium test_expectations,
+        # but still observes the Skia test_expectations...
+        port._options.builder_name = 'builder'
+        self.assertEquals(port.expectations_files(), [expectations_path, skia_overrides_path])
+
+    def test_expectations_ordering(self):
+        # Skip this test since we don't implement self.port_name in ChromiumPort.
+        pass
+
+
+class ChromiumPortLoggingTest(logtesting.LoggingTestCase):
+    def test_check_sys_deps(self):
+        port = ChromiumPortTestCase.TestLinuxPort()
+
+        # Success
+        port._executive = MockExecutive2(exit_code=0)
+        self.assertTrue(port.check_sys_deps(needs_http=False))
+
+        # Failure
+        port._executive = MockExecutive2(exit_code=1,
+            output='testing output failure')
+        self.assertFalse(port.check_sys_deps(needs_http=False))
+        self.assertLog([
+            'ERROR: System dependencies check failed.\n',
+            'ERROR: To override, invoke with --nocheck-sys-deps\n',
+            'ERROR: \n',
+            'ERROR: testing output failure\n'])
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py
new file mode 100644
index 0000000..87de41c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py
@@ -0,0 +1,69 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import StringIO
+import time
+import unittest
+
+from webkitpy.common.system import logtesting
+from webkitpy.common.system.executive_mock import MockExecutive2
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+from webkitpy.layout_tests.port.config_mock import MockConfig
+from webkitpy.thirdparty.mock import Mock
+from webkitpy.tool.mocktool import MockOptions
+
+import chromium
+import chromium_mac
+
+from webkitpy.layout_tests.port import chromium_port_testcase
+from webkitpy.layout_tests.port.driver import DriverInput
+
+
+class ChromiumPortLoggingTest(logtesting.LoggingTestCase):
+
+    # FIXME: put this someplace more useful
+    def test_check_sys_deps(self):
+        port = chromium_port_testcase.ChromiumPortTestCase.TestLinuxPort()
+
+        # Success
+        port._executive = MockExecutive2(exit_code=0)
+        self.assertTrue(port.check_sys_deps(needs_http=False))
+
+        # Failure
+        port._executive = MockExecutive2(exit_code=1,
+            output='testing output failure')
+        self.assertFalse(port.check_sys_deps(needs_http=False))
+        self.assertLog([
+            'ERROR: System dependencies check failed.\n',
+            'ERROR: To override, invoke with --nocheck-sys-deps\n',
+            'ERROR: \n',
+            'ERROR: testing output failure\n'])
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_win.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_win.py
new file mode 100755
index 0000000..3266c39
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_win.py
@@ -0,0 +1,157 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Chromium Win implementation of the Port interface."""
+
+import os
+import logging
+
+import chromium
+
+
+_log = logging.getLogger(__name__)
+
+
+class ChromiumWinPort(chromium.ChromiumPort):
+    port_name = 'chromium-win'
+
+    # FIXME: Figure out how to unify this with base.TestConfiguration.all_systems()?
+    SUPPORTED_VERSIONS = ('xp', 'win7')
+
+    FALLBACK_PATHS = {
+        'xp': [
+            'chromium-win-xp',
+            'chromium-win',
+            'chromium',
+        ],
+        'win7': [
+            'chromium-win',
+            'chromium',
+        ],
+    }
+
+    DEFAULT_BUILD_DIRECTORIES = ('build', 'out')
+
+    @classmethod
+    def determine_full_port_name(cls, host, options, port_name):
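+        # E.g. a bare "chromium-win" port name on a Windows 7 host becomes "chromium-win-win7".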
+        if port_name.endswith('-win'):
+            assert host.platform.is_win()
+            # We don't maintain separate baselines for vista, so we pretend it is win7.
+            if host.platform.os_version in ('vista', '7sp0', '7sp1', 'future'):
+                version = 'win7'
+            else:
+                version = host.platform.os_version
+            port_name = port_name + '-' + version
+        return port_name
+
+    def __init__(self, host, port_name, **kwargs):
+        chromium.ChromiumPort.__init__(self, host, port_name, **kwargs)
+        self._version = port_name[port_name.index('chromium-win-') + len('chromium-win-'):]
+        assert self._version in self.SUPPORTED_VERSIONS, "%s is not in %s" % (self._version, self.SUPPORTED_VERSIONS)
+
+    def setup_environ_for_server(self, server_name=None):
+        env = chromium.ChromiumPort.setup_environ_for_server(self, server_name)
+
+        # FIXME: lighttpd depends on some environment variable we're not whitelisting.
+        # We should add the variable to an explicit whitelist in base.Port.
+        # FIXME: This is a temporary hack to get the cr-win bot online until
+        # someone from the cr-win port can take a look.
+        for key, value in os.environ.items():
+            if key not in env:
+                env[key] = value
+
+        # Put the cygwin directory first in the path to find cygwin1.dll.
+        env["PATH"] = "%s;%s" % (self.path_from_chromium_base("third_party", "cygwin", "bin"), env["PATH"])
+        # Configure the cygwin directory so that pywebsocket finds proper
+        # python executable to run cgi program.
+        env["CYGWIN_PATH"] = self.path_from_chromium_base("third_party", "cygwin", "bin")
+        if self.get_option('register_cygwin'):
+            setup_mount = self.path_from_chromium_base("third_party", "cygwin", "setup_mount.bat")
+            self._executive.run_command([setup_mount])  # Paths are all absolute, so this does not require a cwd.
+        return env
+
+    def _modules_to_search_for_symbols(self):
+        # FIXME: we should return the path to the ffmpeg equivalents to detect if we have the mp3 and aac codecs installed.
+        # See https://bugs.webkit.org/show_bug.cgi?id=89706.
+        return []
+
+    def check_build(self, needs_http):
+        result = chromium.ChromiumPort.check_build(self, needs_http)
+        if not result:
+            _log.error('For complete Windows build requirements, please see:')
+            _log.error('')
+            _log.error('    http://dev.chromium.org/developers/how-tos/build-instructions-windows')
+        return result
+
+    def operating_system(self):
+        return 'win'
+
+    def relative_test_filename(self, filename):
+        path = filename[len(self.layout_tests_dir()) + 1:]
+        return path.replace('\\', '/')
+
+    #
+    # PROTECTED ROUTINES
+    #
+
+    def _uses_apache(self):
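+        # The Windows port defaults to lighttpd for serving http tests (see the
+        # _lighttpd_path helpers below) rather than Apache.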
+        return False
+
+    def _lighttpd_path(self, *comps):
+        return self.path_from_chromium_base('third_party', 'lighttpd', 'win', *comps)
+
+    def _path_to_apache(self):
+        return self.path_from_chromium_base('third_party', 'cygwin', 'usr', 'sbin', 'httpd')
+
+    def _path_to_apache_config_file(self):
+        return self._filesystem.join(self.layout_tests_dir(), 'http', 'conf', 'cygwin-httpd.conf')
+
+    def _path_to_lighttpd(self):
+        return self._lighttpd_path('LightTPD.exe')
+
+    def _path_to_lighttpd_modules(self):
+        return self._lighttpd_path('lib')
+
+    def _path_to_lighttpd_php(self):
+        return self._lighttpd_path('php5', 'php-cgi.exe')
+
+    def _path_to_driver(self, configuration=None):
+        binary_name = '%s.exe' % self.driver_name()
+        return self._build_path_with_configuration(configuration, binary_name)
+
+    def _path_to_helper(self):
+        binary_name = 'LayoutTestHelper.exe'
+        return self._build_path(binary_name)
+
+    def _path_to_image_diff(self):
+        binary_name = 'ImageDiff.exe'
+        return self._build_path(binary_name)
+
+    def _path_to_wdiff(self):
+        return self.path_from_chromium_base('third_party', 'cygwin', 'bin', 'wdiff.exe')
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_win_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_win_unittest.py
new file mode 100644
index 0000000..dc184fc
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_win_unittest.py
@@ -0,0 +1,125 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import unittest
+
+from webkitpy.common.system import outputcapture
+from webkitpy.common.system.executive_mock import MockExecutive
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.layout_tests.port import chromium_port_testcase
+from webkitpy.layout_tests.port import chromium_win
+from webkitpy.tool.mocktool import MockOptions
+
+
+class ChromiumWinTest(chromium_port_testcase.ChromiumPortTestCase):
+    port_name = 'chromium-win'
+    port_maker = chromium_win.ChromiumWinPort
+    os_name = 'win'
+    os_version = 'xp'
+
+    def test_uses_apache(self):
+        self.assertFalse(self.make_port()._uses_apache())
+
+    def test_setup_environ_for_server(self):
+        port = self.make_port()
+        port._executive = MockExecutive(should_log=True)
+        output = outputcapture.OutputCapture()
+        # FIXME: This test should not use the real os.environ
+        orig_environ = os.environ.copy()
+        env = output.assert_outputs(self, port.setup_environ_for_server)
+        self.assertEqual(orig_environ["PATH"], os.environ["PATH"])
+        self.assertNotEqual(env["PATH"], os.environ["PATH"])
+
+    def test_setup_environ_for_server_cygpath(self):
+        port = self.make_port()
+        env = port.setup_environ_for_server(port.driver_name())
+        self.assertEquals(env['CYGWIN_PATH'], '/mock-checkout/Source/WebKit/chromium/third_party/cygwin/bin')
+
+    def test_setup_environ_for_server_register_cygwin(self):
+        port = self.make_port(options=MockOptions(register_cygwin=True, results_directory='/'))
+        port._executive = MockExecutive(should_log=True)
+        expected_stderr = "MOCK run_command: ['/mock-checkout/Source/WebKit/chromium/third_party/cygwin/setup_mount.bat'], cwd=None\n"
+        output = outputcapture.OutputCapture()
+        output.assert_outputs(self, port.setup_environ_for_server, expected_stderr=expected_stderr)
+
+    def assert_name(self, port_name, os_version_string, expected):
+        port = self.make_port(port_name=port_name, os_version=os_version_string)
+        self.assertEquals(expected, port.name())
+
+    def test_versions(self):
+        port = self.make_port()
+        self.assertTrue(port.name() in ('chromium-win-xp', 'chromium-win-win7'))
+
+        self.assert_name(None, 'xp', 'chromium-win-xp')
+        self.assert_name('chromium-win', 'xp', 'chromium-win-xp')
+        self.assert_name('chromium-win-xp', 'xp', 'chromium-win-xp')
+        self.assert_name('chromium-win-xp', '7sp0', 'chromium-win-xp')
+
+        self.assert_name(None, '7sp0', 'chromium-win-win7')
+        self.assert_name(None, 'vista', 'chromium-win-win7')
+        self.assert_name('chromium-win', '7sp0', 'chromium-win-win7')
+        self.assert_name('chromium-win-win7', 'xp', 'chromium-win-win7')
+        self.assert_name('chromium-win-win7', '7sp0', 'chromium-win-win7')
+        self.assert_name('chromium-win-win7', 'vista', 'chromium-win-win7')
+
+        self.assertRaises(AssertionError, self.assert_name, None, 'w2k', 'chromium-win-xp')
+
+    def test_baseline_path(self):
+        port = self.make_port(port_name='chromium-win-xp')
+        self.assertEquals(port.baseline_path(), port._webkit_baseline_path('chromium-win-xp'))
+
+        port = self.make_port(port_name='chromium-win-win7')
+        self.assertEquals(port.baseline_path(), port._webkit_baseline_path('chromium-win'))
+
+    def test_build_path(self):
+        # Test that optional paths are used regardless of whether they exist.
+        options = MockOptions(configuration='Release', build_directory='/foo')
+        self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out/Release'], '/foo/Release')
+
+        # Test that optional relative paths are returned unmodified.
+        options = MockOptions(configuration='Release', build_directory='foo')
+        self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out/Release'], 'foo/Release')
+
+        # Test that we look in a chromium directory before the webkit directory.
+        options = MockOptions(configuration='Release', build_directory=None)
+        self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out/Release', '/mock-checkout/out/Release'], '/mock-checkout/Source/WebKit/chromium/out/Release')
+
+        # Test that we prefer the legacy dir over the new dir.
+        options = MockOptions(configuration='Release', build_directory=None)
+        self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/build/Release', '/mock-checkout/Source/WebKit/chromium/out'], '/mock-checkout/Source/WebKit/chromium/build/Release')
+
+    def test_operating_system(self):
+        self.assertEqual('win', self.make_port().operating_system())
+
+    def test_driver_name_option(self):
+        self.assertTrue(self.make_port()._path_to_driver().endswith('DumpRenderTree.exe'))
+        self.assertTrue(self.make_port(options=MockOptions(driver_name='OtherDriver'))._path_to_driver().endswith('OtherDriver.exe'))
+
+    def test_path_to_image_diff(self):
+        self.assertEquals(self.make_port()._path_to_image_diff(), '/mock-checkout/out/Release/ImageDiff.exe')
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/config.py b/Tools/Scripts/webkitpy/layout_tests/port/config.py
new file mode 100644
index 0000000..dd8f331
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/config.py
@@ -0,0 +1,168 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Wrapper objects for WebKit-specific utility routines."""
+
+# FIXME: This file needs to be unified with common/checkout/scm.py and
+# common/config/ports.py.
+
+from webkitpy.common.system import logutils
+from webkitpy.common.system import executive
+
+
+_log = logutils.get_logger(__file__)
+
+#
+# FIXME: This is used to record if we've already hit the filesystem to look
+# for a default configuration. We cache this to speed up the unit tests,
+# but this can be reset with clear_cached_configuration(). This should be
+# replaced with us consistently using MockConfigs() for tests that don't
+# hit the filesystem at all and provide a reliable value.
+#
+_have_determined_configuration = False
+_configuration = "Release"
+
+
+def clear_cached_configuration():
+    global _have_determined_configuration, _configuration
+    _have_determined_configuration = False
+    _configuration = "Release"
+
+
+class Config(object):
+    _FLAGS_FROM_CONFIGURATIONS = {
+        "Debug": "--debug",
+        "Release": "--release",
+    }
+
+    def __init__(self, executive, filesystem, port_implementation=None):
+        self._executive = executive
+        self._filesystem = filesystem
+        self._webkit_base_dir = None
+        self._default_configuration = None
+        self._build_directories = {}
+        self._port_implementation = port_implementation
+
+    def build_directory(self, configuration):
+        """Returns the path to the build directory for the configuration."""
+        if configuration:
+            flags = ["--configuration", self.flag_for_configuration(configuration)]
+        else:
+            configuration = ""
+            flags = []
+
+        if self._port_implementation:
+            flags.append('--' + self._port_implementation)
+
+        if not self._build_directories.get(configuration):
+            args = ["perl", self.script_path("webkit-build-directory")] + flags
+            output = self._executive.run_command(args, cwd=self.webkit_base_dir(), return_stderr=False).rstrip()
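+            # With a --configuration flag webkit-build-directory prints a single
+            # directory; with no flags it prints the top-level directory followed by
+            # the default configuration's directory (e.g. "/WebKitBuild\n/WebKitBuild/Debug").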
+            parts = output.split("\n")
+            self._build_directories[configuration] = parts[0]
+
+            if len(parts) == 2:
+                default_configuration = parts[1][len(parts[0]):]
+                if default_configuration.startswith("/"):
+                    default_configuration = default_configuration[1:]
+                self._build_directories[default_configuration] = parts[1]
+
+        return self._build_directories[configuration]
+
+    def flag_for_configuration(self, configuration):
+        return self._FLAGS_FROM_CONFIGURATIONS[configuration]
+
+    def default_configuration(self):
+        """Returns the default configuration for the user.
+
+        Returns the value set by 'set-webkit-configuration', or "Release"
+        if that has not been set. This mirrors the logic in webkitdirs.pm."""
+        if not self._default_configuration:
+            self._default_configuration = self._determine_configuration()
+        if not self._default_configuration:
+            self._default_configuration = 'Release'
+        if self._default_configuration not in self._FLAGS_FROM_CONFIGURATIONS:
+            _log.warn("Configuration \"%s\" is not a recognized value.\n" % self._default_configuration)
+            _log.warn("Scripts may fail.  See 'set-webkit-configuration --help'.")
+        return self._default_configuration
+
+    def path_from_webkit_base(self, *comps):
+        return self._filesystem.join(self.webkit_base_dir(), *comps)
+
+    # FIXME: We should only have one implementation of this logic,
+    # if scm.find_checkout_root() is broken for Chromium, we should fix (or at least wrap) it!
+    def webkit_base_dir(self):
+        """Returns the absolute path to the top of the WebKit tree.
+
+        Raises an AssertionError if the top dir can't be determined."""
+        # Note: this code somewhat duplicates the code in
+        # scm.find_checkout_root(). However, that code only works if the top
+        # of the SCM repository also matches the top of the WebKit tree. The
+        # Chromium ports, for example, only check out subdirectories like
+        # Tools/Scripts, and so we still have to do additional work
+        # to find the top of the tree.
+        #
+        # This code will also work if there is no SCM system at all.
+        if not self._webkit_base_dir:
+            config_module_path = self._filesystem.path_to_module(self.__module__)
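+            # This module lives under Tools/Scripts/webkitpy/..., so truncating the
+            # path just before "Tools" yields the top of the WebKit tree.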
+            self._webkit_base_dir = config_module_path[0:config_module_path.find('Tools') - 1]
+        return self._webkit_base_dir
+
+    def script_path(self, script_name):
+        # This is intentionally relative. Callers should pass the checkout_root/webkit_base_dir to run_command as the cwd.
+        return self._filesystem.join("Tools", "Scripts", script_name)
+
+    def _determine_configuration(self):
+        # This mirrors the logic in webkitdirs.pm:determineConfiguration().
+        #
+        # FIXME: See the comment at the top of the file regarding unit tests
+        # and our use of global mutable static variables.
+        # FIXME: We should just @memoize this method and then this will only
+        # be read once per object lifetime (which should be sufficiently fast).
+        global _have_determined_configuration, _configuration
+        if not _have_determined_configuration:
+            contents = self._read_configuration()
+            if not contents:
+                contents = "Release"
+            if contents == "Deployment":
+                contents = "Release"
+            if contents == "Development":
+                contents = "Debug"
+            _configuration = contents
+            _have_determined_configuration = True
+        return _configuration
+
+    def _read_configuration(self):
+        try:
+            configuration_path = self._filesystem.join(self.build_directory(None), "Configuration")
+            if not self._filesystem.exists(configuration_path):
+                return None
+        except (OSError, executive.ScriptError):
+            return None
+
+        return self._filesystem.read_text_file(configuration_path).rstrip()
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/config_mock.py b/Tools/Scripts/webkitpy/layout_tests/port/config_mock.py
new file mode 100644
index 0000000..5476e4b
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/config_mock.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Wrapper objects for WebKit-specific utility routines."""
+
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+
+
+class MockConfig(object):
+    _FLAGS_FROM_CONFIGURATIONS = {
+        "Debug": "--debug",
+        "Release": "--release",
+    }
+
+    def __init__(self, filesystem=None, default_configuration='Release', port_implementation=None):
+        self._filesystem = filesystem or MockFileSystem()
+        self._default_configuration = default_configuration
+        self._port_implementation = port_implementation
+
+    def flag_for_configuration(self, configuration):
+        return self._FLAGS_FROM_CONFIGURATIONS[configuration]
+
+    def build_directory(self, configuration):
+        return "/mock-build"
+
+    def build_dumprendertree(self, configuration):
+        return True
+
+    def default_configuration(self):
+        return self._default_configuration
+
+    def path_from_webkit_base(self, *comps):
+        # FIXME: This could use self._filesystem.join, but that doesn't handle empty lists.
+        return self.webkit_base_dir() + "/" + "/".join(list(comps))
+
+    def script_path(self, script_name):
+        # This is intentionally relative. Callers should pass the checkout_root/webkit_base_dir to run_command as the cwd.
+        return self._filesystem.join("Tools", "Scripts", script_name)
+
+    def webkit_base_dir(self):
+        return "/mock-checkout"
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/config_standalone.py b/Tools/Scripts/webkitpy/layout_tests/port/config_standalone.py
new file mode 100644
index 0000000..5b04831
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/config_standalone.py
@@ -0,0 +1,70 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""FIXME: This script is used by
+config_unittest.test_default_configuration__standalone() to read the
+default configuration to work around any possible caching / reset bugs. See
+https://bugs.webkit.org/show_bug.cgi?id=49360 for the motivation. We can remove
+this test when we remove the global configuration cache in config.py."""
+
+import os
+import unittest
+import sys
+
+
+# Ensure that webkitpy is in PYTHONPATH.
+this_dir = os.path.abspath(sys.path[0])
+up = os.path.dirname
+script_dir = up(up(up(this_dir)))
+if script_dir not in sys.path:
+    sys.path.append(script_dir)
+
+from webkitpy.common.system import executive
+from webkitpy.common.system import executive_mock
+from webkitpy.common.system import filesystem
+from webkitpy.common.system import filesystem_mock
+
+import config
+
+
+def main(argv=None):
+    if not argv:
+        argv = sys.argv
+
+    if len(argv) == 3 and argv[1] == '--mock':
+        e = executive_mock.MockExecutive2(output='foo\nfoo/%s' % argv[2])
+        fs = filesystem_mock.MockFileSystem({'foo/Configuration': argv[2]})
+    else:
+        e = executive.Executive()
+        fs = filesystem.FileSystem()
+
+    c = config.Config(e, fs)
+    print c.default_configuration()
+
+if __name__ == '__main__':
+    main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/config_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/config_unittest.py
new file mode 100644
index 0000000..96ba5ff
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/config_unittest.py
@@ -0,0 +1,188 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import sys
+import unittest
+
+from webkitpy.common.system.executive import Executive, ScriptError
+from webkitpy.common.system.executive_mock import MockExecutive, MockExecutive2
+from webkitpy.common.system.filesystem import FileSystem
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.outputcapture import OutputCapture
+
+import config
+
+
+class ConfigTest(unittest.TestCase):
+    def setUp(self):
+        config.clear_cached_configuration()
+
+    def tearDown(self):
+        config.clear_cached_configuration()
+
+    def make_config(self, output='', files=None, exit_code=0, exception=None, run_command_fn=None, stderr='', port_implementation=None):
+        e = MockExecutive2(output=output, exit_code=exit_code, exception=exception, run_command_fn=run_command_fn, stderr=stderr)
+        fs = MockFileSystem(files)
+        return config.Config(e, fs, port_implementation=port_implementation)
+
+    def assert_configuration(self, contents, expected):
+        # This tests that a configuration file containing
+        # _contents_ ends up being interpreted as _expected_.
+        output = 'foo\nfoo/%s' % contents
+        c = self.make_config(output, {'foo/Configuration': contents})
+        self.assertEqual(c.default_configuration(), expected)
+
+    def test_build_directory(self):
+        # --top-level
+        def mock_webkit_build_directory(arg_list):
+            if arg_list == ['--top-level']:
+                return '/WebKitBuild/'
+            elif arg_list == ['--configuration', '--debug']:
+                return '/WebKitBuild/Debug'
+            elif arg_list == ['--configuration', '--release']:
+                return '/WebKitBuild/Release'
+            elif arg_list == []:
+                return '/WebKitBuild/\n/WebKitBuild//Debug\n'
+            return 'Error'
+
+        def mock_run_command(arg_list):
+            if 'webkit-build-directory' in arg_list[1]:
+                return mock_webkit_build_directory(arg_list[2:])
+            return 'Error'
+
+        c = self.make_config(run_command_fn=mock_run_command)
+        self.assertEqual(c.build_directory(None), '/WebKitBuild/')
+
+        # Test again to check caching
+        self.assertEqual(c.build_directory(None), '/WebKitBuild/')
+
+        # Test other values
+        self.assertTrue(c.build_directory('Release').endswith('/Release'))
+        self.assertTrue(c.build_directory('Debug').endswith('/Debug'))
+        self.assertRaises(KeyError, c.build_directory, 'Unknown')
+
+        # Test that stderr output from webkit-build-directory won't mangle the build dir
+        c = self.make_config(output='/WebKitBuild/', stderr="mock stderr output from webkit-build-directory")
+        self.assertEqual(c.build_directory(None), '/WebKitBuild/')
+
+    def test_build_directory_passes_port_implementation(self):
+        def mock_run_command(arg_list):
+            self.assertTrue('--gtk' in arg_list)
+            return '/tmp'
+
+        c = self.make_config(run_command_fn=mock_run_command, port_implementation='gtk')
+        # Invoke build_directory so the mocked run_command above (and its assertion) actually runs.
+        c.build_directory(None)
+
+    def test_default_configuration__release(self):
+        self.assert_configuration('Release', 'Release')
+
+    def test_default_configuration__debug(self):
+        self.assert_configuration('Debug', 'Debug')
+
+    def test_default_configuration__deployment(self):
+        self.assert_configuration('Deployment', 'Release')
+
+    def test_default_configuration__development(self):
+        self.assert_configuration('Development', 'Debug')
+
+    def test_default_configuration__notfound(self):
+        # This tests what happens if the default configuration file doesn't exist.
+        c = self.make_config(output='foo\nfoo/Release', files={'foo/Configuration': None})
+        self.assertEqual(c.default_configuration(), "Release")
+
+    def test_default_configuration__unknown(self):
+        # Ignore the warning about an unknown configuration value.
+        oc = OutputCapture()
+        oc.capture_output()
+        self.assert_configuration('Unknown', 'Unknown')
+        oc.restore_output()
+
+    def test_default_configuration__standalone(self):
+        # FIXME: This test runs a standalone python script to test
+        # reading the default configuration to work around any possible
+        # caching / reset bugs. See https://bugs.webkit.org/show_bug.cgi?id=49360
+        # for the motivation. We can remove this test when we remove the
+        # global configuration cache in config.py.
+        e = Executive()
+        fs = FileSystem()
+        c = config.Config(e, fs)
+        script = c.path_from_webkit_base('Tools', 'Scripts', 'webkitpy', 'layout_tests', 'port', 'config_standalone.py')
+
+        # Note: don't use 'Release' here, since that's the normal default.
+        expected = 'Debug'
+
+        # FIXME: Why are we running a python subprocess here??
+        args = [sys.executable, script, '--mock', expected]
+        actual = e.run_command(args).rstrip()
+        self.assertEqual(actual, expected)
+
+    def test_default_configuration__no_perl(self):
+        # We need perl to run webkit-build-directory to find out where the
+        # default configuration file is. See what happens if perl isn't
+        # installed. (We should get the default value, 'Release').
+        c = self.make_config(exception=OSError)
+        actual = c.default_configuration()
+        self.assertEqual(actual, 'Release')
+
+    def test_default_configuration__scripterror(self):
+        # We run webkit-build-directory to find out where the default
+        # configuration file is. See what happens if that script fails.
+        # (We should get the default value, 'Release').
+        c = self.make_config(exception=ScriptError())
+        actual = c.default_configuration()
+        self.assertEqual(actual, 'Release')
+
+    def test_path_from_webkit_base(self):
+        c = config.Config(MockExecutive(), MockFileSystem())
+        self.assertTrue(c.path_from_webkit_base('foo'))
+
+    def test_webkit_base_dir(self):
+        # FIXME: We use a real filesystem here. Should this move to a mocked one?
+        executive = Executive()
+        filesystem = FileSystem()
+        c = config.Config(executive, filesystem)
+        base_dir = c.webkit_base_dir()
+        self.assertTrue(base_dir)
+        self.assertNotEqual(base_dir[-1], '/')
+
+        # FIXME: Once we use a MockFileSystem for this test we don't need to save the orig_cwd.
+        orig_cwd = filesystem.getcwd()
+        if sys.platform == 'win32':
+            filesystem.chdir(os.environ['USERPROFILE'])
+        else:
+            filesystem.chdir(os.environ['HOME'])
+        c = config.Config(executive, filesystem)
+        try:
+            base_dir_2 = c.webkit_base_dir()
+            self.assertEqual(base_dir, base_dir_2)
+        finally:
+            filesystem.chdir(orig_cwd)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/driver.py b/Tools/Scripts/webkitpy/layout_tests/port/driver.py
new file mode 100644
index 0000000..7993d05
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/driver.py
@@ -0,0 +1,539 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import base64
+import copy
+import logging
+import re
+import shlex
+import sys
+import time
+import os
+
+from webkitpy.common.system import path
+
+
+_log = logging.getLogger(__name__)
+
+
+class DriverInput(object):
+    def __init__(self, test_name, timeout, image_hash, should_run_pixel_test, args=None):
+        self.test_name = test_name
+        self.timeout = timeout  # in ms
+        self.image_hash = image_hash
+        self.should_run_pixel_test = should_run_pixel_test
+        self.args = args or []
+
+
+class DriverOutput(object):
+    """Groups information about a output from driver for easy passing
+    and post-processing of data."""
+
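+    # Regexes used by strip_metrics() below to strip layout metrics (e.g. "at (x,y)",
+    # "size WxH", scroll offsets) from render tree text dumps.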
+    strip_patterns = []
+    strip_patterns.append((re.compile('at \(-?[0-9]+,-?[0-9]+\) *'), ''))
+    strip_patterns.append((re.compile('size -?[0-9]+x-?[0-9]+ *'), ''))
+    strip_patterns.append((re.compile('text run width -?[0-9]+: '), ''))
+    strip_patterns.append((re.compile('text run width -?[0-9]+ [a-zA-Z ]+: '), ''))
+    strip_patterns.append((re.compile('RenderButton {BUTTON} .*'), 'RenderButton {BUTTON}'))
+    strip_patterns.append((re.compile('RenderImage {INPUT} .*'), 'RenderImage {INPUT}'))
+    strip_patterns.append((re.compile('RenderBlock {INPUT} .*'), 'RenderBlock {INPUT}'))
+    strip_patterns.append((re.compile('RenderTextControl {INPUT} .*'), 'RenderTextControl {INPUT}'))
+    strip_patterns.append((re.compile('\([0-9]+px'), 'px'))
+    strip_patterns.append((re.compile(' *" *\n +" *'), ' '))
+    strip_patterns.append((re.compile('" +$'), '"'))
+    strip_patterns.append((re.compile('- '), '-'))
+    strip_patterns.append((re.compile('\n( *)"\s+'), '\n\g<1>"'))
+    strip_patterns.append((re.compile('\s+"\n'), '"\n'))
+    strip_patterns.append((re.compile('scrollWidth [0-9]+'), 'scrollWidth'))
+    strip_patterns.append((re.compile('scrollHeight [0-9]+'), 'scrollHeight'))
+    strip_patterns.append((re.compile('scrollX [0-9]+'), 'scrollX'))
+    strip_patterns.append((re.compile('scrollY [0-9]+'), 'scrollY'))
+    strip_patterns.append((re.compile('scrolled to [0-9]+,[0-9]+'), 'scrolled'))
+
+    def __init__(self, text, image, image_hash, audio, crash=False,
+            test_time=0, measurements=None, timeout=False, error='', crashed_process_name='??',
+            crashed_pid=None, crash_log=None):
+        # FIXME: Args could be renamed to better clarify what they do.
+        self.text = text
+        self.image = image  # May be empty-string if the test crashes.
+        self.image_hash = image_hash
+        self.image_diff = None  # image_diff gets filled in after construction.
+        self.audio = audio  # Binary format is port-dependent.
+        self.crash = crash
+        self.crashed_process_name = crashed_process_name
+        self.crashed_pid = crashed_pid
+        self.crash_log = crash_log
+        self.test_time = test_time
+        self.measurements = measurements
+        self.timeout = timeout
+        self.error = error  # stderr output
+
+    def has_stderr(self):
+        return bool(self.error)
+
+    def strip_metrics(self):
+        if not self.text:
+            return
+        for pattern in self.strip_patterns:
+            self.text = re.sub(pattern[0], pattern[1], self.text)
+
+
+class Driver(object):
+    """object for running test(s) using DumpRenderTree/WebKitTestRunner."""
+
+    def __init__(self, port, worker_number, pixel_tests, no_timeout=False):
+        """Initialize a Driver to subsequently run tests.
+
+        Typically this routine will spawn DumpRenderTree in a config
+        ready for subsequent input.
+
+        port - reference back to the port object.
+        worker_number - identifier for a particular worker/driver instance
+        """
+        self._port = port
+        self._worker_number = worker_number
+        self._no_timeout = no_timeout
+
+        self._driver_tempdir = None
+        # WebKitTestRunner can report back subprocess crashes by printing
+        # "#CRASHED - PROCESSNAME".  Since those can happen at any time
+        # and ServerProcess won't be aware of them (since the actual tool
+        # didn't crash, just a subprocess) we record the crashed subprocess name here.
+        self._crashed_process_name = None
+        self._crashed_pid = None
+
+        # WebKitTestRunner can report back subprocesses that became unresponsive.
+        # This could mean they crashed.
+        self._subprocess_was_unresponsive = False
+
+        # stderr reading is scoped on a per-test (not per-block) basis, so we store the accumulated
+        # stderr output, as well as if we've seen #EOF on this driver instance.
+        # FIXME: We should probably remove _read_first_block and _read_optional_image_block and
+        # instead scope these locally in run_test.
+        self.error_from_test = str()
+        self.err_seen_eof = False
+        self._server_process = None
+
+        self._measurements = {}
+
+    def __del__(self):
+        self.stop()
+
+    def run_test(self, driver_input, stop_when_done):
+        """Run a single test and return the results.
+
+        Note that it is okay if a test times out or crashes and leaves
+        the driver in an indeterminate state. The upper layers of the program
+        are responsible for cleaning up and ensuring things are okay.
+
+        Returns a DriverOutput object.
+        """
+        start_time = time.time()
+        self.start(driver_input.should_run_pixel_test, driver_input.args)
+        test_begin_time = time.time()
+        self.error_from_test = str()
+        self.err_seen_eof = False
+
+        command = self._command_from_driver_input(driver_input)
+        deadline = test_begin_time + int(driver_input.timeout) / 1000.0
+
+        self._server_process.write(command)
+        text, audio = self._read_first_block(deadline)  # First block is either text or audio
+        image, actual_image_hash = self._read_optional_image_block(deadline)  # The second (optional) block is image data.
+
+        crashed = self.has_crashed()
+        timed_out = self._server_process.timed_out
+
+        if stop_when_done or crashed or timed_out:
+            # We call stop() even if we crashed or timed out in order to get any remaining stdout/stderr output.
+            # In the timeout case, we kill the hung process as well.
+            out, err = self._server_process.stop(self._port.driver_stop_timeout() if stop_when_done else 0.0)
+            if out:
+                text += out
+            if err:
+                self.error_from_test += err
+            self._server_process = None
+
+        crash_log = None
+        if crashed:
+            self.error_from_test, crash_log = self._get_crash_log(text, self.error_from_test, newer_than=start_time)
+
+            # If we don't find a crash log use a placeholder error message instead.
+            if not crash_log:
+                pid_str = str(self._crashed_pid) if self._crashed_pid else "unknown pid"
+                crash_log = 'No crash log found for %s:%s.\n' % (self._crashed_process_name, pid_str)
+                # If the process was unresponsive, append a message noting there may not actually have been a crash.
+                if self._subprocess_was_unresponsive:
+                    crash_log += 'Process failed to become responsive before timing out.\n'
+
+                # Print stdout and stderr to the placeholder crash log; we want as much context as possible.
+                if self.error_from_test:
+                    crash_log += '\nstdout:\n%s\nstderr:\n%s\n' % (text, self.error_from_test)
+
+        return DriverOutput(text, image, actual_image_hash, audio,
+            crash=crashed, test_time=time.time() - test_begin_time, measurements=self._measurements,
+            timeout=timed_out, error=self.error_from_test,
+            crashed_process_name=self._crashed_process_name,
+            crashed_pid=self._crashed_pid, crash_log=crash_log)
+
+    def _get_crash_log(self, stdout, stderr, newer_than):
+        return self._port._get_crash_log(self._crashed_process_name, self._crashed_pid, stdout, stderr, newer_than)
+
+    # FIXME: Seems this could just be inlined into callers.
+    @classmethod
+    def _command_wrapper(cls, wrapper_option):
+        # Hook for injecting valgrind or other runtime instrumentation,
+        # used by e.g. tools/valgrind/valgrind_tests.py.
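+        # E.g. --wrapper="valgrind --smc-check=all" (illustrative) becomes
+        # ['valgrind', '--smc-check=all'], prepended to the driver command in cmd_line().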
+        return shlex.split(wrapper_option) if wrapper_option else []
+
+    HTTP_DIR = "http/tests/"
+    HTTP_LOCAL_DIR = "http/tests/local/"
+
+    def is_http_test(self, test_name):
+        return test_name.startswith(self.HTTP_DIR) and not test_name.startswith(self.HTTP_LOCAL_DIR)
+
+    def test_to_uri(self, test_name):
+        """Convert a test name to a URI."""
+        if not self.is_http_test(test_name):
+            return path.abspath_to_uri(self._port.host.platform, self._port.abspath_for_test(test_name))
+
+        relative_path = test_name[len(self.HTTP_DIR):]
+
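+        # E.g. a test named "http/tests/foo/bar.html" (illustrative) maps to
+        # "http://127.0.0.1:8000/foo/bar.html" on the local test server.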
+        # TODO(dpranke): remove the SSL reference?
+        if relative_path.startswith("ssl/"):
+            return "https://127.0.0.1:8443/" + relative_path
+        return "http://127.0.0.1:8000/" + relative_path
+
+    def uri_to_test(self, uri):
+        """Return the base layout test name for a given URI.
+
+        This returns the test name for a given URI, e.g., if you passed in
+        "file:///src/LayoutTests/fast/html/keygen.html" it would return
+        "fast/html/keygen.html".
+
+        """
+        if uri.startswith("file:///"):
+            prefix = path.abspath_to_uri(self._port.host.platform, self._port.layout_tests_dir())
+            if not prefix.endswith('/'):
+                prefix += '/'
+            return uri[len(prefix):]
+        if uri.startswith("http://"):
+            return uri.replace('http://127.0.0.1:8000/', self.HTTP_DIR)
+        if uri.startswith("https://"):
+            return uri.replace('https://127.0.0.1:8443/', self.HTTP_DIR)
+        raise NotImplementedError('unknown url type: %s' % uri)
+
+    def has_crashed(self):
+        if self._server_process is None:
+            return False
+        if self._crashed_process_name:
+            return True
+        if self._server_process.has_crashed():
+            self._crashed_process_name = self._server_process.name()
+            self._crashed_pid = self._server_process.pid()
+            return True
+        return False
+
+    def start(self, pixel_tests, per_test_args):
+        # FIXME: Callers shouldn't normally call this, since this routine
+        # may not be specifying the correct combination of pixel test and
+        # per_test args.
+        #
+        # The only reason we have this routine at all is so the perftestrunner
+        # can pause before running a test; it might be better to push that
+        # into run_test() directly.
+        if not self._server_process:
+            self._start(pixel_tests, per_test_args)
+
+    def _start(self, pixel_tests, per_test_args):
+        self.stop()
+        self._driver_tempdir = self._port._filesystem.mkdtemp(prefix='%s-' % self._port.driver_name())
+        server_name = self._port.driver_name()
+        environment = self._port.setup_environ_for_server(server_name)
+        environment['DYLD_LIBRARY_PATH'] = self._port._build_path()
+        environment['DYLD_FRAMEWORK_PATH'] = self._port._build_path()
+        # FIXME: We're assuming that WebKitTestRunner checks this DumpRenderTree-named environment variable.
+        environment['DUMPRENDERTREE_TEMP'] = str(self._driver_tempdir)
+        environment['LOCAL_RESOURCE_ROOT'] = self._port.layout_tests_dir()
+        if 'WEBKITOUTPUTDIR' in os.environ:
+            environment['WEBKITOUTPUTDIR'] = os.environ['WEBKITOUTPUTDIR']
+        self._crashed_process_name = None
+        self._crashed_pid = None
+        self._server_process = self._port._server_process_constructor(self._port, server_name, self.cmd_line(pixel_tests, per_test_args), environment)
+        self._server_process.start()
+
+    def stop(self):
+        if self._server_process:
+            self._server_process.stop(self._port.driver_stop_timeout())
+            self._server_process = None
+
+        if self._driver_tempdir:
+            self._port._filesystem.rmtree(str(self._driver_tempdir))
+            self._driver_tempdir = None
+
+    def cmd_line(self, pixel_tests, per_test_args):
+        cmd = self._command_wrapper(self._port.get_option('wrapper'))
+        cmd.append(self._port._path_to_driver())
+        if self._port.get_option('gc_between_tests'):
+            cmd.append('--gc-between-tests')
+        if self._port.get_option('complex_text'):
+            cmd.append('--complex-text')
+        if self._port.get_option('threaded'):
+            cmd.append('--threaded')
+        if self._no_timeout:
+            cmd.append('--no-timeout')
+        # FIXME: We need to pass --timeout=SECONDS to WebKitTestRunner for WebKit2.
+
+        cmd.extend(self._port.get_option('additional_drt_flag', []))
+        cmd.extend(self._port.additional_drt_flag())
+
+        cmd.extend(per_test_args)
+
+        cmd.append('-')
+        return cmd
+
+    def _check_for_driver_crash(self, error_line):
+        if error_line == "#CRASHED\n":
+            # This is used on Windows to report that the process has crashed
+            # See http://trac.webkit.org/changeset/65537.
+            self._crashed_process_name = self._server_process.name()
+            self._crashed_pid = self._server_process.pid()
+        elif (error_line.startswith("#CRASHED - ")
+            or error_line.startswith("#PROCESS UNRESPONSIVE - ")):
+            # WebKitTestRunner uses this to report that the WebProcess subprocess crashed.
+            match = re.match('#(?:CRASHED|PROCESS UNRESPONSIVE) - (\S+)', error_line)
+            self._crashed_process_name = match.group(1) if match else 'WebProcess'
+            match = re.search('pid (\d+)', error_line)
+            pid = int(match.group(1)) if match else None
+            self._crashed_pid = pid
+            # FIXME: delete this after we're sure this code is working :)
+            _log.debug('%s crash, pid = %s, error_line = %s' % (self._crashed_process_name, str(pid), error_line))
+            if error_line.startswith("#PROCESS UNRESPONSIVE - "):
+                self._subprocess_was_unresponsive = True
+                # We want to show this since it's not a regular crash and probably we don't have a crash log.
+                self.error_from_test += error_line
+            return True
+        return self.has_crashed()
+
+    def _command_from_driver_input(self, driver_input):
+        # FIXME: performance tests pass in full URLs instead of test names.
+        if driver_input.test_name.startswith('http://') or driver_input.test_name.startswith('https://') or driver_input.test_name == 'about:blank':
+            command = driver_input.test_name
+        elif self.is_http_test(driver_input.test_name):
+            command = self.test_to_uri(driver_input.test_name)
+        else:
+            command = self._port.abspath_for_test(driver_input.test_name)
+            if sys.platform == 'cygwin':
+                command = path.cygpath(command)
+
+        assert not driver_input.image_hash or driver_input.should_run_pixel_test
+
+        # ' is the separator between arguments.
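+        # E.g. a pixel test command ends up as: <path or URI>'--pixel-test'<expected hash>\n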
+        if driver_input.should_run_pixel_test:
+            command += "'--pixel-test"
+        if driver_input.image_hash:
+            command += "'" + driver_input.image_hash
+        return command + "\n"
+
+    def _read_first_block(self, deadline):
+        # returns (text_content, audio_content)
+        block = self._read_block(deadline)
+        if block.malloc:
+            self._measurements['Malloc'] = float(block.malloc)
+        if block.js_heap:
+            self._measurements['JSHeap'] = float(block.js_heap)
+        if block.content_type == 'audio/wav':
+            return (None, block.decoded_content)
+        return (block.decoded_content, None)
+
+    def _read_optional_image_block(self, deadline):
+        # returns (image, actual_image_hash)
+        block = self._read_block(deadline, wait_for_stderr_eof=True)
+        if block.content and block.content_type == 'image/png':
+            return (block.decoded_content, block.content_hash)
+        return (None, block.content_hash)
+
+    def _read_header(self, block, line, header_text, header_attr, header_filter=None):
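+        # e.g. given the line "Content-Type: text/plain" and header_attr 'content_type',
+        # this sets block.content_type to "text/plain".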
+        if line.startswith(header_text) and getattr(block, header_attr) is None:
+            value = line.split()[1]
+            if header_filter:
+                value = header_filter(value)
+            setattr(block, header_attr, value)
+            return True
+        return False
+
+    def _process_stdout_line(self, block, line):
+        if (self._read_header(block, line, 'Content-Type: ', 'content_type')
+            or self._read_header(block, line, 'Content-Transfer-Encoding: ', 'encoding')
+            or self._read_header(block, line, 'Content-Length: ', '_content_length', int)
+            or self._read_header(block, line, 'ActualHash: ', 'content_hash')
+            or self._read_header(block, line, 'DumpMalloc: ', 'malloc')
+            or self._read_header(block, line, 'DumpJSHeap: ', 'js_heap')):
+            return
+        # Note, we're not reading ExpectedHash: here, but we could.
+        # If the line wasn't a header, we just append it to the content.
+        block.content += line
+
+    def _strip_eof(self, line):
+        if line and line.endswith("#EOF\n"):
+            return line[:-5], True
+        return line, False
+
+    def _read_block(self, deadline, wait_for_stderr_eof=False):
+        block = ContentBlock()
+        out_seen_eof = False
+
+        while not self.has_crashed():
+            if out_seen_eof and (self.err_seen_eof or not wait_for_stderr_eof):
+                break
+
+            if self.err_seen_eof:
+                out_line = self._server_process.read_stdout_line(deadline)
+                err_line = None
+            elif out_seen_eof:
+                out_line = None
+                err_line = self._server_process.read_stderr_line(deadline)
+            else:
+                out_line, err_line = self._server_process.read_either_stdout_or_stderr_line(deadline)
+
+            if self._server_process.timed_out or self.has_crashed():
+                break
+
+            if out_line:
+                assert not out_seen_eof
+                out_line, out_seen_eof = self._strip_eof(out_line)
+            if err_line:
+                assert not self.err_seen_eof
+                err_line, self.err_seen_eof = self._strip_eof(err_line)
+
+            if out_line:
+                if out_line[-1] != "\n":
+                    _log.error("Last character read from DRT stdout line was not a newline!  This indicates either a NRWT or DRT bug.")
+                content_length_before_header_check = block._content_length
+                self._process_stdout_line(block, out_line)
+                # FIXME: Unlike HTTP, DRT dumps the content right after printing a Content-Length header.
+                # Don't wait until we're done with headers, just read the binary blob right now.
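+                # A binary block therefore looks roughly like this (hypothetical example):
+                #   Content-Type: image/png
+                #   Content-Length: 1234
+                #   <1234 bytes of PNG data>#EOF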
+                if content_length_before_header_check != block._content_length:
+                    block.content = self._server_process.read_stdout(deadline, block._content_length)
+
+            if err_line:
+                if self._check_for_driver_crash(err_line):
+                    break
+                self.error_from_test += err_line
+
+        block.decode_content()
+        return block
+
+
+class ContentBlock(object):
+    def __init__(self):
+        self.content_type = None
+        self.encoding = None
+        self.content_hash = None
+        self._content_length = None
+        # Content is treated as binary data even though the text output is usually UTF-8.
+        self.content = str()  # FIXME: Should be bytearray() once we require Python 2.6.
+        self.decoded_content = None
+        self.malloc = None
+        self.js_heap = None
+
+    def decode_content(self):
+        if self.encoding == 'base64' and self.content is not None:
+            self.decoded_content = base64.b64decode(self.content)
+        else:
+            self.decoded_content = self.content
+
+class DriverProxy(object):
+    """A wrapper for managing two Driver instances, one with pixel tests and
+    one without. This allows us to handle plain text tests and ref tests with a
+    single driver."""
+
+    def __init__(self, port, worker_number, driver_instance_constructor, pixel_tests, no_timeout):
+        self._port = port
+        self._worker_number = worker_number
+        self._driver_instance_constructor = driver_instance_constructor
+        self._no_timeout = no_timeout
+
+        # FIXME: We shouldn't need to create a driver until we actually run a test.
+        self._driver = self._make_driver(pixel_tests)
+        self._running_drivers = {}
+        self._running_drivers[self._cmd_line_as_key(pixel_tests, [])] = self._driver
+
+    def _make_driver(self, pixel_tests):
+        return self._driver_instance_constructor(self._port, self._worker_number, pixel_tests, self._no_timeout)
+
+    # FIXME: this should be a @classmethod (or implemented on Port instead).
+    def is_http_test(self, test_name):
+        return self._driver.is_http_test(test_name)
+
+    # FIXME: this should be a @classmethod (or implemented on Port instead).
+    def test_to_uri(self, test_name):
+        return self._driver.test_to_uri(test_name)
+
+    # FIXME: this should be a @classmethod (or implemented on Port instead).
+    def uri_to_test(self, uri):
+        return self._driver.uri_to_test(uri)
+
+    def run_test(self, driver_input, stop_when_done):
+        base = self._port.lookup_virtual_test_base(driver_input.test_name)
+        if base:
+            virtual_driver_input = copy.copy(driver_input)
+            virtual_driver_input.test_name = base
+            virtual_driver_input.args = self._port.lookup_virtual_test_args(driver_input.test_name)
+            return self.run_test(virtual_driver_input, stop_when_done)
+
+        pixel_tests_needed = driver_input.should_run_pixel_test
+        cmd_line_key = self._cmd_line_as_key(pixel_tests_needed, driver_input.args)
+        if not cmd_line_key in self._running_drivers:
+            self._running_drivers[cmd_line_key] = self._make_driver(pixel_tests_needed)
+
+        return self._running_drivers[cmd_line_key].run_test(driver_input, stop_when_done)
+
+    def start(self):
+        # FIXME: Callers shouldn't normally call this, since this routine
+        # may not be specifying the correct combination of pixel test and
+        # per_test args.
+        #
+        # The only reason we have this routine at all is so the perftestrunner
+        # can pause before running a test; it might be better to push that
+        # into run_test() directly.
+        self._driver.start(self._port.get_option('pixel_tests'), [])
+
+    def has_crashed(self):
+        return any(driver.has_crashed() for driver in self._running_drivers.values())
+
+    def stop(self):
+        for driver in self._running_drivers.values():
+            driver.stop()
+
+    # FIXME: this should be a @classmethod (or implemented on Port instead).
+    def cmd_line(self, pixel_tests=None, per_test_args=None):
+        return self._driver.cmd_line(pixel_tests, per_test_args or [])
+
+    def _cmd_line_as_key(self, pixel_tests, per_test_args):
+        return ' '.join(self.cmd_line(pixel_tests, per_test_args))
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py
new file mode 100644
index 0000000..5b11ade
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py
@@ -0,0 +1,268 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+
+from webkitpy.layout_tests.port import Port, Driver, DriverOutput
+from webkitpy.layout_tests.port.server_process_mock import MockServerProcess
+
+# FIXME: remove the dependency on TestWebKitPort
+from webkitpy.layout_tests.port.port_testcase import TestWebKitPort
+
+
+class DriverOutputTest(unittest.TestCase):
+    def test_strip_metrics(self):
+        patterns = [
+            ('RenderView at (0,0) size 800x600', 'RenderView '),
+            ('text run at (0,0) width 100: "some text"', '"some text"'),
+            ('RenderBlock {HTML} at (0,0) size 800x600', 'RenderBlock {HTML} '),
+            ('RenderBlock {INPUT} at (29,3) size 12x12 [color=#000000]', 'RenderBlock {INPUT}'),
+
+            ('RenderBlock (floating) {DT} at (5,5) size 79x310 [border: (5px solid #000000)]',
+            'RenderBlock (floating) {DT} [border: px solid #000000)]'),
+
+            ('\n    "truncate text    "\n', '\n    "truncate text"\n'),
+
+            ('RenderText {#text} at (0,3) size 41x12\n    text run at (0,3) width 41: "whimper "\n',
+            'RenderText {#text} \n    "whimper"\n'),
+
+            ("""text run at (0,0) width 109: ".one {color: green;}"
+          text run at (109,0) width 0: " "
+          text run at (0,17) width 81: ".1 {color: red;}"
+          text run at (81,17) width 0: " "
+          text run at (0,34) width 102: ".a1 {color: green;}"
+          text run at (102,34) width 0: " "
+          text run at (0,51) width 120: "P.two {color: purple;}"
+          text run at (120,51) width 0: " "\n""",
+            '".one {color: green;}  .1 {color: red;}  .a1 {color: green;}  P.two {color: purple;}"\n'),
+
+            ('text-- other text', 'text--other text'),
+
+            (' some output   "truncate trailing spaces at end of line after text"   \n',
+            ' some output   "truncate trailing spaces at end of line after text"\n'),
+
+            (r'scrollWidth 120', r'scrollWidth'),
+            (r'scrollHeight 120', r'scrollHeight'),
+        ]
+
+        for pattern in patterns:
+            driver_output = DriverOutput(pattern[0], None, None, None)
+            driver_output.strip_metrics()
+            self.assertEqual(driver_output.text, pattern[1])
+
+
+class DriverTest(unittest.TestCase):
+    def make_port(self):
+        return Port(MockSystemHost())
+
+    def _assert_wrapper(self, wrapper_string, expected_wrapper):
+        wrapper = Driver(self.make_port(), None, pixel_tests=False)._command_wrapper(wrapper_string)
+        self.assertEqual(wrapper, expected_wrapper)
+
+    def test_command_wrapper(self):
+        self._assert_wrapper(None, [])
+        self._assert_wrapper("valgrind", ["valgrind"])
+
+        # Validate that shlex works as expected.
+        command_with_spaces = "valgrind --smc-check=\"check with spaces!\" --foo"
+        expected_parse = ["valgrind", "--smc-check=check with spaces!", "--foo"]
+        self._assert_wrapper(command_with_spaces, expected_parse)
+
+    def test_test_to_uri(self):
+        port = self.make_port()
+        driver = Driver(port, None, pixel_tests=False)
+        self.assertEqual(driver.test_to_uri('foo/bar.html'), 'file://%s/foo/bar.html' % port.layout_tests_dir())
+        self.assertEqual(driver.test_to_uri('http/tests/foo.html'), 'http://127.0.0.1:8000/foo.html')
+        self.assertEqual(driver.test_to_uri('http/tests/ssl/bar.html'), 'https://127.0.0.1:8443/ssl/bar.html')
+
+    def test_uri_to_test(self):
+        port = self.make_port()
+        driver = Driver(port, None, pixel_tests=False)
+        self.assertEqual(driver.uri_to_test('file://%s/foo/bar.html' % port.layout_tests_dir()), 'foo/bar.html')
+        self.assertEqual(driver.uri_to_test('http://127.0.0.1:8000/foo.html'), 'http/tests/foo.html')
+        self.assertEqual(driver.uri_to_test('https://127.0.0.1:8443/ssl/bar.html'), 'http/tests/ssl/bar.html')
+
+    def test_read_block(self):
+        port = TestWebKitPort()
+        driver = Driver(port, 0, pixel_tests=False)
+        driver._server_process = MockServerProcess(lines=[
+            'ActualHash: foobar',
+            'Content-Type: my_type',
+            'Content-Transfer-Encoding: none',
+            "#EOF",
+        ])
+        content_block = driver._read_block(0)
+        self.assertEquals(content_block.content_type, 'my_type')
+        self.assertEquals(content_block.encoding, 'none')
+        self.assertEquals(content_block.content_hash, 'foobar')
+        driver._server_process = None
+
+    def test_read_binary_block(self):
+        port = TestWebKitPort()
+        driver = Driver(port, 0, pixel_tests=True)
+        driver._server_process = MockServerProcess(lines=[
+            'ActualHash: actual',
+            'ExpectedHash: expected',
+            'Content-Type: image/png',
+            'Content-Length: 9',
+            "12345678",
+            "#EOF",
+        ])
+        content_block = driver._read_block(0)
+        self.assertEquals(content_block.content_type, 'image/png')
+        self.assertEquals(content_block.content_hash, 'actual')
+        self.assertEquals(content_block.content, '12345678\n')
+        self.assertEquals(content_block.decoded_content, '12345678\n')
+        driver._server_process = None
+
+    def test_read_base64_block(self):
+        port = TestWebKitPort()
+        driver = Driver(port, 0, pixel_tests=True)
+        driver._server_process = MockServerProcess(lines=[
+            'ActualHash: actual',
+            'ExpectedHash: expected',
+            'Content-Type: image/png',
+            'Content-Transfer-Encoding: base64',
+            'Content-Length: 12',
+            'MTIzNDU2NzgK#EOF',
+        ])
+        content_block = driver._read_block(0)
+        self.assertEquals(content_block.content_type, 'image/png')
+        self.assertEquals(content_block.content_hash, 'actual')
+        self.assertEquals(content_block.encoding, 'base64')
+        self.assertEquals(content_block.content, 'MTIzNDU2NzgK')
+        self.assertEquals(content_block.decoded_content, '12345678\n')
+
+    def test_no_timeout(self):
+        port = TestWebKitPort()
+        driver = Driver(port, 0, pixel_tests=True, no_timeout=True)
+        self.assertEquals(driver.cmd_line(True, []), ['/mock-build/DumpRenderTree', '--no-timeout', '-'])
+
+    def test_check_for_driver_crash(self):
+        port = TestWebKitPort()
+        driver = Driver(port, 0, pixel_tests=True)
+
+        class FakeServerProcess(object):
+            def __init__(self, crashed):
+                self.crashed = crashed
+
+            def pid(self):
+                return 1234
+
+            def name(self):
+                return 'FakeServerProcess'
+
+            def has_crashed(self):
+                return self.crashed
+
+            def stop(self, timeout):
+                pass
+
+        def assert_crash(driver, error_line, crashed, name, pid, unresponsive=False):
+            self.assertEquals(driver._check_for_driver_crash(error_line), crashed)
+            self.assertEquals(driver._crashed_process_name, name)
+            self.assertEquals(driver._crashed_pid, pid)
+            self.assertEquals(driver._subprocess_was_unresponsive, unresponsive)
+            driver.stop()
+
+        driver._server_process = FakeServerProcess(False)
+        assert_crash(driver, '', False, None, None)
+
+        driver._crashed_process_name = None
+        driver._crashed_pid = None
+        driver._server_process = FakeServerProcess(False)
+        driver._subprocess_was_unresponsive = False
+        assert_crash(driver, '#CRASHED\n', True, 'FakeServerProcess', 1234)
+
+        driver._crashed_process_name = None
+        driver._crashed_pid = None
+        driver._server_process = FakeServerProcess(False)
+        driver._subprocess_was_unresponsive = False
+        assert_crash(driver, '#CRASHED - WebProcess\n', True, 'WebProcess', None)
+
+        driver._crashed_process_name = None
+        driver._crashed_pid = None
+        driver._server_process = FakeServerProcess(False)
+        driver._subprocess_was_unresponsive = False
+        assert_crash(driver, '#CRASHED - WebProcess (pid 8675)\n', True, 'WebProcess', 8675)
+
+        driver._crashed_process_name = None
+        driver._crashed_pid = None
+        driver._server_process = FakeServerProcess(False)
+        driver._subprocess_was_unresponsive = False
+        assert_crash(driver, '#PROCESS UNRESPONSIVE - WebProcess (pid 8675)\n', True, 'WebProcess', 8675, True)
+
+        driver._crashed_process_name = None
+        driver._crashed_pid = None
+        driver._server_process = FakeServerProcess(False)
+        driver._subprocess_was_unresponsive = False
+        assert_crash(driver, '#CRASHED - renderer (pid 8675)\n', True, 'renderer', 8675)
+
+        driver._crashed_process_name = None
+        driver._crashed_pid = None
+        driver._server_process = FakeServerProcess(True)
+        driver._subprocess_was_unresponsive = False
+        assert_crash(driver, '', True, 'FakeServerProcess', 1234)
+
+    def test_creating_a_port_does_not_write_to_the_filesystem(self):
+        port = TestWebKitPort()
+        driver = Driver(port, 0, pixel_tests=True)
+        self.assertEquals(port._filesystem.written_files, {})
+        self.assertEquals(port._filesystem.last_tmpdir, None)
+
+    def test_stop_cleans_up_properly(self):
+        port = TestWebKitPort()
+        port._server_process_constructor = MockServerProcess
+        driver = Driver(port, 0, pixel_tests=True)
+        driver.start(True, [])
+        last_tmpdir = port._filesystem.last_tmpdir
+        self.assertNotEquals(last_tmpdir, None)
+        driver.stop()
+        self.assertFalse(port._filesystem.isdir(last_tmpdir))
+
+    def test_two_starts_cleans_up_properly(self):
+        port = TestWebKitPort()
+        port._server_process_constructor = MockServerProcess
+        driver = Driver(port, 0, pixel_tests=True)
+        driver.start(True, [])
+        last_tmpdir = port._filesystem.last_tmpdir
+        driver._start(True, [])
+        self.assertFalse(port._filesystem.isdir(last_tmpdir))
+
+    def test_start_actually_starts(self):
+        port = TestWebKitPort()
+        port._server_process_constructor = MockServerProcess
+        driver = Driver(port, 0, pixel_tests=True)
+        driver.start(True, [])
+        self.assertTrue(driver._server_process.started)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/efl.py b/Tools/Scripts/webkitpy/layout_tests/port/efl.py
new file mode 100644
index 0000000..0c9acd8
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/efl.py
@@ -0,0 +1,123 @@
+# Copyright (C) 2011 ProFUSION Embedded Systems. All rights reserved.
+# Copyright (C) 2011 Samsung Electronics. All rights reserved.
+# Copyright (C) 2012 Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""WebKit Efl implementation of the Port interface."""
+
+import os
+
+from webkitpy.layout_tests.models.test_configuration import TestConfiguration
+from webkitpy.layout_tests.port.base import Port
+from webkitpy.layout_tests.port.pulseaudio_sanitizer import PulseAudioSanitizer
+from webkitpy.layout_tests.port.xvfbdriver import XvfbDriver
+
+class EflPort(Port, PulseAudioSanitizer):
+    port_name = 'efl'
+
+    def __init__(self, *args, **kwargs):
+        super(EflPort, self).__init__(*args, **kwargs)
+
+        self._jhbuild_wrapper_path = self.path_from_webkit_base('Tools', 'efl', 'run-with-jhbuild')
+
+        self.set_option_default('wrapper', self._jhbuild_wrapper_path)
+        self.webprocess_cmd_prefix = self.get_option('webprocess_cmd_prefix')
+
+    def _port_flag_for_scripts(self):
+        return "--efl"
+
+    def setup_test_run(self):
+        self._unload_pulseaudio_module()
+
+    def setup_environ_for_server(self, server_name=None):
+        env = super(EflPort, self).setup_environ_for_server(server_name)
+        # If the DISPLAY environment variable is unset on the system
+        # (e.g. on a build bot), remove it from the dictionary.
+        if 'DISPLAY' not in os.environ:
+            del env['DISPLAY']
+        env['TEST_RUNNER_INJECTED_BUNDLE_FILENAME'] = self._build_path('lib', 'libTestRunnerInjectedBundle.so')
+        env['TEST_RUNNER_PLUGIN_PATH'] = self._build_path('lib')
+        if self.webprocess_cmd_prefix:
+            env['WEB_PROCESS_CMD_PREFIX'] = self.webprocess_cmd_prefix
+
+        return env
+
+    def default_timeout_ms(self):
+        # Tests run considerably slower under gdb
+        # or valgrind.
+        if self.get_option('webprocess_cmd_prefix'):
+            return 350 * 1000
+        return super(EflPort, self).default_timeout_ms()
+
+    def clean_up_test_run(self):
+        super(EflPort, self).clean_up_test_run()
+        self._restore_pulseaudio_module()
+
+    def _generate_all_test_configurations(self):
+        return [TestConfiguration(version=self._version, architecture='x86', build_type=build_type) for build_type in self.ALL_BUILD_TYPES]
+
+    def _driver_class(self):
+        return XvfbDriver
+
+    def _path_to_driver(self):
+        return self._build_path('bin', self.driver_name())
+
+    def _path_to_image_diff(self):
+        return self._build_path('bin', 'ImageDiff')
+
+    def _image_diff_command(self, *args, **kwargs):
+        return [self._jhbuild_wrapper_path] + super(EflPort, self)._image_diff_command(*args, **kwargs)
+
+    def _path_to_webcore_library(self):
+        static_path = self._build_path('lib', 'libwebcore_efl.a')
+        dyn_path = self._build_path('lib', 'libwebcore_efl.so')
+        return static_path if self._filesystem.exists(static_path) else dyn_path
+
+    def _search_paths(self):
+        search_paths = []
+        if self.get_option('webkit_test_runner'):
+            search_paths.append(self.port_name + '-wk2')
+            search_paths.append('wk2')
+        else:
+            search_paths.append(self.port_name + '-wk1')
+        search_paths.append(self.port_name)
+        return search_paths
+
+    def default_baseline_search_path(self):
+        return map(self._webkit_baseline_path, self._search_paths())
+
+    def expectations_files(self):
+        # FIXME: We should be able to use the default algorithm here.
+        return list(reversed([self._filesystem.join(self._webkit_baseline_path(p), 'TestExpectations') for p in self._search_paths()]))
+
+    def show_results_html_file(self, results_filename):
+        # FIXME: We should find a way to share this implementation with Gtk,
+        # or teach run-launcher how to call run-safari and move this down to WebKitPort.
+        run_launcher_args = ["file://%s" % results_filename]
+        if self.get_option('webkit_test_runner'):
+            run_launcher_args.append('-2')
+        # FIXME: old-run-webkit-tests also added ["-graphicssystem", "raster", "-style", "windows"]
+        # FIXME: old-run-webkit-tests converted results_filename path for cygwin.
+        self._run_script("run-launcher", run_launcher_args)
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/efl_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/efl_unittest.py
new file mode 100644
index 0000000..d9851b3
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/efl_unittest.py
@@ -0,0 +1,43 @@
+# Copyright (C) 2011 ProFUSION Embedded Systems. All rights reserved.
+# Copyright (C) 2011 Samsung Electronics. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.layout_tests.port.efl import EflPort
+from webkitpy.layout_tests.port import port_testcase
+from webkitpy.common.system.executive_mock import MockExecutive
+
+
+class EflPortTest(port_testcase.PortTestCase):
+    port_name = 'efl'
+    port_maker = EflPort
+
+    def test_show_results_html_file(self):
+        port = self.make_port()
+        port._executive = MockExecutive(should_log=True)
+        expected_stderr = "MOCK run_command: ['Tools/Scripts/run-launcher', '--release', '--efl', 'file://test.html'], cwd=/mock-checkout\n"
+        OutputCapture().assert_outputs(self, port.show_results_html_file, ["test.html"], expected_stderr=expected_stderr)
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/factory.py b/Tools/Scripts/webkitpy/layout_tests/port/factory.py
new file mode 100644
index 0000000..ad7c644
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/factory.py
@@ -0,0 +1,145 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Factory method to retrieve the appropriate port implementation."""
+
+import fnmatch
+import optparse
+import re
+
+from webkitpy.layout_tests.port import builders
+
+
+def platform_options(use_globs=False):
+    return [
+        optparse.make_option('--platform', action='store',
+            help=('Glob-style list of platform/ports to use (e.g., "mac*")' if use_globs else 'Platform to use (e.g., "mac-lion")')),
+        optparse.make_option('--chromium', action='store_const', dest='platform',
+            const=('chromium*' if use_globs else 'chromium'),
+            help=('Alias for --platform=chromium*' if use_globs else 'Alias for --platform=chromium')),
+        optparse.make_option('--chromium-android', action='store_const', dest='platform',
+            const=('chromium-android*' if use_globs else 'chromium-android'),
+            help=('Alias for --platform=chromium-android*' if use_globs else 'Alias for --platform=chromium-android')),
+        optparse.make_option('--efl', action='store_const', dest='platform',
+            const=('efl*' if use_globs else 'efl'),
+            help=('Alias for --platform=efl*' if use_globs else 'Alias for --platform=efl')),
+        optparse.make_option('--gtk', action='store_const', dest='platform',
+            const=('gtk*' if use_globs else 'gtk'),
+            help=('Alias for --platform=gtk*' if use_globs else 'Alias for --platform=gtk')),
+        optparse.make_option('--qt', action='store_const', dest="platform",
+            const=('qt*' if use_globs else 'qt'),
+            help=('Alias for --platform=qt*' if use_globs else 'Alias for --platform=qt')),
+        ]
+
+
+def configuration_options():
+    return [
+        optparse.make_option("-t", "--target", dest="configuration", help="(DEPRECATED)"),
+        # FIXME: --help should display which configuration is default.
+        optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
+            help='Set the configuration to Debug'),
+        optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
+            help='Set the configuration to Release'),
+        optparse.make_option('--32-bit', action='store_const', const='x86', default=None, dest="architecture",
+            help='use 32-bit binaries by default (x86 instead of x86_64)'),
+        ]
+
+
+def _builder_options(builder_name):
+    configuration = "Debug" if re.search(r"[d|D](ebu|b)g", builder_name) else "Release"
+    is_webkit2 = builder_name.find("WK2") != -1
+    builder_name = builder_name
+    return optparse.Values({'builder_name': builder_name, 'configuration': configuration, 'webkit_test_runner': is_webkit2})
+
+
+class PortFactory(object):
+    PORT_CLASSES = (
+        'chromium_android.ChromiumAndroidPort',
+        'chromium_linux.ChromiumLinuxPort',
+        'chromium_mac.ChromiumMacPort',
+        'chromium_win.ChromiumWinPort',
+        'efl.EflPort',
+        'gtk.GtkPort',
+        'mac.MacPort',
+        'mock_drt.MockDRTPort',
+        'qt.QtPort',
+        'test.TestPort',
+        'win.WinPort',
+    )
+
+    def __init__(self, host):
+        self._host = host
+
+    def _default_port(self, options):
+        platform = self._host.platform
+        if platform.is_linux() or platform.is_freebsd():
+            return 'chromium-linux'
+        elif platform.is_mac():
+            return 'mac'
+        elif platform.is_win():
+            return 'win'
+        raise NotImplementedError('unknown platform: %s' % platform)
+
+    def get(self, port_name=None, options=None, **kwargs):
+        """Returns an object implementing the Port interface. If
+        port_name is None, this routine attempts to guess at the most
+        appropriate port on this platform."""
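+        # For example, get('chromium-win-xp') is expected to resolve to
+        # chromium_win.ChromiumWinPort (see factory_unittest.py).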
+        port_name = port_name or self._default_port(options)
+
+        # FIXME(dpranke): We special-case '--platform chromium' so that it can co-exist
+        # with '--platform chromium-mac' and '--platform chromium-linux' properly (we
+        # can't look at the port_name prefix in this case).
+        if port_name == 'chromium':
+            port_name = 'chromium-' + self._host.platform.os_name
+
+        for port_class in self.PORT_CLASSES:
+            module_name, class_name = port_class.rsplit('.', 1)
+            module = __import__(module_name, globals(), locals(), [], -1)
+            cls = module.__dict__[class_name]
+            if port_name.startswith(cls.port_name):
+                port_name = cls.determine_full_port_name(self._host, options, port_name)
+                return cls(self._host, port_name, options=options, **kwargs)
+        raise NotImplementedError('unsupported platform: "%s"' % port_name)
+
+    def all_port_names(self, platform=None):
+        """Return a list of all valid, fully-specified, "real" port names.
+
+        This is the list of directories that are used as actual baseline_paths()
+        by real ports. This does not include any "fake" names like "test"
+        or "mock-mac", and it does not include any directories that are not.
+
+        If platform is not specified, we will glob-match all ports"""
+        platform = platform or '*'
+        return fnmatch.filter(builders.all_port_names(), platform)
+
+    def get_from_builder_name(self, builder_name):
+        port_name = builders.port_name_for_builder_name(builder_name)
+        assert port_name, "unrecognized builder name '%s'" % builder_name
+        return self.get(port_name, _builder_options(builder_name))
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/factory_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/factory_unittest.py
new file mode 100644
index 0000000..2980c2d
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/factory_unittest.py
@@ -0,0 +1,113 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.tool.mocktool import MockOptions
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+
+from webkitpy.layout_tests.port import chromium_android
+from webkitpy.layout_tests.port import chromium_linux
+from webkitpy.layout_tests.port import chromium_mac
+from webkitpy.layout_tests.port import chromium_win
+from webkitpy.layout_tests.port import factory
+from webkitpy.layout_tests.port import gtk
+from webkitpy.layout_tests.port import mac
+from webkitpy.layout_tests.port import qt
+from webkitpy.layout_tests.port import test
+from webkitpy.layout_tests.port import win
+
+
+class FactoryTest(unittest.TestCase):
+    """Test that the factory creates the proper port object for given combination of port_name, host.platform, and options."""
+    # FIXME: The ports themselves should expose what options they require,
+    # instead of passing generic "options".
+
+    def setUp(self):
+        self.webkit_options = MockOptions(pixel_tests=False)
+
+    def assert_port(self, port_name=None, os_name=None, os_version=None, options=None, cls=None):
+        host = MockSystemHost(os_name=os_name, os_version=os_version)
+        port = factory.PortFactory(host).get(port_name, options=options)
+        self.assertTrue(isinstance(port, cls))
+
+    def test_mac(self):
+        self.assert_port(port_name='mac-lion', cls=mac.MacPort)
+        self.assert_port(port_name='mac-lion-wk2', cls=mac.MacPort)
+        self.assert_port(port_name='mac', os_name='mac', os_version='lion', cls=mac.MacPort)
+        self.assert_port(port_name=None, os_name='mac', os_version='lion', cls=mac.MacPort)
+
+    def test_win(self):
+        self.assert_port(port_name='win-xp', cls=win.WinPort)
+        self.assert_port(port_name='win-xp-wk2', cls=win.WinPort)
+        self.assert_port(port_name='win', os_name='win', os_version='xp', cls=win.WinPort)
+        self.assert_port(port_name=None, os_name='win', os_version='xp', cls=win.WinPort)
+        self.assert_port(port_name=None, os_name='win', os_version='xp', options=self.webkit_options, cls=win.WinPort)
+
+    def test_gtk(self):
+        self.assert_port(port_name='gtk', cls=gtk.GtkPort)
+
+    def test_qt(self):
+        self.assert_port(port_name='qt', cls=qt.QtPort)
+
+    def test_chromium_mac(self):
+        self.assert_port(port_name='chromium-mac', os_name='mac', os_version='snowleopard',
+                         cls=chromium_mac.ChromiumMacPort)
+        self.assert_port(port_name='chromium', os_name='mac', os_version='lion',
+                         cls=chromium_mac.ChromiumMacPort)
+
+    def test_chromium_linux(self):
+        self.assert_port(port_name='chromium-linux', cls=chromium_linux.ChromiumLinuxPort)
+        self.assert_port(port_name='chromium', os_name='linux', os_version='lucid',
+                         cls=chromium_linux.ChromiumLinuxPort)
+
+    def test_chromium_android(self):
+        self.assert_port(port_name='chromium-android', cls=chromium_android.ChromiumAndroidPort)
+        # NOTE: We can't check for port_name=chromium here, as this will append the host's
+        # operating system, whereas host!=target for Android.
+
+    def test_chromium_win(self):
+        self.assert_port(port_name='chromium-win-xp', cls=chromium_win.ChromiumWinPort)
+        self.assert_port(port_name='chromium-win', os_name='win', os_version='xp',
+                         cls=chromium_win.ChromiumWinPort)
+        self.assert_port(port_name='chromium', os_name='win', os_version='xp',
+                         cls=chromium_win.ChromiumWinPort)
+
+    def test_unknown_specified(self):
+        self.assertRaises(NotImplementedError, factory.PortFactory(MockSystemHost()).get, port_name='unknown')
+
+    def test_unknown_default(self):
+        self.assertRaises(NotImplementedError, factory.PortFactory(MockSystemHost(os_name='vms')).get)
+
+    def test_get_from_builder_name(self):
+        self.assertEquals(factory.PortFactory(MockSystemHost()).get_from_builder_name('WebKit Mac10.7').name(),
+                          'chromium-mac-lion')
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/gtk.py b/Tools/Scripts/webkitpy/layout_tests/port/gtk.py
new file mode 100644
index 0000000..3d82027
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/gtk.py
@@ -0,0 +1,162 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import subprocess
+
+from webkitpy.layout_tests.models.test_configuration import TestConfiguration
+from webkitpy.layout_tests.port.base import Port
+from webkitpy.layout_tests.port.pulseaudio_sanitizer import PulseAudioSanitizer
+from webkitpy.layout_tests.port.xvfbdriver import XvfbDriver
+
+
+class GtkPort(Port, PulseAudioSanitizer):
+    port_name = "gtk"
+
+    def warn_if_bug_missing_in_test_expectations(self):
+        return True
+
+    def _port_flag_for_scripts(self):
+        return "--gtk"
+
+    def _driver_class(self):
+        return XvfbDriver
+
+    def default_timeout_ms(self):
+        # For now, use the base Port's default timeout value when running WebKitTestRunner.
+        if self.get_option('webkit_test_runner'):
+            return super(GtkPort, self).default_timeout_ms()
+
+        if self.get_option('configuration') == 'Debug':
+            return 12 * 1000
+        return 6 * 1000
+
+    def setup_test_run(self):
+        self._unload_pulseaudio_module()
+
+    def clean_up_test_run(self):
+        super(GtkPort, self).clean_up_test_run()
+        self._restore_pulseaudio_module()
+
+    def setup_environ_for_server(self, server_name=None):
+        environment = super(GtkPort, self).setup_environ_for_server(server_name)
+        environment['GTK_MODULES'] = 'gail'
+        environment['GSETTINGS_BACKEND'] = 'memory'
+        environment['LIBOVERLAY_SCROLLBAR'] = '0'
+        environment['TEST_RUNNER_INJECTED_BUNDLE_FILENAME'] = self._build_path('Libraries', 'libTestRunnerInjectedBundle.la')
+        environment['TEST_RUNNER_TEST_PLUGIN_PATH'] = self._build_path('TestNetscapePlugin', '.libs')
+        environment['WEBKIT_INSPECTOR_PATH'] = self._build_path('Programs', 'resources', 'inspector')
+        environment['AUDIO_RESOURCES_PATH'] = self._filesystem.join(self._config.webkit_base_dir(),
+                                                                    'Source', 'WebCore', 'platform',
+                                                                    'audio', 'resources')
+        self._copy_value_from_environ_if_set(environment, 'WEBKITOUTPUTDIR')
+        return environment
+
+    def _generate_all_test_configurations(self):
+        configurations = []
+        for build_type in self.ALL_BUILD_TYPES:
+            configurations.append(TestConfiguration(version=self._version, architecture='x86', build_type=build_type))
+        return configurations
+
+    def _path_to_driver(self):
+        return self._build_path('Programs', self.driver_name())
+
+    def _path_to_image_diff(self):
+        return self._build_path('Programs', 'ImageDiff')
+
+    def _path_to_webcore_library(self):
+        gtk_library_names = [
+            "libwebkitgtk-1.0.so",
+            "libwebkitgtk-3.0.so",
+            "libwebkit2gtk-1.0.so",
+        ]
+
+        for library in gtk_library_names:
+            full_library = self._build_path(".libs", library)
+            if self._filesystem.isfile(full_library):
+                return full_library
+        return None
+
+    # FIXME: We should find a way to share this implementation with other ports,
+    # or teach run-launcher how to call run-safari and move this down to Port.
+    def show_results_html_file(self, results_filename):
+        run_launcher_args = ["file://%s" % results_filename]
+        if self.get_option('webkit_test_runner'):
+            run_launcher_args.append('-2')
+        # FIXME: old-run-webkit-tests also added ["-graphicssystem", "raster", "-style", "windows"]
+        # FIXME: old-run-webkit-tests converted results_filename path for cygwin.
+        self._run_script("run-launcher", run_launcher_args)
+
+    def _get_gdb_output(self, coredump_path):
+        cmd = ['gdb', '-ex', 'thread apply all bt', '--batch', str(self._path_to_driver()), coredump_path]
+        proc = subprocess.Popen(cmd, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        proc.wait()
+        errors = [l.strip().decode('utf8', 'ignore') for l in proc.stderr.readlines()]
+        trace = proc.stdout.read().decode('utf8', 'ignore')
+        return (trace, errors)
+
+    def _get_crash_log(self, name, pid, stdout, stderr, newer_than):
+        pid_representation = str(pid or '<unknown>')
+        log_directory = os.environ.get("WEBKIT_CORE_DUMPS_DIRECTORY")
+        errors = []
+        crash_log = ''
+        expected_crash_dump_filename = "core-pid_%s-_-process_%s" % (pid_representation, name)
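+        # e.g. for DumpRenderTree with pid 28529 this is
+        # "core-pid_28529-_-process_DumpRenderTree" (cf. gtk_unittest.py).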
+
+        def match_filename(filesystem, directory, filename):
+            if pid:
+                return filename == expected_crash_dump_filename
+            return filename.find(name) > -1
+
+        if log_directory:
+            dumps = self._filesystem.files_under(log_directory, file_filter=match_filename)
+            if dumps:
+                # Get the most recent coredump matching the pid and/or process name.
+                coredump_path = sorted(dumps)[-1]
+                if not newer_than or self._filesystem.mtime(coredump_path) > newer_than:
+                    crash_log, errors = self._get_gdb_output(coredump_path)
+
+        stderr_lines = errors + (stderr or '<empty>').decode('utf8', 'ignore').splitlines()
+        errors_str = '\n'.join(('STDERR: ' + l) for l in stderr_lines)
+        if not crash_log:
+            if not log_directory:
+                log_directory = "/path/to/coredumps"
+            core_pattern = os.path.join(log_directory, "core-pid_%p-_-process_%e")
+            crash_log = """\
+Coredump %(expected_crash_dump_filename)s not found. To enable crash logs:
+
+- run this command as super-user: echo "%(core_pattern)s" > /proc/sys/kernel/core_pattern
+- enable core dumps: ulimit -c unlimited
+- set the WEBKIT_CORE_DUMPS_DIRECTORY environment variable: export WEBKIT_CORE_DUMPS_DIRECTORY=%(log_directory)s
+
+""" % locals()
+
+        return (stderr, """\
+Crash log for %(name)s (pid %(pid_representation)s):
+
+%(crash_log)s
+%(errors_str)s""" % locals())
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/gtk_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/gtk_unittest.py
new file mode 100644
index 0000000..f1df6bf
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/gtk_unittest.py
@@ -0,0 +1,90 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+import sys
+import os
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.layout_tests.port.gtk import GtkPort
+from webkitpy.layout_tests.port import port_testcase
+from webkitpy.common.system.executive_mock import MockExecutive
+from webkitpy.thirdparty.mock import Mock
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.tool.mocktool import MockOptions
+
+
+class GtkPortTest(port_testcase.PortTestCase):
+    port_name = 'gtk'
+    port_maker = GtkPort
+
+    def test_show_results_html_file(self):
+        port = self.make_port()
+        port._executive = MockExecutive(should_log=True)
+        expected_stderr = "MOCK run_command: ['Tools/Scripts/run-launcher', '--release', '--gtk', 'file://test.html'], cwd=/mock-checkout\n"
+        OutputCapture().assert_outputs(self, port.show_results_html_file, ["test.html"], expected_stderr=expected_stderr)
+
+    def test_default_timeout_ms(self):
+        self.assertEquals(self.make_port(options=MockOptions(configuration='Release')).default_timeout_ms(), 6000)
+        self.assertEquals(self.make_port(options=MockOptions(configuration='Debug')).default_timeout_ms(), 12000)
+        self.assertEquals(self.make_port(options=MockOptions(webkit_test_runner=True, configuration='Debug')).default_timeout_ms(), 80000)
+        self.assertEquals(self.make_port(options=MockOptions(webkit_test_runner=True, configuration='Release')).default_timeout_ms(), 80000)
+
+    def assertLinesEqual(self, a, b):
+        if hasattr(self, 'assertMultiLineEqual'):
+            self.assertMultiLineEqual(a, b)
+        else:
+            self.assertEqual(a.splitlines(), b.splitlines())
+
+    def test_get_crash_log(self):
+        core_directory = os.environ.get('WEBKIT_CORE_DUMPS_DIRECTORY', '/path/to/coredumps')
+        core_pattern = os.path.join(core_directory, "core-pid_%p-_-process_%e")
+        mock_empty_crash_log = """\
+Crash log for DumpRenderTree (pid 28529):
+
+Coredump core-pid_28529-_-process_DumpRenderTree not found. To enable crash logs:
+
+- run this command as super-user: echo "%(core_pattern)s" > /proc/sys/kernel/core_pattern
+- enable core dumps: ulimit -c unlimited
+- set the WEBKIT_CORE_DUMPS_DIRECTORY environment variable: export WEBKIT_CORE_DUMPS_DIRECTORY=%(core_directory)s
+
+
+STDERR: <empty>""" % locals()
+
+        def _mock_gdb_output(coredump_path):
+            return (mock_empty_crash_log, [])
+
+        port = self.make_port()
+        port._get_gdb_output = _mock_gdb_output
+        stderr, log = port._get_crash_log("DumpRenderTree", 28529, "", "", newer_than=None)
+        self.assertEqual(stderr, "")
+        self.assertLinesEqual(log, mock_empty_crash_log)
+
+        stderr, log = port._get_crash_log("DumpRenderTree", 28529, "", "", newer_than=0.0)
+        self.assertEqual(stderr, "")
+        self.assertLinesEqual(log, mock_empty_crash_log)
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/http_lock.py b/Tools/Scripts/webkitpy/layout_tests/port/http_lock.py
new file mode 100644
index 0000000..c2eece3
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/http_lock.py
@@ -0,0 +1,134 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+# Copyright (C) 2010 Andras Becsi (abecsi@inf.u-szeged.hu), University of Szeged
+#
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""This class helps to block NRWT threads when more NRWTs run
+perf, http and websocket tests in a same time."""
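+# Rough usage sketch: create an HttpLock, call wait_for_httpd_lock() before starting the
+# shared http/websocket servers, and call cleanup_http_lock() when they are no longer needed.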
+
+import logging
+import os
+import sys
+import tempfile
+import time
+
+from webkitpy.common.system.executive import Executive
+from webkitpy.common.system.file_lock import FileLock
+from webkitpy.common.system.filesystem import FileSystem
+
+
+_log = logging.getLogger(__name__)
+
+
+class HttpLock(object):
+    def __init__(self, lock_path, lock_file_prefix="WebKitHttpd.lock.", guard_lock="WebKit.lock", filesystem=None, executive=None):
+        self._executive = executive or Executive()
+        self._filesystem = filesystem or FileSystem()
+        self._lock_path = lock_path
+        if not self._lock_path:
+            # FIXME: FileSystem should have an accessor for tempdir()
+            self._lock_path = tempfile.gettempdir()
+        self._lock_file_prefix = lock_file_prefix
+        self._lock_file_path_prefix = self._filesystem.join(self._lock_path, self._lock_file_prefix)
+        self._guard_lock_file = self._filesystem.join(self._lock_path, guard_lock)
+        self._guard_lock = FileLock(self._guard_lock_file)
+        self._process_lock_file_name = ""
+
+    def cleanup_http_lock(self):
+        """Delete the lock file if exists."""
+        if self._filesystem.exists(self._process_lock_file_name):
+            _log.debug("Removing lock file: %s" % self._process_lock_file_name)
+            self._filesystem.remove(self._process_lock_file_name)
+
+    def _extract_lock_number(self, lock_file_name):
+        """Return the lock number from lock file."""
+        prefix_length = len(self._lock_file_path_prefix)
+        return int(lock_file_name[prefix_length:])
+
+    def _lock_file_list(self):
+        """Return the list of lock files sequentially."""
+        lock_list = self._filesystem.glob(self._lock_file_path_prefix + '*')
+        lock_list.sort(key=self._extract_lock_number)
+        return lock_list
+
+    def _next_lock_number(self):
+        """Return the next available lock number."""
+        lock_list = self._lock_file_list()
+        if not lock_list:
+            return 0
+        return self._extract_lock_number(lock_list[-1]) + 1
+
+    def _current_lock_pid(self):
+        """Return with the current lock pid. If the lock is not valid
+        it deletes the lock file."""
+        lock_list = self._lock_file_list()
+        if not lock_list:
+            _log.debug("No lock file list")
+            return
+        try:
+            current_pid = self._filesystem.read_text_file(lock_list[0])
+            if not (current_pid and self._executive.check_running_pid(int(current_pid))):
+                _log.debug("Removing stuck lock file: %s" % lock_list[0])
+                self._filesystem.remove(lock_list[0])
+                return
+        except IOError, e:
+            _log.debug("IOError: %s" % e)
+            return
+        except OSError, e:
+            _log.debug("OSError: %s" % e)
+            return
+        return int(current_pid)
+
+    def _create_lock_file(self):
+        """The lock files are used to schedule the running test sessions in first
+        come first served order. The guard lock ensures that the lock numbers are
+        sequential."""
+        if not self._filesystem.exists(self._lock_path):
+            _log.debug("Lock directory does not exist: %s" % self._lock_path)
+            return False
+
+        if not self._guard_lock.acquire_lock():
+            _log.debug("Guard lock timed out!")
+            return False
+
+        self._process_lock_file_name = (self._lock_file_path_prefix + str(self._next_lock_number()))
+        _log.debug("Creating lock file: %s" % self._process_lock_file_name)
+        # FIXME: Executive.py should have an accessor for getpid()
+        self._filesystem.write_text_file(self._process_lock_file_name, str(os.getpid()))
+        self._guard_lock.release_lock()
+        return True
+
+    def wait_for_httpd_lock(self):
+        """Create a lock file and wait until it's turn comes. If something goes wrong
+        it wont do any locking."""
+        if not self._create_lock_file():
+            _log.debug("Warning, http locking failed!")
+            return
+
+        # FIXME: This can hang forever!
+        while self._current_lock_pid() != os.getpid():
+            time.sleep(1)
+
+        _log.debug("HTTP lock acquired")
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/http_lock_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/http_lock_unittest.py
new file mode 100644
index 0000000..fbf2d9d
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/http_lock_unittest.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+#
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from http_lock import HttpLock
+import os  # Used for os.getpid()
+import unittest
+
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.executive_mock import MockExecutive
+
+
+# FIXME: These tests all touch the real disk, but could be written to a MockFileSystem instead.
+class HttpLockTestWithRealFileSystem(unittest.TestCase):
+    # FIXME: Unit tests do not use an __init__ method, but rather setUp and tearDown methods.
+    def __init__(self, testFunc):
+        self.http_lock = HttpLock(None, "WebKitTestHttpd.lock.", "WebKitTest.lock")
+        self.filesystem = self.http_lock._filesystem  # FIXME: We should be passing in a MockFileSystem instead.
+        self.lock_file_path_prefix = self.filesystem.join(self.http_lock._lock_path, self.http_lock._lock_file_prefix)
+        self.lock_file_name = self.lock_file_path_prefix + "0"
+        self.guard_lock_file = self.http_lock._guard_lock_file
+        self.clean_all_lockfile()
+        unittest.TestCase.__init__(self, testFunc)
+
+    def clean_all_lockfile(self):
+        if self.filesystem.exists(self.guard_lock_file):
+            self.filesystem.remove(self.guard_lock_file)
+        lock_list = self.filesystem.glob(self.lock_file_path_prefix + '*')
+        for file_name in lock_list:
+            self.filesystem.remove(file_name)
+
+    def assertEqual(self, first, second):
+        if first != second:
+            self.clean_all_lockfile()
+        unittest.TestCase.assertEqual(self, first, second)
+
+    def _check_lock_file(self):
+        if self.filesystem.exists(self.lock_file_name):
+            pid = os.getpid()
+            lock_file_pid = self.filesystem.read_text_file(self.lock_file_name)
+            self.assertEqual(pid, int(lock_file_pid))
+            return True
+        return False
+
+    def test_lock_lifecycle(self):
+        self.http_lock._create_lock_file()
+
+        self.assertEqual(True, self._check_lock_file())
+        self.assertEqual(1, self.http_lock._next_lock_number())
+
+        self.http_lock.cleanup_http_lock()
+
+        self.assertEqual(False, self._check_lock_file())
+        self.assertEqual(0, self.http_lock._next_lock_number())
+
+
+class HttpLockTest(unittest.TestCase):
+    def setUp(self):
+        self.filesystem = MockFileSystem()
+        self.http_lock = HttpLock(None, "WebKitTestHttpd.lock.", "WebKitTest.lock", filesystem=self.filesystem, executive=MockExecutive())
+        # FIXME: Shouldn't we be able to get these values from the http_lock object directly?
+        self.lock_file_path_prefix = self.filesystem.join(self.http_lock._lock_path, self.http_lock._lock_file_prefix)
+        self.lock_file_name = self.lock_file_path_prefix + "0"
+
+    def test_current_lock_pid(self):
+        # FIXME: Once Executive wraps getpid, we can mock this and not use a real pid.
+        current_pid = os.getpid()
+        self.http_lock._filesystem.write_text_file(self.lock_file_name, str(current_pid))
+        self.assertEquals(self.http_lock._current_lock_pid(), current_pid)
+
+    def test_extract_lock_number(self):
+        lock_file_list = (
+            self.lock_file_path_prefix + "00",
+            self.lock_file_path_prefix + "9",
+            self.lock_file_path_prefix + "001",
+            self.lock_file_path_prefix + "021",
+        )
+
+        expected_number_list = (0, 9, 1, 21)
+
+        for lock_file, expected in zip(lock_file_list, expected_number_list):
+            self.assertEqual(self.http_lock._extract_lock_number(lock_file), expected)
+
+    def test_lock_file_list(self):
+        self.http_lock._filesystem = MockFileSystem({
+            self.lock_file_path_prefix + "6": "",
+            self.lock_file_path_prefix + "1": "",
+            self.lock_file_path_prefix + "4": "",
+            self.lock_file_path_prefix + "3": "",
+        })
+
+        expected_file_list = [
+            self.lock_file_path_prefix + "1",
+            self.lock_file_path_prefix + "3",
+            self.lock_file_path_prefix + "4",
+            self.lock_file_path_prefix + "6",
+        ]
+
+        self.assertEqual(self.http_lock._lock_file_list(), expected_file_list)
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/image_diff.py b/Tools/Scripts/webkitpy/layout_tests/port/image_diff.py
new file mode 100644
index 0000000..72d061f
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/image_diff.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi <rgabor@inf.u-szeged.hu>, University of Szeged
+# Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""WebKit implementations of the Port interface."""
+
+import logging
+import re
+import time
+
+from webkitpy.layout_tests.port import server_process
+
+
+_log = logging.getLogger(__name__)
+
+
+class ImageDiffer(object):
+    def __init__(self, port):
+        self._port = port
+        self._tolerance = None
+        self._process = None
+
+    def diff_image(self, expected_contents, actual_contents, tolerance):
+        if tolerance != self._tolerance:
+            self.stop()
+        try:
+            assert(expected_contents)
+            assert(actual_contents)
+            assert(tolerance is not None)
+
+            if not self._process:
+                self._start(tolerance)
+            # Note that although we are handed 'old', 'new', ImageDiff wants 'new', 'old'.
+            self._process.write('Content-Length: %d\n%sContent-Length: %d\n%s' % (
+                len(actual_contents), actual_contents,
+                len(expected_contents), expected_contents))
+            return self._read()
+        except IOError as exception:
+            return (None, 0, "Failed to compute an image diff: %s" % str(exception))
+
+    def _start(self, tolerance):
+        command = [self._port._path_to_image_diff(), '--tolerance', str(tolerance)]
+        environment = self._port.setup_environ_for_server('ImageDiff')
+        self._process = self._port._server_process_constructor(self._port, 'ImageDiff', command, environment)
+        self._process.start()
+        self._tolerance = tolerance
+
+    def _read(self):
+        deadline = time.time() + 2.0
+        output = None
+        output_image = ""
+
+        while not self._process.timed_out and not self._process.has_crashed():
+            output = self._process.read_stdout_line(deadline)
+            if self._process.timed_out or self._process.has_crashed() or not output:
+                break
+
+            if output.startswith('diff'):  # This is the last line ImageDiff prints.
+                break
+
+            if output.startswith('Content-Length'):
+                m = re.match('Content-Length: (\d+)', output)
+                content_length = int(m.group(1))
+                output_image = self._process.read_stdout(deadline, content_length)
+                output = self._process.read_stdout_line(deadline)
+                break
+
+        stderr = self._process.pop_all_buffered_stderr()
+        err_str = ''
+        if stderr:
+            err_str += "ImageDiff produced stderr output:\n" + stderr
+        if self._process.timed_out:
+            err_str += "ImageDiff timed out\n"
+        if self._process.has_crashed():
+            err_str += "ImageDiff crashed\n"
+
+        # FIXME: There is no need to shut down the ImageDiff server after every diff.
+        self._process.stop()
+
+        diff_percent = 0
+        if output and output.startswith('diff'):
+            m = re.match('diff: (.+)% (passed|failed)', output)
+            if m.group(2) == 'passed':
+                return (None, 0, None)
+            diff_percent = float(m.group(1))
+
+        return (output_image, diff_percent, err_str or None)
+
+    def stop(self):
+        if self._process:
+            self._process.stop()
+            self._process = None
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/image_diff_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/image_diff_unittest.py
new file mode 100755
index 0000000..46cc98a
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/image_diff_unittest.py
@@ -0,0 +1,57 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit testing base class for Port implementations."""
+
+import unittest
+
+from webkitpy.layout_tests.port.server_process_mock import MockServerProcess
+from webkitpy.layout_tests.port.image_diff import ImageDiffer
+
+
+class FakePort(object):
+    def __init__(self, server_process_output):
+        self._server_process_constructor = lambda port, nm, cmd, env: MockServerProcess(lines=server_process_output)
+
+    def _path_to_image_diff(self):
+        return ''
+
+    def setup_environ_for_server(self, nm):
+        return None
+
+
+class TestImageDiffer(unittest.TestCase):
+    def test_diff_image_failed(self):
+        port = FakePort(['diff: 100% failed\n'])
+        image_differ = ImageDiffer(port)
+        self.assertEquals(image_differ.diff_image('foo', 'bar', 0.1), ('', 100.0, None))
+
+    def test_diff_image_passed(self):
+        port = FakePort(['diff: 0% passed\n'])
+        image_differ = ImageDiffer(port)
+        self.assertEquals(image_differ.diff_image('foo', 'bar', 0.1), (None, 0, None))
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/leakdetector.py b/Tools/Scripts/webkitpy/layout_tests/port/leakdetector.py
new file mode 100644
index 0000000..f46cd34
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/leakdetector.py
@@ -0,0 +1,153 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import re
+
+from webkitpy.common.system.executive import ScriptError
+
+_log = logging.getLogger(__name__)
+
+
+# If other ports/platforms decide to support --leaks, we should see about sharing as much of this code as possible.
+# Right now this code is only used by Apple's MacPort.
+
+class LeakDetector(object):
+    def __init__(self, port):
+        # We should operate on a "platform" not a port here.
+        self._port = port
+        self._executive = port._executive
+        self._filesystem = port._filesystem
+
+    # We exclude the following reported leaks so they do not get in our way when looking for WebKit leaks.
+    # This allows us to ignore known leaks and only be alerted when new leaks occur. Some leaks are in old
+    # versions of the system frameworks that are being used by the leaks bots. Even though a leak has been
+    # fixed, it will be listed here until the bot has been updated with the newer frameworks.
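+    # (Each entry below is turned into an --exclude-callstack=... or
+    # --exclude-type=... argument for the run-leaks script; see _leaks_args.)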
+    def _types_to_exlude_from_leaks(self):
+        # Currently we don't have any type excludes from OS leaks, but we will likely have some again in the future.
+        return []
+
+    def _callstacks_to_exclude_from_leaks(self):
+        callstacks = [
+            "Flash_EnforceLocalSecurity",  # leaks in Flash plug-in code, rdar://problem/4449747
+            "ScanFromString", # <http://code.google.com/p/angleproject/issues/detail?id=249> leak in ANGLE
+        ]
+        if self._port.is_snowleopard():
+            callstacks += [
+                "readMakerNoteProps",  # <rdar://problem/7156432> leak in ImageIO
+                "QTKitMovieControllerView completeUISetup",  # <rdar://problem/7155156> leak in QTKit
+                "getVMInitArgs",  # <rdar://problem/7714444> leak in Java
+                "Java_java_lang_System_initProperties",  # <rdar://problem/7714465> leak in Java
+                "glrCompExecuteKernel",  # <rdar://problem/7815391> leak in graphics driver while using OpenGL
+                "NSNumberFormatter getObjectValue:forString:errorDescription:",  # <rdar://problem/7149350> Leak in NSNumberFormatter
+            ]
+        elif self._port.is_lion():
+            callstacks += [
+                "FigByteFlumeCustomURLCreateWithURL", # <rdar://problem/10461926> leak in CoreMedia
+                "PDFPage\(PDFPageInternal\) pageLayoutIfAvail", # <rdar://problem/10462055> leak in PDFKit
+                "SecTransformExecute", # <rdar://problem/10470667> leak in Security.framework
+                "_NSCopyStyleRefForFocusRingStyleClip", # <rdar://problem/10462031> leak in AppKit
+            ]
+        return callstacks
+
+    def _leaks_args(self, pid):
+        leaks_args = []
+        for callstack in self._callstacks_to_exclude_from_leaks():
+            leaks_args += ['--exclude-callstack=%s' % callstack]
+        for excluded_type in self._types_to_exlude_from_leaks():
+            leaks_args += ['--exclude-type=%s' % excluded_type]
+        leaks_args.append(pid)
+        return leaks_args
+
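+    # The 'leaks' tool summarizes its output with lines like
+    #     "Process 5122: 337301 leaks for 6525216 total leaked bytes."
+    #     "17 leaks excluded (not printed)"
+    # (see the example output in leakdetector_unittest.py); the regular
+    # expressions below pick these apart.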
+    def _parse_leaks_output(self, leaks_output):
+        _, count, bytes = re.search(r'Process (?P<pid>\d+): (?P<count>\d+) leaks? for (?P<bytes>\d+) total', leaks_output).groups()
+        excluded_match = re.search(r'(?P<excluded>\d+) leaks? excluded', leaks_output)
+        excluded = excluded_match.group('excluded') if excluded_match else 0
+        return int(count), int(excluded), int(bytes)
+
+    def leaks_files_in_directory(self, directory):
+        return self._filesystem.glob(self._filesystem.join(directory, "*-leaks.txt"))
+
+    def leaks_file_name(self, process_name, process_pid):
+        # We include the process name and pid in the file name to prevent overwriting previous leak results.
+        return "%s-%s-leaks.txt" % (process_name, process_pid)
+
+    def count_total_bytes_and_unique_leaks(self, leak_files):
+        merge_depth = 5  # ORWT had a --merge-leak-depth argument, but that seems out of scope for the run-webkit-tests tool.
+        args = [
+            '--merge-depth',
+            merge_depth,
+        ] + leak_files
+        try:
+            parse_malloc_history_output = self._port._run_script("parse-malloc-history", args, include_configuration_arguments=False)
+        except ScriptError, e:
+            _log.warn("Failed to parse leaks output: %s" % e.message_with_output())
+            return
+
+        # total: 5,888 bytes (0 bytes excluded).
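+        # Each unique leak is summarized on its own line, e.g.
+        # "147 calls for 9,408 bytes: _CFRuntimeCreateInstance | ...".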
+        unique_leak_count = len(re.findall(r'^(\d*)\scalls', parse_malloc_history_output, re.MULTILINE))
+        total_bytes_string = re.search(r'^total\:\s(.+)\s\(', parse_malloc_history_output, re.MULTILINE).group(1)
+        return (total_bytes_string, unique_leak_count)
+
+    def count_total_leaks(self, leak_file_paths):
+        total_leaks = 0
+        for leak_file_path in leak_file_paths:
+            # Leaks have been seen to include non-utf8 data, so we use read_binary_file.
+            # See https://bugs.webkit.org/show_bug.cgi?id=71112.
+            leaks_output = self._filesystem.read_binary_file(leak_file_path)
+            count, _, _ = self._parse_leaks_output(leaks_output)
+            total_leaks += count
+        return total_leaks
+
+    def check_for_leaks(self, process_name, process_pid):
+        _log.debug("Checking for leaks in %s" % process_name)
+        try:
+            # Oddly enough, run-leaks (or the underlying leaks tool) does not seem to always output utf-8,
+            # thus we pass decode_output=False.  Without this code we've seen errors like:
+            # "UnicodeDecodeError: 'utf8' codec can't decode byte 0x88 in position 779874: unexpected code byte"
+            leaks_output = self._port._run_script("run-leaks", self._leaks_args(process_pid), include_configuration_arguments=False, decode_output=False)
+        except ScriptError, e:
+            _log.warn("Failed to run leaks tool: %s" % e.message_with_output())
+            return
+
+        # FIXME: We end up parsing this output 3 times.  Once here and twice for summarizing.
+        count, excluded, bytes = self._parse_leaks_output(leaks_output)
+        adjusted_count = count - excluded
+        if not adjusted_count:
+            return
+
+        leaks_filename = self.leaks_file_name(process_name, process_pid)
+        leaks_output_path = self._filesystem.join(self._port.results_directory(), leaks_filename)
+        self._filesystem.write_binary_file(leaks_output_path, leaks_output)
+
+        # FIXME: Ideally we would not be logging from the worker process, but rather pass the leak
+        # information back to the manager and have it log.
+        if excluded:
+            _log.info("%s leaks (%s bytes including %s excluded leaks) were found, details in %s" % (adjusted_count, bytes, excluded, leaks_output_path))
+        else:
+            _log.info("%s leaks (%s bytes) were found, details in %s" % (count, bytes, leaks_output_path))
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/leakdetector_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/leakdetector_unittest.py
new file mode 100644
index 0000000..7628bb7
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/leakdetector_unittest.py
@@ -0,0 +1,152 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.layout_tests.port.leakdetector import LeakDetector
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.system.executive_mock import MockExecutive
+
+
+class LeakDetectorTest(unittest.TestCase):
+    def _mock_port(self):
+        class MockPort(object):
+            def __init__(self):
+                self._filesystem = MockFileSystem()
+                self._executive = MockExecutive()
+
+        return MockPort()
+
+    def _make_detector(self):
+        return LeakDetector(self._mock_port())
+
+    def test_leaks_args(self):
+        detector = self._make_detector()
+        detector._callstacks_to_exclude_from_leaks = lambda: ['foo bar', 'BAZ']
+        detector._types_to_exlude_from_leaks = lambda: ['abcdefg', 'hi jklmno']
+        expected_args = ['--exclude-callstack=foo bar', '--exclude-callstack=BAZ', '--exclude-type=abcdefg', '--exclude-type=hi jklmno', 1234]
+        self.assertEquals(detector._leaks_args(1234), expected_args)
+
+    example_leaks_output = """Process 5122: 663744 nodes malloced for 78683 KB
+Process 5122: 337301 leaks for 6525216 total leaked bytes.
+Leak: 0x38cb600  size=3072  zone: DefaultMallocZone_0x1d94000   instance of 'NSCFData', type ObjC, implemented in Foundation
+        0xa033f0b8 0x01001384 0x00000b3a 0x00000b3a     ..3.....:...:...
+        0x00000000 0x038cb620 0x00000000 0x00000000     .... ...........
+        0x00000000 0x21000000 0x726c6468 0x00000000     .......!hdlr....
+        0x00000000 0x7269646d 0x6c707061 0x00000000     ....mdirappl....
+        0x00000000 0x04000000 0x736c69c1 0x00000074     .........ilst...
+        0x6f74a923 0x0000006f 0x7461641b 0x00000061     #.too....data...
+        0x00000001 0x76614c00 0x2e323566 0x302e3236     .....Lavf52.62.0
+        0x37000000 0x6d616ea9 0x2f000000 0x61746164     ...7.nam.../data
+        ...
+Leak: 0x2a9c960  size=288  zone: DefaultMallocZone_0x1d94000
+        0x09a1cc47 0x1bda8560 0x3d472cd1 0xfbe9bccd     G...`....,G=....
+        0x8bcda008 0x9e972a91 0xa892cf63 0x2448bdb0     .....*..c.....H$
+        0x4736fc34 0xdbe2d94e 0x25f56688 0x839402a4     4.6GN....f.%....
+        0xd12496b3 0x59c40c12 0x8cfcab2a 0xd20ef9c4     ..$....Y*.......
+        0xe7c56b1b 0x5835af45 0xc69115de 0x6923e4bb     .k..E.5X......#i
+        0x86f15553 0x15d40fa9 0x681288a4 0xc33298a9     SU.........h..2.
+        0x439bb535 0xc4fc743d 0x7dfaaff8 0x2cc49a4a     5..C=t.....}J..,
+        0xdd119df8 0x7e086821 0x3d7d129e 0x2e1b1547     ....!h.~..}=G...
+        ...
+Leak: 0x25102fe0  size=176  zone: DefaultMallocZone_0x1d94000   string 'NSException Data'
+"""
+
+    example_leaks_output_with_exclusions = """
+Process 57064: 865808 nodes malloced for 81032 KB
+Process 57064: 282 leaks for 21920 total leaked bytes.
+Leak: 0x7fc506023960  size=576  zone: DefaultMallocZone_0x107c29000   URLConnectionLoader::LoaderConnectionEventQueue  C++  CFNetwork
+        0x73395460 0x00007fff 0x7488af40 0x00007fff     `T9s....@..t....
+        0x73395488 0x00007fff 0x46eecd74 0x0001ed83     .T9s....t..F....
+        0x0100000a 0x00000000 0x7488bfc0 0x00007fff     ...........t....
+        0x00000000 0x00000000 0x46eecd8b 0x0001ed83     ...........F....
+        0x00000000 0x00000000 0x00000000 0x00000000     ................
+        0x00000000 0x00000000 0x46eecda3 0x0001ed83     ...........F....
+        0x00000000 0x00000000 0x00000000 0x00000000     ................
+        0x00000000 0x00000000 0x46eecdbc 0x0001ed83     ...........F....
+        ...
+Leak: 0x7fc506025980  size=432  zone: DefaultMallocZone_0x107c29000   URLConnectionInstanceData  CFType  CFNetwork
+        0x74862b28 0x00007fff 0x00012b80 0x00000001     (+.t.....+......
+        0x73395310 0x00007fff 0x733953f8 0x00007fff     .S9s.....S9s....
+        0x4d555458 0x00000000 0x00000000 0x00002068     XTUM........h ..
+        0x00000000 0x00000000 0x00000b00 0x00000b00     ................
+        0x00000000 0x00000000 0x060259b8 0x00007fc5     .........Y......
+        0x060259bc 0x00007fc5 0x00000000 0x00000000     .Y..............
+        0x73395418 0x00007fff 0x06025950 0x00007fc5     .T9s....PY......
+        0x73395440 0x00007fff 0x00005013 0x00000001     @T9s.....P......
+        ...
+
+
+Binary Images:
+       0x107ac2000 -        0x107b4aff7 +DumpRenderTree (??? - ???) <5694BE03-A60A-30B2-9D40-27CFFCFB88EE> /Volumes/Data/WebKit-BuildSlave/lion-intel-leaks/build/WebKitBuild/Debug/DumpRenderTree
+       0x107c2f000 -        0x107c58fff +libWebCoreTestSupport.dylib (535.8.0 - compatibility 1.0.0) <E4F7A13E-5807-30F7-A399-62F8395F9106> /Volumes/Data/WebKit-BuildSlave/lion-intel-leaks/build/WebKitBuild/Debug/libWebCoreTestSupport.dylib
+17 leaks excluded (not printed)
+"""
+
+    def test_parse_leaks_output(self):
+        self.assertEquals(self._make_detector()._parse_leaks_output(self.example_leaks_output), (337301, 0, 6525216))
+        self.assertEquals(self._make_detector()._parse_leaks_output(self.example_leaks_output_with_exclusions), (282, 17, 21920))
+
+    def test_leaks_files_in_directory(self):
+        detector = self._make_detector()
+        self.assertEquals(detector.leaks_files_in_directory('/bogus-directory'), [])
+        detector._filesystem = MockFileSystem({
+            '/mock-results/DumpRenderTree-1234-leaks.txt': '',
+            '/mock-results/DumpRenderTree-23423-leaks.txt': '',
+            '/mock-results/DumpRenderTree-823-leaks.txt': '',
+        })
+        self.assertEquals(len(detector.leaks_files_in_directory('/mock-results')), 3)
+
+    def test_count_total_bytes_and_unique_leaks(self):
+        detector = self._make_detector()
+
+        def mock_run_script(name, args, include_configuration_arguments=False):
+            print "MOCK _run_script: %s %s" % (name, args)
+            return """1 calls for 16 bytes: -[NSURLRequest mutableCopyWithZone:] | +[NSObject(NSObject) allocWithZone:] | _internal_class_createInstanceFromZone | calloc | malloc_zone_calloc
+
+147 calls for 9,408 bytes: _CFRuntimeCreateInstance | _ZN3WTF24StringWrapperCFAllocatorL8allocateElmPv StringImplCF.cpp:67 | WTF::fastMalloc(unsigned long) FastMalloc.cpp:268 | malloc | malloc_zone_malloc 
+
+total: 5,888 bytes (0 bytes excluded)."""
+        detector._port._run_script = mock_run_script
+
+        leak_files = ['/mock-results/DumpRenderTree-1234-leaks.txt', '/mock-results/DumpRenderTree-1235-leaks.txt']
+        expected_stdout = "MOCK _run_script: parse-malloc-history ['--merge-depth', 5, '/mock-results/DumpRenderTree-1234-leaks.txt', '/mock-results/DumpRenderTree-1235-leaks.txt']\n"
+        results_tuple = OutputCapture().assert_outputs(self, detector.count_total_bytes_and_unique_leaks, [leak_files], expected_stdout=expected_stdout)
+        self.assertEquals(results_tuple, ("5,888 bytes", 2))
+
+    def test_count_total_leaks(self):
+        detector = self._make_detector()
+        detector._filesystem = MockFileSystem({
+            # The \xff is some non-utf8 characters to make sure we don't blow up trying to parse the file.
+            '/mock-results/DumpRenderTree-1234-leaks.txt': '\xff\nProcess 1234: 12 leaks for 40 total leaked bytes.\n\xff\n',
+            '/mock-results/DumpRenderTree-23423-leaks.txt': 'Process 1235: 12341 leaks for 27934 total leaked bytes.\n',
+            '/mock-results/DumpRenderTree-823-leaks.txt': 'Process 12356: 23412 leaks for 18 total leaked bytes.\n',
+        })
+        leak_file_paths = ['/mock-results/DumpRenderTree-1234-leaks.txt', '/mock-results/DumpRenderTree-23423-leaks.txt', '/mock-results/DumpRenderTree-823-leaks.txt']
+        self.assertEquals(detector.count_total_leaks(leak_file_paths), 35765)
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/mac.py b/Tools/Scripts/webkitpy/layout_tests/port/mac.py
new file mode 100644
index 0000000..e6fd5bd
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/mac.py
@@ -0,0 +1,280 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+# Copyright (C) 2012 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import os
+import re
+import subprocess
+import sys
+import time
+
+from webkitpy.common.system.crashlogs import CrashLogs
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.layout_tests.port.apple import ApplePort
+from webkitpy.layout_tests.port.leakdetector import LeakDetector
+
+
+_log = logging.getLogger(__name__)
+
+
+class MacPort(ApplePort):
+    port_name = "mac"
+
+    VERSION_FALLBACK_ORDER = ['mac-snowleopard', 'mac-lion', 'mac-mountainlion']
+
+    ARCHITECTURES = ['x86_64', 'x86']
+
+    def __init__(self, host, port_name, **kwargs):
+        ApplePort.__init__(self, host, port_name, **kwargs)
+        self._architecture = self.get_option('architecture')
+
+        if not self._architecture:
+            self._architecture = 'x86_64'
+
+        self._leak_detector = LeakDetector(self)
+        if self.get_option("leaks"):
+            # DumpRenderTree slows down noticeably if we run more than about 1000 tests in a batch
+            # with MallocStackLogging enabled.
+            self.set_option_default("batch_size", 1000)
+
+    def default_timeout_ms(self):
+        if self.get_option('guard_malloc'):
+            return 350 * 1000
+        return super(MacPort, self).default_timeout_ms()
+
+    def _build_driver_flags(self):
+        return ['ARCHS=i386'] if self.architecture() == 'x86' else []
+
+    def should_retry_crashes(self):
+        # On Apple Mac, we retry crashes due to https://bugs.webkit.org/show_bug.cgi?id=82233
+        return True
+
+    def default_baseline_search_path(self):
+        if self._name.endswith(self.FUTURE_VERSION):
+            fallback_names = [self.port_name]
+        else:
+            fallback_names = self.VERSION_FALLBACK_ORDER[self.VERSION_FALLBACK_ORDER.index(self._name):-1] + [self.port_name]
+        if self.get_option('webkit_test_runner'):
+            fallback_names.insert(0, self._wk2_port_name())
+            # Note we do not add 'wk2' here, even though it's included in _skipped_search_paths().
+        return map(self._webkit_baseline_path, fallback_names)
+
+    def setup_environ_for_server(self, server_name=None):
+        env = super(MacPort, self).setup_environ_for_server(server_name)
+        if server_name == self.driver_name():
+            if self.get_option('leaks'):
+                env['MallocStackLogging'] = '1'
+            if self.get_option('guard_malloc'):
+                env['DYLD_INSERT_LIBRARIES'] = '/usr/lib/libgmalloc.dylib'
+        env['XML_CATALOG_FILES'] = ''  # work around missing /etc/catalog <rdar://problem/4292995>
+        return env
+
+    def operating_system(self):
+        return 'mac'
+
+    # Belongs on a Platform object.
+    def is_snowleopard(self):
+        return self._version == "snowleopard"
+
+    # Belongs on a Platform object.
+    def is_lion(self):
+        return self._version == "lion"
+
+    def default_child_processes(self):
+        # FIXME: The Printer isn't initialized when this is called, so using _log would just show an uninitialized logger error.
+
+        if self._version == "snowleopard":
+            print >> sys.stderr, "Cannot run tests in parallel on Snow Leopard due to rdar://problem/10621525."
+            return 1
+
+        default_count = super(MacPort, self).default_child_processes()
+
+        # FIXME: https://bugs.webkit.org/show_bug.cgi?id=95906  With too many WebProcesses, WK2 tests get stuck in resource contention.
+        # To alleviate the issue, reduce the number of running processes.
+        # Anecdotal evidence suggests that a machine with 4 cores/8 logical cores may run into this, but that one with 2 cores/4 logical cores does not.
+        if self.get_option('webkit_test_runner') and default_count > 4:
+            default_count = int(.75 * default_count)
+
+        # Make sure we have enough ram to support that many instances:
+        total_memory = self.host.platform.total_bytes_memory()
+        bytes_per_drt = 256 * 1024 * 1024  # Assume each DRT needs 256MB to run.
+        overhead = 2048 * 1024 * 1024  # Assume we need 2GB free for the O/S
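+        # For example, a machine with 8 GB of RAM could support
+        # (8 GB - 2 GB) / 256 MB = 24 DumpRenderTree instances under these assumptions.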
+        supportable_instances = max((total_memory - overhead) / bytes_per_drt, 1)  # Always use one process, even if we don't have space for it.
+        if supportable_instances < default_count:
+            print >> sys.stderr, "This machine could support %s child processes, but only has enough memory for %s." % (default_count, supportable_instances)
+        return min(supportable_instances, default_count)
+
+    def _build_java_test_support(self):
+        java_tests_path = self._filesystem.join(self.layout_tests_dir(), "java")
+        build_java = ["/usr/bin/make", "-C", java_tests_path]
+        if self._executive.run_command(build_java, return_exit_code=True):  # Paths are absolute, so we don't need to set a cwd.
+            _log.error("Failed to build Java support files: %s" % build_java)
+            return False
+        return True
+
+    def check_for_leaks(self, process_name, process_pid):
+        if not self.get_option('leaks'):
+            return
+        # We could use http://code.google.com/p/psutil/ to get the process_name from the pid.
+        self._leak_detector.check_for_leaks(process_name, process_pid)
+
+    def print_leaks_summary(self):
+        if not self.get_option('leaks'):
+            return
+        # We're in the manager process, so the leak detector will not have a valid list of leak files.
+        # FIXME: This is a hack, but we don't have a better way to get this information from the workers yet.
+        # FIXME: This will include too many leaks in subsequent runs until the results directory is cleared!
+        leaks_files = self._leak_detector.leaks_files_in_directory(self.results_directory())
+        if not leaks_files:
+            return
+        total_bytes_string, unique_leaks = self._leak_detector.count_total_bytes_and_unique_leaks(leaks_files)
+        total_leaks = self._leak_detector.count_total_leaks(leaks_files)
+        _log.info("%s total leaks found for a total of %s!" % (total_leaks, total_bytes_string))
+        _log.info("%s unique leaks found!" % unique_leaks)
+
+    def _check_port_build(self):
+        return self._build_java_test_support()
+
+    def _path_to_webcore_library(self):
+        return self._build_path('WebCore.framework/Versions/A/WebCore')
+
+    def show_results_html_file(self, results_filename):
+        # We don't use self._run_script() because we don't want to wait for the script
+        # to exit and we want the output to show up on stdout in case there are errors
+        # launching the browser.
+        self._executive.popen([self._config.script_path('run-safari')] + self._arguments_for_configuration() + ['--no-saved-state', '-NSOpen', results_filename],
+            cwd=self._config.webkit_base_dir(), stdout=file(os.devnull), stderr=file(os.devnull))
+
+    # FIXME: The next two routines turn off the http locking in order
+    # to work around failures on the bots caused when the slave restarts.
+    # See https://bugs.webkit.org/show_bug.cgi?id=64886 for more info.
+    # The proper fix is to make sure the slave is actually stopping NRWT
+    # properly on restart. Note that by removing the lock file and not waiting,
+    # the result should be that if there is a web server already running,
+    # it'll be killed and this one will be started in its place; this
+    # may lead to weird things happening in the other run. However, I don't
+    # think we're (intentionally) actually running multiple runs concurrently
+    # on any Mac bots.
+
+    def acquire_http_lock(self):
+        pass
+
+    def release_http_lock(self):
+        pass
+
+    def _get_crash_log(self, name, pid, stdout, stderr, newer_than, time_fn=None, sleep_fn=None, wait_for_log=True):
+        # Note that we do slow-spin here and wait, since it appears the time
+        # ReportCrash takes to actually write and flush the file varies when there are
+        # lots of simultaneous crashes going on.
+        # FIXME: Should most of this be moved into CrashLogs()?
+        time_fn = time_fn or time.time
+        sleep_fn = sleep_fn or time.sleep
+        crash_log = ''
+        crash_logs = CrashLogs(self.host)
+        now = time_fn()
+        # FIXME: delete this after we're sure this code is working ...
+        _log.debug('looking for crash log for %s:%s' % (name, str(pid)))
+        deadline = now + 5 * int(self.get_option('child_processes', 1))
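+        # e.g. with 8 child processes this gives ReportCrash up to 40 seconds to write the log.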
+        while not crash_log and now <= deadline:
+            crash_log = crash_logs.find_newest_log(name, pid, include_errors=True, newer_than=newer_than)
+            if not wait_for_log:
+                break
+            if not crash_log or not [line for line in crash_log.splitlines() if not line.startswith('ERROR')]:
+                sleep_fn(0.1)
+                now = time_fn()
+
+        if not crash_log:
+            return (stderr, None)
+        return (stderr, crash_log)
+
+    def look_for_new_crash_logs(self, crashed_processes, start_time):
+        """Since crash logs can take a long time to be written out if the system is
+           under stress do a second pass at the end of the test run.
+
+           crashes: test_name -> pid, process_name tuple of crashed process
+           start_time: time the tests started at.  We're looking for crash
+               logs after that time.
+        """
+        crash_logs = {}
+        for (test_name, process_name, pid) in crashed_processes:
+            # Passing None for output.  This is a second pass after the test finished, so
+            # if the output had any logging we would have already collected it.
+            crash_log = self._get_crash_log(process_name, pid, None, None, start_time, wait_for_log=False)[1]
+            if not crash_log:
+                continue
+            crash_logs[test_name] = crash_log
+        return crash_logs
+
+    def sample_process(self, name, pid):
+        try:
+            hang_report = self._filesystem.join(self.results_directory(), "%s-%s.sample.txt" % (name, pid))
+            self._executive.run_command([
+                "/usr/bin/sample",
+                pid,
+                10,
+                10,
+                "-file",
+                hang_report,
+            ])
+        except ScriptError, e:
+            _log.warning('Unable to sample process.')
+
+    def _path_to_helper(self):
+        binary_name = 'LayoutTestHelper'
+        return self._build_path(binary_name)
+
+    def start_helper(self):
+        helper_path = self._path_to_helper()
+        if helper_path:
+            _log.debug("Starting layout helper %s" % helper_path)
+            # Note: Not thread safe: http://bugs.python.org/issue2320
+            self._helper = self._executive.popen([helper_path],
+                stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None)
+            is_ready = self._helper.stdout.readline()
+            if not is_ready.startswith('ready'):
+                _log.error("LayoutTestHelper failed to be ready")
+
+    def stop_helper(self):
+        if self._helper:
+            _log.debug("Stopping LayoutTestHelper")
+            try:
+                self._helper.stdin.write("x\n")
+                self._helper.stdin.close()
+                self._helper.wait()
+            except IOError, e:
+                _log.debug("IOError raised while stopping helper: %s" % str(e))
+                pass
+            self._helper = None
+
+    def nm_command(self):
+        try:
+            return self._executive.run_command(['xcrun', '-find', 'nm']).rstrip()
+        except ScriptError, e:
+            _log.warn("xcrun failed; falling back to 'nm'.")
+            return 'nm'
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py
new file mode 100644
index 0000000..c2b26b2
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py
@@ -0,0 +1,257 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.layout_tests.port.mac import MacPort
+from webkitpy.layout_tests.port import port_testcase
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.tool.mocktool import MockOptions
+from webkitpy.common.system.executive_mock import MockExecutive, MockExecutive2, MockProcess, ScriptError
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+
+
+class MacTest(port_testcase.PortTestCase):
+    os_name = 'mac'
+    os_version = 'lion'
+    port_name = 'mac-lion'
+    port_maker = MacPort
+
+    def assert_skipped_file_search_paths(self, port_name, expected_paths, use_webkit2=False):
+        port = self.make_port(port_name=port_name, options=MockOptions(webkit_test_runner=use_webkit2))
+        self.assertEqual(port._skipped_file_search_paths(), expected_paths)
+
+    def test_default_timeout_ms(self):
+        super(MacTest, self).test_default_timeout_ms()
+        self.assertEquals(self.make_port(options=MockOptions(guard_malloc=True)).default_timeout_ms(), 350000)
+
+
+    example_skipped_file = u"""
+# <rdar://problem/5647952> fast/events/mouseout-on-window.html needs mac DRT to issue mouse out events
+fast/events/mouseout-on-window.html
+
+# <rdar://problem/5643675> window.scrollTo scrolls a window with no scrollbars
+fast/events/attempt-scroll-with-no-scrollbars.html
+
+# see bug <rdar://problem/5646437> REGRESSION (r28015): svg/batik/text/smallFonts fails
+svg/batik/text/smallFonts.svg
+
+# Java tests don't work on WK2
+java/
+"""
+    example_skipped_tests = [
+        "fast/events/mouseout-on-window.html",
+        "fast/events/attempt-scroll-with-no-scrollbars.html",
+        "svg/batik/text/smallFonts.svg",
+        "java",
+    ]
+
+    def test_tests_from_skipped_file_contents(self):
+        port = self.make_port()
+        self.assertEqual(port._tests_from_skipped_file_contents(self.example_skipped_file), self.example_skipped_tests)
+
+    def assert_name(self, port_name, os_version_string, expected):
+        host = MockSystemHost(os_name='mac', os_version=os_version_string)
+        port = self.make_port(host=host, port_name=port_name)
+        self.assertEquals(expected, port.name())
+
+    def test_tests_for_other_platforms(self):
+        platforms = ['mac', 'chromium-linux', 'mac-snowleopard']
+        port = self.make_port(port_name='mac-snowleopard')
+        platform_dir_paths = map(port._webkit_baseline_path, platforms)
+        # Replace our empty mock file system with one which has our expected platform directories.
+        port._filesystem = MockFileSystem(dirs=platform_dir_paths)
+
+        dirs_to_skip = port._tests_for_other_platforms()
+        self.assertTrue('platform/chromium-linux' in dirs_to_skip)
+        self.assertFalse('platform/mac' in dirs_to_skip)
+        self.assertFalse('platform/mac-snowleopard' in dirs_to_skip)
+
+    def test_version(self):
+        port = self.make_port()
+        self.assertTrue(port.version())
+
+    def test_versions(self):
+        # Note: these tests don't need to be exhaustive as long as we get path coverage.
+        self.assert_name('mac', 'snowleopard', 'mac-snowleopard')
+        self.assert_name('mac-snowleopard', 'leopard', 'mac-snowleopard')
+        self.assert_name('mac-snowleopard', 'lion', 'mac-snowleopard')
+
+        self.assert_name('mac', 'lion', 'mac-lion')
+        self.assert_name('mac-lion', 'lion', 'mac-lion')
+
+        self.assert_name('mac', 'mountainlion', 'mac-mountainlion')
+        self.assert_name('mac-mountainlion', 'lion', 'mac-mountainlion')
+
+        self.assert_name('mac', 'future', 'mac-future')
+        self.assert_name('mac-future', 'future', 'mac-future')
+
+        self.assertRaises(AssertionError, self.assert_name, 'mac-tiger', 'leopard', 'mac-leopard')
+
+    def test_setup_environ_for_server(self):
+        port = self.make_port(options=MockOptions(leaks=True, guard_malloc=True))
+        env = port.setup_environ_for_server(port.driver_name())
+        self.assertEquals(env['MallocStackLogging'], '1')
+        self.assertEquals(env['DYLD_INSERT_LIBRARIES'], '/usr/lib/libgmalloc.dylib')
+
+    def _assert_search_path(self, port_name, baseline_path, search_paths, use_webkit2=False):
+        port = self.make_port(port_name=port_name, options=MockOptions(webkit_test_runner=use_webkit2))
+        absolute_search_paths = map(port._webkit_baseline_path, search_paths)
+        self.assertEquals(port.baseline_path(), port._webkit_baseline_path(baseline_path))
+        self.assertEquals(port.baseline_search_path(), absolute_search_paths)
+
+    def test_baseline_search_path(self):
+        # Note that we don't need total coverage here, just path coverage, since this is all data driven.
+        self._assert_search_path('mac-snowleopard', 'mac-snowleopard', ['mac-snowleopard', 'mac-lion', 'mac'])
+        self._assert_search_path('mac-lion', 'mac-lion', ['mac-lion', 'mac'])
+        self._assert_search_path('mac-mountainlion', 'mac', ['mac'])
+        self._assert_search_path('mac-future', 'mac', ['mac'])
+        self._assert_search_path('mac-snowleopard', 'mac-wk2', ['mac-wk2', 'mac-snowleopard', 'mac-lion', 'mac'], use_webkit2=True)
+        self._assert_search_path('mac-lion', 'mac-wk2', ['mac-wk2', 'mac-lion', 'mac'], use_webkit2=True)
+        self._assert_search_path('mac-mountainlion', 'mac-wk2', ['mac-wk2', 'mac'], use_webkit2=True)
+        self._assert_search_path('mac-future', 'mac-wk2', ['mac-wk2', 'mac'], use_webkit2=True)
+
+    def test_show_results_html_file(self):
+        port = self.make_port()
+        # Delay setting a should_log executive to avoid logging from MacPort.__init__.
+        port._executive = MockExecutive(should_log=True)
+        expected_stderr = "MOCK popen: ['Tools/Scripts/run-safari', '--release', '--no-saved-state', '-NSOpen', 'test.html'], cwd=/mock-checkout\n"
+        OutputCapture().assert_outputs(self, port.show_results_html_file, ["test.html"], expected_stderr=expected_stderr)
+
+    def test_operating_system(self):
+        self.assertEqual('mac', self.make_port().operating_system())
+
+    def test_default_child_processes(self):
+        port = self.make_port(port_name='mac-lion')
+        # MockPlatformInfo only has 2 mock cores.  The important part is that 2 > 1.
+        self.assertEqual(port.default_child_processes(), 2)
+
+        bytes_for_drt = 200 * 1024 * 1024
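+        # 200 MB: just enough total memory for the port to allow a single
+        # child process; below we also check that even less memory still
+        # yields a minimum of one.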
+        port.host.platform.total_bytes_memory = lambda: bytes_for_drt
+        expected_stderr = "This machine could support 2 child processes, but only has enough memory for 1.\n"
+        child_processes = OutputCapture().assert_outputs(self, port.default_child_processes, (), expected_stderr=expected_stderr)
+        self.assertEqual(child_processes, 1)
+
+        # Make sure that we always use one process, even if we don't have the memory for it.
+        port.host.platform.total_bytes_memory = lambda: bytes_for_drt - 1
+        expected_stderr = "This machine could support 2 child processes, but only has enough memory for 1.\n"
+        child_processes = OutputCapture().assert_outputs(self, port.default_child_processes, (), expected_stderr=expected_stderr)
+        self.assertEqual(child_processes, 1)
+
+        # SnowLeopard has a CFNetwork bug which causes crashes if we execute more than one copy of DRT at once.
+        port = self.make_port(port_name='mac-snowleopard')
+        expected_stderr = "Cannot run tests in parallel on Snow Leopard due to rdar://problem/10621525.\n"
+        child_processes = OutputCapture().assert_outputs(self, port.default_child_processes, (), expected_stderr=expected_stderr)
+        self.assertEqual(child_processes, 1)
+
+    def test_get_crash_log(self):
+        # Mac crash logs are tested elsewhere, so here we just make sure we don't crash.
+        def fake_time_cb():
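+            # Successive calls to the returned function report 0, 20 and 40,
+            # simulating time passing while _get_crash_log waits.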
+            times = [0, 20, 40]
+            return lambda: times.pop(0)
+        port = self.make_port(port_name='mac-snowleopard')
+        port._get_crash_log('DumpRenderTree', 1234, '', '', 0,
+            time_fn=fake_time_cb(), sleep_fn=lambda delay: None)
+
+    def test_helper_starts(self):
+        host = MockSystemHost(MockExecutive())
+        port = self.make_port(host)
+        oc = OutputCapture()
+        oc.capture_output()
+        host.executive._proc = MockProcess('ready\n')
+        port.start_helper()
+        port.stop_helper()
+        oc.restore_output()
+
+        # Make sure trying to stop the helper twice is safe.
+        port.stop_helper()
+
+    def test_helper_fails_to_start(self):
+        host = MockSystemHost(MockExecutive())
+        port = self.make_port(host)
+        oc = OutputCapture()
+        oc.capture_output()
+        port.start_helper()
+        port.stop_helper()
+        oc.restore_output()
+
+    def test_helper_fails_to_stop(self):
+        host = MockSystemHost(MockExecutive())
+        host.executive._proc = MockProcess()
+
+        def bad_waiter():
+            raise IOError('failed to wait')
+        host.executive._proc.wait = bad_waiter
+
+        port = self.make_port(host)
+        oc = OutputCapture()
+        oc.capture_output()
+        port.start_helper()
+        port.stop_helper()
+        oc.restore_output()
+
+    def test_sample_process(self):
+
+        def logging_run_command(args):
+            print args
+
+        port = self.make_port()
+        port._executive = MockExecutive2(run_command_fn=logging_run_command)
+        expected_stdout = "['/usr/bin/sample', 42, 10, 10, '-file', '/mock-build/layout-test-results/test-42.sample.txt']\n"
+        OutputCapture().assert_outputs(self, port.sample_process, args=['test', 42], expected_stdout=expected_stdout)
+
+    def test_sample_process_throws_exception(self):
+
+        def throwing_run_command(args):
+            raise ScriptError("MOCK script error")
+
+        port = self.make_port()
+        port._executive = MockExecutive2(run_command_fn=throwing_run_command)
+        OutputCapture().assert_outputs(self, port.sample_process, args=['test', 42])
+
+    def test_32bit(self):
+        port = self.make_port(options=MockOptions(architecture='x86'))
+
+        def run_script(script, args=None, env=None):
+            self.args = args
+
+        port._run_script = run_script
+        self.assertEquals(port.architecture(), 'x86')
+        port._build_driver()
+        self.assertEquals(self.args, ['ARCHS=i386'])
+
+    def test_64bit(self):
+        # The Apple Mac port is 64-bit by default.
+        port = self.make_port()
+        self.assertEquals(port.architecture(), 'x86_64')
+
+        def run_script(script, args=None, env=None):
+            self.args = args
+
+        port._run_script = run_script
+        port._build_driver()
+        self.assertEquals(self.args, [])
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/mock_drt.py b/Tools/Scripts/webkitpy/layout_tests/port/mock_drt.py
new file mode 100644
index 0000000..a2106fd
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/mock_drt.py
@@ -0,0 +1,297 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""
+This is an implementation of the Port interface that overrides other
+ports and changes the Driver binary to "MockDRT".
+
+The MockDRT objects emulate what a real DRT would do. In particular, they
+return the output a real DRT would return for a given test, assuming that
+test actually passes (except for reftests, which currently cause the
+MockDRT to crash).
+"""
+
+import base64
+import logging
+import optparse
+import os
+import sys
+
+# Since we execute this script directly as part of the unit tests, we need to ensure
+# that Tools/Scripts is in sys.path for the next imports to work correctly.
+script_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
+if script_dir not in sys.path:
+    sys.path.append(script_dir)
+
+from webkitpy.common.system.systemhost import SystemHost
+from webkitpy.layout_tests.port.driver import DriverInput, DriverOutput, DriverProxy
+from webkitpy.layout_tests.port.factory import PortFactory
+
+_log = logging.getLogger(__name__)
+
+
+class MockDRTPort(object):
+    port_name = 'mock'
+
+    @classmethod
+    def determine_full_port_name(cls, host, options, port_name):
+        return port_name
+
+    def __init__(self, host, port_name, **kwargs):
+        self.__delegate = PortFactory(host).get(port_name.replace('mock-', ''), **kwargs)
+
+    def __getattr__(self, name):
+        return getattr(self.__delegate, name)
+
+    def check_build(self, needs_http):
+        return True
+
+    def check_sys_deps(self, needs_http):
+        return True
+
+    def create_driver(self, worker_number, no_timeout=False):
+        # The magic of the MockDRTPort is that we create a driver that has a
+        # cmd_line() method monkey-patched to invoke this script instead of DRT.
+        return DriverProxy(self, worker_number, self._mocked_driver_maker, pixel_tests=self.get_option('pixel_tests'), no_timeout=no_timeout)
+
+    @staticmethod
+    def _mocked_driver_maker(port, worker_number, pixel_tests, no_timeout=False):
+        path_to_this_file = port.host.filesystem.abspath(__file__.replace('.pyc', '.py'))
+        driver = port.__delegate._driver_class()(port, worker_number, pixel_tests, no_timeout)
+        driver.cmd_line = port._overriding_cmd_line(driver.cmd_line,
+                                                    port.__delegate._path_to_driver(),
+                                                    sys.executable,
+                                                    path_to_this_file,
+                                                    port.__delegate.name())
+        return driver
+
+    @staticmethod
+    def _overriding_cmd_line(original_cmd_line, driver_path, python_exe, this_file, port_name):
+        def new_cmd_line(pixel_tests, per_test_args):
+            cmd_line = original_cmd_line(pixel_tests, per_test_args)
+            index = cmd_line.index(driver_path)
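+            # Swap the single driver-binary entry for an invocation of this
+            # script; e.g. (paths illustrative) ['/mock-build/DumpRenderTree', ...]
+            # becomes [sys.executable, '/.../mock_drt.py', '--platform', 'mac-lion', ...].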
+            cmd_line[index:index + 1] = [python_exe, this_file, '--platform', port_name]
+            return cmd_line
+
+        return new_cmd_line
+
+    def start_helper(self):
+        pass
+
+    def start_http_server(self, number_of_servers):
+        pass
+
+    def start_websocket_server(self):
+        pass
+
+    def acquire_http_lock(self):
+        pass
+
+    def stop_helper(self):
+        pass
+
+    def stop_http_server(self):
+        pass
+
+    def stop_websocket_server(self):
+        pass
+
+    def release_http_lock(self):
+        pass
+
+
+def main(argv, host, stdin, stdout, stderr):
+    """Run the tests."""
+
+    options, args = parse_options(argv)
+    if options.test_shell:
+        drt = MockTestShell(options, args, host, stdin, stdout, stderr)
+    else:
+        drt = MockDRT(options, args, host, stdin, stdout, stderr)
+    return drt.run()
+
+
+def parse_options(argv):
+    # FIXME: We have to do custom arg parsing instead of using the optparse
+    # module.  First, Chromium and non-Chromium DRTs use different argument
+    # syntaxes: Chromium uses --pixel-tests=<path>, and non-Chromium uses
+    # --pixel-tests as a boolean flag. Second, we don't want to have to list
+    # every command line flag DRT accepts, but optparse complains about
+    # unrecognized flags. At some point it might be good to share a common
+    # DRT options class between this file and webkit.py and chromium.py
+    # just to get better type checking.
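+    # Examples of the two styles (paths illustrative):
+    #   non-Chromium: ['--platform', 'test', '--pixel-tests', '-']
+    #   Chromium:     ['--platform', 'chromium-mac', '--test-shell',
+    #                  '--pixel-tests=/tmp/png_result0.png']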
+    platform_index = argv.index('--platform')
+    platform = argv[platform_index + 1]
+
+    pixel_tests = False
+    pixel_path = None
+    test_shell = '--test-shell' in argv
+    if test_shell:
+        for arg in argv:
+            if arg.startswith('--pixel-tests'):
+                pixel_tests = True
+                pixel_path = arg[len('--pixel-tests='):]
+    else:
+        pixel_tests = '--pixel-tests' in argv
+    options = optparse.Values({'test_shell': test_shell, 'platform': platform, 'pixel_tests': pixel_tests, 'pixel_path': pixel_path})
+    return (options, argv)
+
+
+class MockDRT(object):
+    def __init__(self, options, args, host, stdin, stdout, stderr):
+        self._options = options
+        self._args = args
+        self._host = host
+        self._stdout = stdout
+        self._stdin = stdin
+        self._stderr = stderr
+
+        port_name = None
+        if options.platform:
+            port_name = options.platform
+        self._port = PortFactory(host).get(port_name=port_name, options=options)
+        self._driver = self._port.create_driver(0)
+
+    def run(self):
+        while True:
+            line = self._stdin.readline()
+            if not line:
+                return 0
+            driver_input = self.input_from_line(line)
+            dirname, basename = self._port.split_test(driver_input.test_name)
+            is_reftest = (self._port.reference_files(driver_input.test_name) or
+                          self._port.is_reference_html_file(self._port._filesystem, dirname, basename))
+            output = self.output_for_test(driver_input, is_reftest)
+            self.write_test_output(driver_input, output, is_reftest)
+
+    def input_from_line(self, line):
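+        # A (non-test-shell) input line is a test URI or absolute path,
+        # optionally followed by an apostrophe and the expected pixel
+        # checksum, e.g. (illustrative):
+        #   /test.checkout/LayoutTests/passes/image.html'image-checksum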
+        vals = line.strip().split("'")
+        if len(vals) == 1:
+            uri = vals[0]
+            checksum = None
+        else:
+            uri = vals[0]
+            checksum = vals[1]
+        if uri.startswith('http://') or uri.startswith('https://'):
+            test_name = self._driver.uri_to_test(uri)
+        else:
+            test_name = self._port.relative_test_filename(uri)
+
+        return DriverInput(test_name, 0, checksum, self._options.pixel_tests)
+
+    def output_for_test(self, test_input, is_reftest):
+        port = self._port
+        actual_text = port.expected_text(test_input.test_name)
+        actual_audio = port.expected_audio(test_input.test_name)
+        actual_image = None
+        actual_checksum = None
+        if is_reftest:
+            # Make up some output for reftests.
+            actual_text = 'reference text\n'
+            actual_checksum = 'mock-checksum'
+            actual_image = 'blank'
+            if test_input.test_name.endswith('-mismatch.html'):
+                actual_text = 'not reference text\n'
+                actual_checksum = 'not-mock-checksum'
+                actual_image = 'not blank'
+        elif self._options.pixel_tests and test_input.image_hash:
+            actual_checksum = port.expected_checksum(test_input.test_name)
+            actual_image = port.expected_image(test_input.test_name)
+
+        return DriverOutput(actual_text, actual_image, actual_checksum, actual_audio)
+
+    def write_test_output(self, test_input, output, is_reftest):
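+        # Mirrors DRT's output protocol: a text (or base64 audio) block ending
+        # in '#EOF', then an optional pixel block with ActualHash/ExpectedHash
+        # (and PNG data only on a mismatch), ending in a second '#EOF'; '#EOF'
+        # is also written to stderr.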
+        if output.audio:
+            self._stdout.write('Content-Type: audio/wav\n')
+            self._stdout.write('Content-Transfer-Encoding: base64\n')
+            self._stdout.write(base64.b64encode(output.audio))
+        else:
+            self._stdout.write('Content-Type: text/plain\n')
+            # FIXME: Note that we don't ensure there is a trailing newline!
+            # This mirrors actual (Mac) DRT behavior but is a bug.
+            if output.text:
+                self._stdout.write(output.text)
+
+        self._stdout.write('#EOF\n')
+
+        if self._options.pixel_tests and output.image_hash:
+            self._stdout.write('\n')
+            self._stdout.write('ActualHash: %s\n' % output.image_hash)
+            self._stdout.write('ExpectedHash: %s\n' % test_input.image_hash)
+            if output.image_hash != test_input.image_hash:
+                self._stdout.write('Content-Type: image/png\n')
+                self._stdout.write('Content-Length: %s\n' % len(output.image))
+                self._stdout.write(output.image)
+        self._stdout.write('#EOF\n')
+        self._stdout.flush()
+        self._stderr.write('#EOF\n')
+        self._stderr.flush()
+
+
+class MockTestShell(MockDRT):
+    def input_from_line(self, line):
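+        # A test-shell input line is "<uri> <timeout_ms> [<checksum>]", e.g.
+        # (illustrative): file:///test.checkout/LayoutTests/passes/image.html 6000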
+        vals = line.strip().split()
+        if len(vals) == 3:
+            uri, timeout, checksum = vals
+        else:
+            uri, timeout = vals
+            checksum = None
+
+        test_name = self._driver.uri_to_test(uri)
+        return DriverInput(test_name, timeout, checksum, self._options.pixel_tests)
+
+    def output_for_test(self, test_input, is_reftest):
+        # FIXME: This is a hack to make virtual tests work. Need something more general.
+        original_test_name = test_input.test_name
+        if '--enable-accelerated-2d-canvas' in self._args and 'canvas' in test_input.test_name:
+            test_input.test_name = 'platform/chromium/virtual/gpu/' + test_input.test_name
+        output = super(MockTestShell, self).output_for_test(test_input, is_reftest)
+        test_input.test_name = original_test_name
+        return output
+
+    def write_test_output(self, test_input, output, is_reftest):
+        self._stdout.write("#URL:%s\n" % self._driver.test_to_uri(test_input.test_name))
+        if self._options.pixel_tests and output.image_hash:
+            self._stdout.write("#MD5:%s\n" % output.image_hash)
+            if output.image:
+                self._host.filesystem.maybe_make_directory(self._host.filesystem.dirname(self._options.pixel_path))
+                self._host.filesystem.write_binary_file(self._options.pixel_path, output.image)
+        if output.text:
+            self._stdout.write(output.text)
+
+        if output.text and not output.text.endswith('\n'):
+            self._stdout.write('\n')
+        self._stdout.write('#EOF\n')
+        self._stdout.flush()
+
+
+if __name__ == '__main__':
+    # Note that the Mock in MockDRT refers to the fact that it is emulating a
+    # real DRT, and as such, it needs access to a real SystemHost, not a MockSystemHost.
+    sys.exit(main(sys.argv[1:], SystemHost(), sys.stdin, sys.stdout, sys.stderr))
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py
new file mode 100755
index 0000000..1ac051a
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py
@@ -0,0 +1,268 @@
+#!/usr/bin/env python
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for MockDRT."""
+
+import sys
+import unittest
+
+from webkitpy.common import newstringio
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+from webkitpy.layout_tests.port import mock_drt
+from webkitpy.layout_tests.port import port_testcase
+from webkitpy.layout_tests.port import test
+from webkitpy.layout_tests.port.factory import PortFactory
+from webkitpy.tool import mocktool
+
+
+mock_options = mocktool.MockOptions(configuration='Release')
+
+
+class MockDRTPortTest(port_testcase.PortTestCase):
+
+    def make_port(self, options=mock_options):
+        host = MockSystemHost()
+        test.add_unit_tests_to_mock_filesystem(host.filesystem)
+        return mock_drt.MockDRTPort(host, port_name='mock-mac-lion', options=options)
+
+    def test_port_name_in_constructor(self):
+        self.assertTrue(mock_drt.MockDRTPort(MockSystemHost(), port_name='mock-test'))
+
+    def test_check_sys_deps(self):
+        pass
+
+    def test_diff_image(self):
+        pass
+
+    def test_diff_image_crashed(self):
+        pass
+
+    def test_uses_apache(self):
+        pass
+
+    def integration_test_http_lock(self):
+        pass
+
+    def integration_test_start_helper(self):
+        pass
+
+    def integration_test_http_server__normal(self):
+        pass
+
+    def integration_test_http_server__fails(self):
+        pass
+
+    def integration_test_websocket_server__normal(self):
+        pass
+
+    def integration_test_websocket_server__fails(self):
+        pass
+
+    def integration_test_helper(self):
+        pass
+
+    def test_get_crash_log(self):
+        pass
+
+    def test_check_build(self):
+        pass
+
+
+class MockDRTTest(unittest.TestCase):
+    def input_line(self, port, test_name, checksum=None):
+        url = port.create_driver(0).test_to_uri(test_name)
+        if url.startswith('file://'):
+            url = url[len('file://'):]
+
+        if checksum:
+            return url + "'" + checksum + '\n'
+        return url + '\n'
+
+    def extra_args(self, pixel_tests):
+        if pixel_tests:
+            return ['--pixel-tests', '-']
+        return ['-']
+
+    def make_drt(self, options, args, host, stdin, stdout, stderr):
+        return mock_drt.MockDRT(options, args, host, stdin, stdout, stderr)
+
+    def make_input_output(self, port, test_name, pixel_tests,
+                          expected_checksum, drt_output, drt_input=None, expected_text=None):
+        if pixel_tests:
+            if not expected_checksum:
+                expected_checksum = port.expected_checksum(test_name)
+        if not drt_input:
+            drt_input = self.input_line(port, test_name, expected_checksum)
+        text_output = expected_text or port.expected_text(test_name) or ''
+
+        if not drt_output:
+            drt_output = self.expected_output(port, test_name, pixel_tests,
+                                              text_output, expected_checksum)
+        return (drt_input, drt_output)
+
+    def expected_output(self, port, test_name, pixel_tests, text_output, expected_checksum):
+        output = ['Content-Type: text/plain\n']
+        if text_output:
+            output.append(text_output)
+        output.append('#EOF\n')
+        if pixel_tests and expected_checksum:
+            output.extend(['\n',
+                           'ActualHash: %s\n' % expected_checksum,
+                           'ExpectedHash: %s\n' % expected_checksum])
+        output.append('#EOF\n')
+        return output
+
+    def assertTest(self, test_name, pixel_tests, expected_checksum=None, drt_output=None, host=None, expected_text=None):
+        port_name = 'test'
+        host = host or MockSystemHost()
+        test.add_unit_tests_to_mock_filesystem(host.filesystem)
+        port = PortFactory(host).get(port_name)
+        drt_input, drt_output = self.make_input_output(port, test_name,
+            pixel_tests, expected_checksum, drt_output, drt_input=None, expected_text=expected_text)
+
+        args = ['--platform', port_name] + self.extra_args(pixel_tests)
+        stdin = newstringio.StringIO(drt_input)
+        stdout = newstringio.StringIO()
+        stderr = newstringio.StringIO()
+        options, args = mock_drt.parse_options(args)
+
+        drt = self.make_drt(options, args, host, stdin, stdout, stderr)
+        res = drt.run()
+
+        self.assertEqual(res, 0)
+
+        # We use StringIO.buflist here instead of getvalue() because the
+        # StringIO may contain a mix of unicode/ascii and 8-bit strings.
+        self.assertEqual(stdout.buflist, drt_output)
+        self.assertEqual(stderr.getvalue(), '' if options.test_shell else '#EOF\n')
+
+    def test_main(self):
+        host = MockSystemHost()
+        test.add_unit_tests_to_mock_filesystem(host.filesystem)
+        stdin = newstringio.StringIO()
+        stdout = newstringio.StringIO()
+        stderr = newstringio.StringIO()
+        res = mock_drt.main(['--platform', 'test'] + self.extra_args(False),
+                            host, stdin, stdout, stderr)
+        self.assertEqual(res, 0)
+        self.assertEqual(stdout.getvalue(), '')
+        self.assertEqual(stderr.getvalue(), '')
+        self.assertEqual(host.filesystem.written_files, {})
+
+    def test_pixeltest_passes(self):
+        # This also tests that we handle http:// test URLs properly.
+        self.assertTest('http/tests/passes/text.html', True)
+
+    def test_pixeltest__fails(self):
+        self.assertTest('failures/expected/image_checksum.html', pixel_tests=True,
+            expected_checksum='image_checksum-checksum',
+            drt_output=['Content-Type: text/plain\n',
+                        'image_checksum-txt',
+                        '#EOF\n',
+                        '\n',
+                        'ActualHash: image_checksum-checksum\n',
+                        'ExpectedHash: image_checksum-checksum\n',
+                        '#EOF\n'])
+
+    def test_textonly(self):
+        self.assertTest('passes/image.html', False)
+
+    def test_checksum_in_png(self):
+        self.assertTest('passes/checksum_in_image.html', True)
+
+    def test_missing_image(self):
+        self.assertTest('failures/expected/missing_image.html', True)
+
+    def test_missing_text(self):
+        self.assertTest('failures/expected/missing_text.html', True)
+
+    def test_reftest_match(self):
+        self.assertTest('passes/reftest.html', False, expected_checksum='mock-checksum', expected_text='reference text\n')
+        self.assertTest('passes/reftest.html', True, expected_checksum='mock-checksum', expected_text='reference text\n')
+
+    def test_reftest_mismatch(self):
+        self.assertTest('passes/mismatch.html', False, expected_checksum='mock-checksum', expected_text='reference text\n')
+        self.assertTest('passes/mismatch.html', True, expected_checksum='mock-checksum', expected_text='reference text\n')
+
+
+class MockTestShellTest(MockDRTTest):
+    def extra_args(self, pixel_tests):
+        if pixel_tests:
+            return ['--pixel-tests=/tmp/png_result0.png']
+        return []
+
+    def make_drt(self, options, args, host, stdin, stdout, stderr):
+        options.test_shell = True
+
+        # We have to set these by hand because --platform test won't trigger
+        # the Chromium code paths.
+        options.pixel_path = '/tmp/png_result0.png'
+        options.pixel_tests = True
+
+        return mock_drt.MockTestShell(options, args, host, stdin, stdout, stderr)
+
+    def input_line(self, port, test_name, checksum=None):
+        url = port.create_driver(0).test_to_uri(test_name)
+        if checksum:
+            return url + ' 6000 ' + checksum + '\n'
+        return url + ' 6000\n'
+
+    def expected_output(self, port, test_name, pixel_tests, text_output, expected_checksum):
+        url = port.create_driver(0).test_to_uri(test_name)
+        output = ['#URL:%s\n' % url]
+        if expected_checksum:
+            output.append('#MD5:%s\n' % expected_checksum)
+        if text_output:
+            output.append(text_output)
+            if not text_output.endswith('\n'):
+                output.append('\n')
+        output.append('#EOF\n')
+        return output
+
+    def test_pixeltest__fails(self):
+        host = MockSystemHost()
+        url = '#URL:file://'
+        url = url + '%s/failures/expected/image_checksum.html' % PortFactory(host).get('test').layout_tests_dir()
+        self.assertTest('failures/expected/image_checksum.html', pixel_tests=True,
+            expected_checksum='image_checksum',
+            drt_output=[url + '\n',
+                        '#MD5:image_checksum-checksum\n',
+                        'image_checksum-txt',
+                        '\n',
+                        '#EOF\n'],
+            host=host)
+        self.assertEquals(host.filesystem.written_files,
+            {'/tmp/png_result0.png': 'image_checksum\x8a-pngtEXtchecksum\x00image_checksum-checksum'})
+
+    def test_test_shell_parse_options(self):
+        options, args = mock_drt.parse_options(['--platform', 'chromium-mac', '--test-shell',
+            '--pixel-tests=/tmp/png_result0.png'])
+        self.assertTrue(options.test_shell)
+        self.assertTrue(options.pixel_tests)
+        self.assertEquals(options.pixel_path, '/tmp/png_result0.png')
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py b/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py
new file mode 100755
index 0000000..b036f4b
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py
@@ -0,0 +1,649 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit testing base class for Port implementations."""
+
+import errno
+import logging
+import os
+import socket
+import sys
+import time
+import unittest
+
+from webkitpy.common.system.executive_mock import MockExecutive
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+from webkitpy.layout_tests.port.base import Port
+from webkitpy.layout_tests.port.config_mock import MockConfig
+from webkitpy.layout_tests.port.server_process_mock import MockServerProcess
+from webkitpy.layout_tests.servers import http_server_base
+from webkitpy.tool.mocktool import MockOptions
+
+
+# FIXME: get rid of this fixture
+class TestWebKitPort(Port):
+    port_name = "testwebkitport"
+
+    def __init__(self, symbols_string=None,
+                 expectations_file=None, skips_file=None, host=None, config=None,
+                 **kwargs):
+        self.symbols_string = symbols_string  # Passing "" disables all statically-detectable features.
+        host = host or MockSystemHost()
+        config = config or MockConfig()
+        super(TestWebKitPort, self).__init__(host=host, config=config, **kwargs)
+
+    def all_test_configurations(self):
+        return [self.test_configuration()]
+
+    def _symbols_string(self):
+        return self.symbols_string
+
+    def _tests_for_other_platforms(self):
+        return ["media", ]
+
+    def _tests_for_disabled_features(self):
+        return ["accessibility", ]
+
+
+class PortTestCase(unittest.TestCase):
+    """Tests that all Port implementations must pass."""
+    HTTP_PORTS = (8000, 8080, 8443)
+    WEBSOCKET_PORTS = (8880,)
+
+    # Subclasses override this to point to their Port subclass.
+    os_name = None
+    os_version = None
+    port_maker = TestWebKitPort
+
+    def make_port(self, host=None, port_name=None, options=None, os_name=None, os_version=None, config=None, **kwargs):
+        host = host or MockSystemHost(os_name=(os_name or self.os_name), os_version=(os_version or self.os_version))
+        options = options or MockOptions(configuration='Release')
+        config = config or MockConfig(filesystem=host.filesystem, default_configuration='Release')
+        port_name = port_name or self.port_name
+        port_name = self.port_maker.determine_full_port_name(host, options, port_name)
+        return self.port_maker(host, port_name, options=options, config=config, **kwargs)
+
+    def test_default_max_locked_shards(self):
+        port = self.make_port()
+        port.default_child_processes = lambda: 16
+        self.assertEquals(port.default_max_locked_shards(), 1)
+        port.default_child_processes = lambda: 2
+        self.assertEquals(port.default_max_locked_shards(), 1)
+
+    def test_default_timeout_ms(self):
+        self.assertEquals(self.make_port(options=MockOptions(configuration='Release')).default_timeout_ms(), 35000)
+        self.assertEquals(self.make_port(options=MockOptions(configuration='Debug')).default_timeout_ms(), 35000)
+
+    def test_default_pixel_tests(self):
+        self.assertEquals(self.make_port().default_pixel_tests(), False)
+
+    def test_driver_cmd_line(self):
+        port = self.make_port()
+        self.assertTrue(len(port.driver_cmd_line()))
+
+        options = MockOptions(additional_drt_flag=['--foo=bar', '--foo=baz'])
+        port = self.make_port(options=options)
+        cmd_line = port.driver_cmd_line()
+        self.assertTrue('--foo=bar' in cmd_line)
+        self.assertTrue('--foo=baz' in cmd_line)
+
+    def test_expectations_files(self):
+        self.assertNotEquals(self.make_port().expectations_files(), [])
+
+    def test_uses_apache(self):
+        self.assertTrue(self.make_port()._uses_apache())
+
+    def assert_servers_are_down(self, host, ports):
+        for port in ports:
+            try:
+                test_socket = socket.socket()
+                test_socket.connect((host, port))
+                self.fail()
+            except IOError, e:
+                self.assertTrue(e.errno in (errno.ECONNREFUSED, errno.ECONNRESET))
+            finally:
+                test_socket.close()
+
+    def assert_servers_are_up(self, host, ports):
+        for port in ports:
+            try:
+                test_socket = socket.socket()
+                test_socket.connect((host, port))
+            except IOError, e:
+                self.fail('failed to connect to %s:%d' % (host, port))
+            finally:
+                test_socket.close()
+
+    def integration_test_http_lock(self):
+        port = self.make_port()
+        # Only checking that no exception is raised.
+        port.acquire_http_lock()
+        port.release_http_lock()
+
+    def integration_test_check_sys_deps(self):
+        port = self.make_port()
+        # Only checking that no exception is raised.
+        port.check_sys_deps(True)
+
+    def integration_test_helper(self):
+        port = self.make_port()
+        # Only checking that no exception is raised.
+        port.start_helper()
+        port.stop_helper()
+
+    def integration_test_http_server__normal(self):
+        port = self.make_port()
+        self.assert_servers_are_down('localhost', self.HTTP_PORTS)
+        port.start_http_server()
+        self.assert_servers_are_up('localhost', self.HTTP_PORTS)
+        port.stop_http_server()
+        self.assert_servers_are_down('localhost', self.HTTP_PORTS)
+
+    def integration_test_http_server__fails(self):
+        port = self.make_port()
+        # Test that if a port isn't available, the call fails.
+        for port_number in self.HTTP_PORTS:
+            test_socket = socket.socket()
+            try:
+                try:
+                    test_socket.bind(('localhost', port_number))
+                except socket.error, e:
+                    if e.errno in (errno.EADDRINUSE, errno.EALREADY):
+                        self.fail('could not bind to port %d' % port_number)
+                    raise
+                try:
+                    port.start_http_server()
+                    self.fail('should not have been able to start the server while bound to %d' % port_number)
+                except http_server_base.ServerError, e:
+                    pass
+            finally:
+                port.stop_http_server()
+                test_socket.close()
+
+        # Test that calling start() twice fails.
+        try:
+            port.start_http_server()
+            self.assertRaises(AssertionError, port.start_http_server)
+        finally:
+            port.stop_http_server()
+
+    def integration_test_http_server__two_servers(self):
+        # Test that calling start() on two different ports causes the
+        # first port to be treated as stale and killed.
+        port = self.make_port()
+        port.start_http_server()
+        new_port = self.make_port()
+        try:
+            new_port.start_http_server()
+
+            # Check that the first server was killed.
+            self.assertFalse(port._executive.check_running_pid(port._http_server._pid))
+
+            # Check that there is something running.
+            self.assert_servers_are_up('localhost', self.HTTP_PORTS)
+
+            # Test that calling stop() on a killed server is harmless.
+            port.stop_http_server()
+        finally:
+            port.stop_http_server()
+            new_port.stop_http_server()
+
+            # Test that calling stop() twice is harmless.
+            new_port.stop_http_server()
+
+    def integration_test_image_diff(self):
+        port = self.make_port()
+        # FIXME: This test will never run since we are using a MockFilesystem for these tests!?!?
+        if not port.check_image_diff():
+            # The port hasn't been built - don't run the tests.
+            return
+
+        dir = port.layout_tests_dir()
+        file1 = port._filesystem.join(dir, 'fast', 'css', 'button_center.png')
+        contents1 = port._filesystem.read_binary_file(file1)
+        file2 = port._filesystem.join(dir, 'fast', 'css',
+                                      'remove-shorthand-expected.png')
+        contents2 = port._filesystem.read_binary_file(file2)
+        tmpfd, tmpfile = port._filesystem.open_binary_tempfile('')
+        tmpfd.close()
+
+        self.assertFalse(port.diff_image(contents1, contents1)[0])
+        self.assertTrue(port.diff_image(contents1, contents2)[0])
+
+        self.assertTrue(port.diff_image(contents1, contents2, tmpfile)[0])
+
+        port._filesystem.remove(tmpfile)
+
+    def test_diff_image__missing_both(self):
+        port = self.make_port()
+        self.assertFalse(port.diff_image(None, None)[0])
+        self.assertFalse(port.diff_image(None, '')[0])
+        self.assertFalse(port.diff_image('', None)[0])
+
+        self.assertFalse(port.diff_image('', '')[0])
+
+    def test_diff_image__missing_actual(self):
+        port = self.make_port()
+        self.assertTrue(port.diff_image(None, 'foo')[0])
+        self.assertTrue(port.diff_image('', 'foo')[0])
+
+    def test_diff_image__missing_expected(self):
+        port = self.make_port()
+        self.assertTrue(port.diff_image('foo', None)[0])
+        self.assertTrue(port.diff_image('foo', '')[0])
+
+    def test_diff_image(self):
+        port = self.make_port()
+        self.proc = None
+
+        def make_proc(port, nm, cmd, env):
+            self.proc = MockServerProcess(port, nm, cmd, env, lines=['diff: 100% failed\n', 'diff: 100% failed\n'])
+            return self.proc
+
+        port._server_process_constructor = make_proc
+        port.setup_test_run()
+        self.assertEquals(port.diff_image('foo', 'bar'), ('', 100.0, None))
+        self.assertEquals(self.proc.cmd[1:3], ["--tolerance", "0.1"])
+
+        self.assertEquals(port.diff_image('foo', 'bar', None), ('', 100.0, None))
+        self.assertEquals(self.proc.cmd[1:3], ["--tolerance", "0.1"])
+
+        self.assertEquals(port.diff_image('foo', 'bar', 0), ('', 100.0, None))
+        self.assertEquals(self.proc.cmd[1:3], ["--tolerance", "0"])
+
+        port.clean_up_test_run()
+        self.assertTrue(self.proc.stopped)
+        self.assertEquals(port._image_differ, None)
+
+    def test_diff_image_crashed(self):
+        port = self.make_port()
+        self.proc = None
+
+        def make_proc(port, nm, cmd, env):
+            self.proc = MockServerProcess(port, nm, cmd, env, crashed=True)
+            return self.proc
+
+        port._server_process_constructor = make_proc
+        port.setup_test_run()
+        self.assertEquals(port.diff_image('foo', 'bar'), ('', 0, 'ImageDiff crashed\n'))
+        port.clean_up_test_run()
+
+    def test_check_wdiff(self):
+        port = self.make_port()
+        port.check_wdiff()
+
+    def integration_test_websocket_server__normal(self):
+        port = self.make_port()
+        self.assert_servers_are_down('localhost', self.WEBSOCKET_PORTS)
+        port.start_websocket_server()
+        self.assert_servers_are_up('localhost', self.WEBSOCKET_PORTS)
+        port.stop_websocket_server()
+        self.assert_servers_are_down('localhost', self.WEBSOCKET_PORTS)
+
+    def integration_test_websocket_server__fails(self):
+        port = self.make_port()
+
+        # Test that start() fails if a port isn't available.
+        for port_number in self.WEBSOCKET_PORTS:
+            test_socket = socket.socket()
+            try:
+                test_socket.bind(('localhost', port_number))
+                try:
+                    port.start_websocket_server()
+                    self.fail('should not have been able to start the server while bound to %d' % port_number)
+                except http_server_base.ServerError, e:
+                    pass
+            finally:
+                port.stop_websocket_server()
+                test_socket.close()
+
+        # Test that calling start() twice fails.
+        try:
+            port.start_websocket_server()
+            self.assertRaises(AssertionError, port.start_websocket_server)
+        finally:
+            port.stop_websocket_server()
+
+    def integration_test_websocket_server__two_servers(self):
+        port = self.make_port()
+
+        # Test that calling start() on two different ports causes the
+        # first port to be treated as stale and killed.
+        port.start_websocket_server()
+        new_port = self.make_port()
+        try:
+            new_port.start_websocket_server()
+
+            # Check that the first server was killed.
+            self.assertFalse(port._executive.check_running_pid(port._websocket_server._pid))
+
+            # Check that there is something running.
+            self.assert_servers_are_up('localhost', self.WEBSOCKET_PORTS)
+
+            # Test that calling stop() on a killed server is harmless.
+            port.stop_websocket_server()
+        finally:
+            port.stop_websocket_server()
+            new_port.stop_websocket_server()
+
+            # Test that calling stop() twice is harmless.
+            new_port.stop_websocket_server()
+
+    def test_test_configuration(self):
+        port = self.make_port()
+        self.assertTrue(port.test_configuration())
+
+    def test_all_test_configurations(self):
+        port = self.make_port()
+        self.assertTrue(len(port.all_test_configurations()) > 0)
+        self.assertTrue(port.test_configuration() in port.all_test_configurations(), "%s not in %s" % (port.test_configuration(), port.all_test_configurations()))
+
+    def integration_test_http_server__loop(self):
+        port = self.make_port()
+
+        i = 0
+        while i < 10:
+            self.assert_servers_are_down('localhost', self.HTTP_PORTS)
+            port.start_http_server()
+
+            # We sleep on alternating iterations so that this test covers both
+            # back-to-back start/stop cycles and cycles separated by a delay.
+            if i % 2:
+                time.sleep(0.1)
+
+            self.assert_servers_are_up('localhost', self.HTTP_PORTS)
+            port.stop_http_server()
+            if i % 2:
+                time.sleep(0.1)
+
+            i += 1
+
+    def test_get_crash_log(self):
+        port = self.make_port()
+        self.assertEquals(port._get_crash_log(None, None, None, None, newer_than=None),
+           (None,
+            'crash log for <unknown process name> (pid <unknown>):\n'
+            'STDOUT: <empty>\n'
+            'STDERR: <empty>\n'))
+
+        self.assertEquals(port._get_crash_log('foo', 1234, 'out bar\nout baz', 'err bar\nerr baz\n', newer_than=None),
+            ('err bar\nerr baz\n',
+             'crash log for foo (pid 1234):\n'
+             'STDOUT: out bar\n'
+             'STDOUT: out baz\n'
+             'STDERR: err bar\n'
+             'STDERR: err baz\n'))
+
+        self.assertEquals(port._get_crash_log('foo', 1234, 'foo\xa6bar', 'foo\xa6bar', newer_than=None),
+            ('foo\xa6bar',
+             u'crash log for foo (pid 1234):\n'
+             u'STDOUT: foo\ufffdbar\n'
+             u'STDERR: foo\ufffdbar\n'))
+
+        self.assertEquals(port._get_crash_log('foo', 1234, 'foo\xa6bar', 'foo\xa6bar', newer_than=1.0),
+            ('foo\xa6bar',
+             u'crash log for foo (pid 1234):\n'
+             u'STDOUT: foo\ufffdbar\n'
+             u'STDERR: foo\ufffdbar\n'))
+
+    def assert_build_path(self, options, dirs, expected_path):
+        port = self.make_port(options=options)
+        for directory in dirs:
+            port.host.filesystem.maybe_make_directory(directory)
+        self.assertEquals(port._build_path(), expected_path)
+
+    def test_expectations_ordering(self):
+        port = self.make_port()
+        for path in port.expectations_files():
+            port._filesystem.write_text_file(path, '')
+        ordered_dict = port.expectations_dict()
+        self.assertEquals(port.path_to_test_expectations_file(), ordered_dict.keys()[0])
+
+        options = MockOptions(additional_expectations=['/tmp/foo', '/tmp/bar'])
+        port = self.make_port(options=options)
+        for path in port.expectations_files():
+            port._filesystem.write_text_file(path, '')
+        port._filesystem.write_text_file('/tmp/foo', 'foo')
+        port._filesystem.write_text_file('/tmp/bar', 'bar')
+        ordered_dict = port.expectations_dict()
+        self.assertEquals(ordered_dict.keys()[-2:], options.additional_expectations)
+        self.assertEquals(ordered_dict.values()[-2:], ['foo', 'bar'])
+
+    def test_path_to_test_expectations_file(self):
+        port = TestWebKitPort()
+        port._options = MockOptions(webkit_test_runner=False)
+        self.assertEqual(port.path_to_test_expectations_file(), '/mock-checkout/LayoutTests/platform/testwebkitport/TestExpectations')
+
+        port = TestWebKitPort()
+        port._options = MockOptions(webkit_test_runner=True)
+        self.assertEqual(port.path_to_test_expectations_file(), '/mock-checkout/LayoutTests/platform/testwebkitport/TestExpectations')
+
+        port = TestWebKitPort()
+        port.host.filesystem.files['/mock-checkout/LayoutTests/platform/testwebkitport/TestExpectations'] = 'some content'
+        port._options = MockOptions(webkit_test_runner=False)
+        self.assertEqual(port.path_to_test_expectations_file(), '/mock-checkout/LayoutTests/platform/testwebkitport/TestExpectations')
+
+    def test_skipped_directories_for_symbols(self):
+        # This first test confirms that the commonly found symbols result in the expected skipped directories.
+        symbols_string = " ".join(["GraphicsLayer", "WebCoreHas3DRendering", "isXHTMLMPDocument", "fooSymbol"])
+        expected_directories = set([
+            "mathml",  # Requires MathMLElement
+            "fast/canvas/webgl",  # Requires WebGLShader
+            "compositing/webgl",  # Requires WebGLShader
+            "http/tests/canvas/webgl",  # Requires WebGLShader
+            "mhtml",  # Requires MHTMLArchive
+            "fast/css/variables",  # Requires CSS Variables
+            "inspector/styles/variables",  # Requires CSS Variables
+        ])
+
+        result_directories = set(TestWebKitPort(symbols_string, None)._skipped_tests_for_unsupported_features(test_list=['mathml/foo.html']))
+        self.assertEqual(result_directories, expected_directories)
+
+        # Test that the nm string parsing actually works:
+        symbols_string = """
+000000000124f498 s __ZZN7WebCore13GraphicsLayer12replaceChildEPS0_S1_E19__PRETTY_FUNCTION__
+000000000124f500 s __ZZN7WebCore13GraphicsLayer13addChildAboveEPS0_S1_E19__PRETTY_FUNCTION__
+000000000124f670 s __ZZN7WebCore13GraphicsLayer13addChildBelowEPS0_S1_E19__PRETTY_FUNCTION__
+"""
+        # Note 'compositing' is not in the list of skipped directories (hence the parsing of GraphicsLayer worked):
+        expected_directories = set(['mathml', 'transforms/3d', 'compositing/webgl', 'fast/canvas/webgl', 'animations/3d', 'mhtml', 'http/tests/canvas/webgl', 'fast/css/variables', 'inspector/styles/variables'])
+        result_directories = set(TestWebKitPort(symbols_string, None)._skipped_tests_for_unsupported_features(test_list=['mathml/foo.html']))
+        self.assertEqual(result_directories, expected_directories)
+
+    def test_skipped_directories_for_features(self):
+        supported_features = ["Accelerated Compositing", "Foo Feature"]
+        expected_directories = set(["animations/3d", "transforms/3d"])
+        port = TestWebKitPort(None, supported_features)
+        port._runtime_feature_list = lambda: supported_features
+        result_directories = set(port._skipped_tests_for_unsupported_features(test_list=["animations/3d/foo.html"]))
+        self.assertEqual(result_directories, expected_directories)
+
+    def test_skipped_directories_for_features_no_matching_tests_in_test_list(self):
+        supported_features = ["Accelerated Compositing", "Foo Feature"]
+        expected_directories = set([])
+        result_directories = set(TestWebKitPort(None, supported_features)._skipped_tests_for_unsupported_features(test_list=['foo.html']))
+        self.assertEqual(result_directories, expected_directories)
+
+    def test_skipped_tests_for_unsupported_features_empty_test_list(self):
+        supported_features = ["Accelerated Compositing", "Foo Feature"]
+        expected_directories = set([])
+        result_directories = set(TestWebKitPort(None, supported_features)._skipped_tests_for_unsupported_features(test_list=None))
+        self.assertEqual(result_directories, expected_directories)
+
+    def test_skipped_layout_tests(self):
+        self.assertEqual(TestWebKitPort(None, None).skipped_layout_tests(test_list=[]), set(['media']))
+
+    def test_expectations_files(self):
+        port = TestWebKitPort()
+
+        def platform_dirs(port):
+            return [port.host.filesystem.basename(port.host.filesystem.dirname(f)) for f in port.expectations_files()]
+
+        self.assertEqual(platform_dirs(port), ['testwebkitport'])
+
+        port._name = "testwebkitport-version"
+        self.assertEqual(platform_dirs(port), ['testwebkitport', 'testwebkitport-version'])
+
+        port._options = MockOptions(webkit_test_runner=True)
+        self.assertEqual(platform_dirs(port), ['testwebkitport', 'testwebkitport-version', 'testwebkitport-wk2', 'wk2'])
+
+        port._options = MockOptions(additional_platform_directory=["internal-testwebkitport"])
+        self.assertEqual(platform_dirs(port), ['testwebkitport', 'testwebkitport-version', 'internal-testwebkitport'])
+
+    def test_root_option(self):
+        port = TestWebKitPort()
+        port._options = MockOptions(root='/foo')
+        self.assertEqual(port._path_to_driver(), "/foo/DumpRenderTree")
+
+    def test_test_expectations(self):
+        # Check that we read the expectations file
+        host = MockSystemHost()
+        host.filesystem.write_text_file('/mock-checkout/LayoutTests/platform/testwebkitport/TestExpectations',
+            'BUG_TESTEXPECTATIONS SKIP : fast/html/article-element.html = FAIL\n')
+        port = TestWebKitPort(host=host)
+        self.assertEqual(''.join(port.expectations_dict().values()), 'BUG_TESTEXPECTATIONS SKIP : fast/html/article-element.html = FAIL\n')
+
+    def test_build_driver(self):
+        output = OutputCapture()
+        port = TestWebKitPort()
+        # Delay setting _executive to avoid logging during construction
+        port._executive = MockExecutive(should_log=True)
+        port._options = MockOptions(configuration="Release")  # This should not be necessary, but I think TestWebKitPort is actually reading from disk (and thus detects the current configuration).
+        expected_stderr = "MOCK run_command: ['Tools/Scripts/build-dumprendertree', '--release'], cwd=/mock-checkout, env={'LC_ALL': 'C', 'MOCK_ENVIRON_COPY': '1'}\n"
+        self.assertTrue(output.assert_outputs(self, port._build_driver, expected_stderr=expected_stderr, expected_logs=''))
+
+        # Make sure when passed --webkit-test-runner we build the right tool.
+        port._options = MockOptions(webkit_test_runner=True, configuration="Release")
+        expected_stderr = "MOCK run_command: ['Tools/Scripts/build-dumprendertree', '--release'], cwd=/mock-checkout, env={'LC_ALL': 'C', 'MOCK_ENVIRON_COPY': '1'}\nMOCK run_command: ['Tools/Scripts/build-webkittestrunner', '--release'], cwd=/mock-checkout, env={'LC_ALL': 'C', 'MOCK_ENVIRON_COPY': '1'}\n"
+        self.assertTrue(output.assert_outputs(self, port._build_driver, expected_stderr=expected_stderr, expected_logs=''))
+
+        # Make sure we show the build log when --verbose is passed, which we simulate by setting the logging level to DEBUG.
+        output.set_log_level(logging.DEBUG)
+        port._options = MockOptions(configuration="Release")
+        expected_stderr = "MOCK run_command: ['Tools/Scripts/build-dumprendertree', '--release'], cwd=/mock-checkout, env={'LC_ALL': 'C', 'MOCK_ENVIRON_COPY': '1'}\n"
+        expected_logs = "Output of ['Tools/Scripts/build-dumprendertree', '--release']:\nMOCK output of child process\n"
+        self.assertTrue(output.assert_outputs(self, port._build_driver, expected_stderr=expected_stderr, expected_logs=expected_logs))
+        output.set_log_level(logging.INFO)
+
+        # Make sure that failure to build returns False.
+        port._executive = MockExecutive(should_log=True, should_throw=True)
+        # Because WK2 currently has to build both webkittestrunner and DRT, and DRT is built first, a DRT failure means it is the only build attempted.
+        expected_stderr = "MOCK run_command: ['Tools/Scripts/build-dumprendertree', '--release'], cwd=/mock-checkout, env={'LC_ALL': 'C', 'MOCK_ENVIRON_COPY': '1'}\n"
+        expected_logs = "MOCK ScriptError\n\nMOCK output of child process\n"
+        self.assertFalse(output.assert_outputs(self, port._build_driver, expected_stderr=expected_stderr, expected_logs=expected_logs))
+
+    def _assert_config_file_for_platform(self, port, platform, config_file):
+        self.assertEquals(port._apache_config_file_name_for_platform(platform), config_file)
+
+    def test_linux_distro_detection(self):
+        port = TestWebKitPort()
+        self.assertFalse(port._is_redhat_based())
+        self.assertFalse(port._is_debian_based())
+
+        port._filesystem = MockFileSystem({'/etc/redhat-release': ''})
+        self.assertTrue(port._is_redhat_based())
+        self.assertFalse(port._is_debian_based())
+
+        port._filesystem = MockFileSystem({'/etc/debian_version': ''})
+        self.assertFalse(port._is_redhat_based())
+        self.assertTrue(port._is_debian_based())
+
+    def test_apache_config_file_name_for_platform(self):
+        port = TestWebKitPort()
+        self._assert_config_file_for_platform(port, 'cygwin', 'cygwin-httpd.conf')
+
+        self._assert_config_file_for_platform(port, 'linux2', 'apache2-httpd.conf')
+        self._assert_config_file_for_platform(port, 'linux3', 'apache2-httpd.conf')
+
+        port._is_redhat_based = lambda: True
+        self._assert_config_file_for_platform(port, 'linux2', 'fedora-httpd.conf')
+
+        port = TestWebKitPort()
+        port._is_debian_based = lambda: True
+        self._assert_config_file_for_platform(port, 'linux2', 'apache2-debian-httpd.conf')
+
+        self._assert_config_file_for_platform(port, 'mac', 'apache2-httpd.conf')
+        self._assert_config_file_for_platform(port, 'win32', 'apache2-httpd.conf')  # win32 isn't a supported sys.platform.  AppleWin/WinCairo/WinCE ports all use cygwin.
+        self._assert_config_file_for_platform(port, 'barf', 'apache2-httpd.conf')
+
+    def test_path_to_apache_config_file(self):
+        port = TestWebKitPort()
+
+        saved_environ = os.environ.copy()
+        try:
+            os.environ['WEBKIT_HTTP_SERVER_CONF_PATH'] = '/path/to/httpd.conf'
+            self.assertRaises(IOError, port._path_to_apache_config_file)
+            port._filesystem.write_text_file('/existing/httpd.conf', 'Hello, world!')
+            os.environ['WEBKIT_HTTP_SERVER_CONF_PATH'] = '/existing/httpd.conf'
+            self.assertEquals(port._path_to_apache_config_file(), '/existing/httpd.conf')
+        finally:
+            os.environ = saved_environ.copy()
+
+        # Mock out _apache_config_file_name_for_platform to ignore the passed sys.platform value.
+        port._apache_config_file_name_for_platform = lambda platform: 'httpd.conf'
+        self.assertEquals(port._path_to_apache_config_file(), '/mock-checkout/LayoutTests/http/conf/httpd.conf')
+
+        # Check that even if we mock out _apache_config_file_name, the environment variable takes precedence.
+        saved_environ = os.environ.copy()
+        try:
+            os.environ['WEBKIT_HTTP_SERVER_CONF_PATH'] = '/existing/httpd.conf'
+            self.assertEquals(port._path_to_apache_config_file(), '/existing/httpd.conf')
+        finally:
+            os.environ = saved_environ.copy()
+
+    def test_check_build(self):
+        port = self.make_port(options=MockOptions(build=True))
+        self.build_called = False
+
+        def build_driver_called():
+            self.build_called = True
+            return True
+
+        port._build_driver = build_driver_called
+        port.check_build(False)
+        self.assertTrue(self.build_called)
+
+        port = self.make_port(options=MockOptions(root='/tmp', build=True))
+        self.build_called = False
+        port._build_driver = build_driver_called
+        port.check_build(False)
+        self.assertFalse(self.build_called)
+
+        port = self.make_port(options=MockOptions(build=False))
+        self.build_called = False
+        port._build_driver = build_driver_called
+        port.check_build(False)
+        self.assertFalse(self.build_called)
+
+    def test_additional_platform_directory(self):
+        port = self.make_port(options=MockOptions(additional_platform_directory=['/tmp/foo']))
+        self.assertEquals(port.baseline_search_path()[0], '/tmp/foo')
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/pulseaudio_sanitizer.py b/Tools/Scripts/webkitpy/layout_tests/port/pulseaudio_sanitizer.py
new file mode 100644
index 0000000..f4574a9
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/pulseaudio_sanitizer.py
@@ -0,0 +1,85 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2012 Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import os
+import subprocess
+
+
+_log = logging.getLogger(__name__)
+
+
+# Shared by GTK and EFL for pulseaudio sanitizing before running tests.
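+#
+# A rough usage sketch (the setup/cleanup hook names below are assumptions based
+# on how the GTK and EFL ports drive this mixin; they are not defined in this file):
+#
+#   class SomePort(Port, PulseAudioSanitizer):
+#       def setup_test_run(self):
+#           self._unload_pulseaudio_module()
+#
+#       def clean_up_test_run(self):
+#           self._restore_pulseaudio_module()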
+class PulseAudioSanitizer:
+    def _unload_pulseaudio_module(self):
+        # Unload pulseaudio's module-stream-restore, since it remembers
+        # volume settings from different runs and could affect
+        # multimedia test results.
+        self._pa_module_index = -1
+        with open(os.devnull, 'w') as devnull:
+            try:
+                pactl_process = subprocess.Popen(["pactl", "list", "short", "modules"], stdout=subprocess.PIPE, stderr=devnull)
+                pactl_process.wait()
+            except OSError:
+                # pactl might not be available.
+                _log.debug('pactl not found. Please install pulseaudio-utils to avoid some potential media test failures.')
+                return
+        modules_list = pactl_process.communicate()[0]
+        for module in modules_list.splitlines():
+            if module.find("module-stream-restore") >= 0:
+                # Some pulseaudio-utils versions don't provide
+                # the index, just an empty string
+                self._pa_module_index = module.split('\t')[0] or -1
+                try:
+                    # Since they could provide other stuff (not an index
+                    # nor an empty string), let's make sure this is an int.
+                    if int(self._pa_module_index) != -1:
+                        pactl_process = subprocess.Popen(["pactl", "unload-module", self._pa_module_index])
+                        pactl_process.wait()
+                        if pactl_process.returncode == 0:
+                            _log.debug('Unloaded module-stream-restore successfully')
+                        else:
+                            _log.debug('Unloading module-stream-restore failed')
+                except ValueError:
+                    # pactl should have returned an index if the module is found.
+                    _log.debug('Unable to parse module index. Please check if your pulseaudio-utils version is too old.')
+                return
+
+    def _restore_pulseaudio_module(self):
+        # If pulseaudio's module-stream-restore was previously unloaded,
+        # restore it. We shouldn't need extra checks here, since an
+        # index != -1 means we successfully unloaded it previously.
+        if self._pa_module_index != -1:
+            with open(os.devnull, 'w') as devnull:
+                pactl_process = subprocess.Popen(["pactl", "load-module", "module-stream-restore"], stdout=devnull, stderr=devnull)
+                pactl_process.wait()
+                if pactl_process.returncode == 0:
+                    _log.debug('Restored module-stream-restore successfully')
+                else:
+                    _log.debug('Restoring module-stream-restore failed')
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/qt.py b/Tools/Scripts/webkitpy/layout_tests/port/qt.py
new file mode 100644
index 0000000..55f13ee
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/qt.py
@@ -0,0 +1,184 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""QtWebKit implementation of the Port interface."""
+
+import glob
+import logging
+import re
+import sys
+import os
+
+from webkitpy.common.memoized import memoized
+from webkitpy.layout_tests.models.test_configuration import TestConfiguration
+from webkitpy.layout_tests.port.base import Port
+from webkitpy.layout_tests.port.xvfbdriver import XvfbDriver
+
+_log = logging.getLogger(__name__)
+
+
+class QtPort(Port):
+    ALL_VERSIONS = ['linux', 'win', 'mac']
+    port_name = "qt"
+
+    def _wk2_port_name(self):
+        return "qt-5.0-wk2"
+
+    def _port_flag_for_scripts(self):
+        return "--qt"
+
+    @classmethod
+    def determine_full_port_name(cls, host, options, port_name):
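+        # For the generic name 'qt' this appends the host OS name, so 'qt' on a
+        # Linux host becomes 'qt-linux'; already-qualified names are returned as-is.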
+        if port_name and port_name != cls.port_name:
+            return port_name
+        return port_name + '-' + host.platform.os_name
+
+    # sys_platform exists only for unit testing.
+    def __init__(self, host, port_name, **kwargs):
+        super(QtPort, self).__init__(host, port_name, **kwargs)
+
+        # FIXME: This will allow Port.baseline_search_path
+        # to do the right thing, but doesn't include support for qt-4.8 or qt-arm (seen in LayoutTests/platform) yet.
+        self._operating_system = port_name.replace('qt-', '')
+
+        # FIXME: Why is this being set at all?
+        self._version = self.operating_system()
+
+    def _generate_all_test_configurations(self):
+        configurations = []
+        for version in self.ALL_VERSIONS:
+            for build_type in self.ALL_BUILD_TYPES:
+                configurations.append(TestConfiguration(version=version, architecture='x86', build_type=build_type))
+        return configurations
+
+    def _build_driver(self):
+        # The Qt port builds DRT as part of the main build step
+        return True
+
+    def _path_to_driver(self):
+        return self._build_path('bin/%s' % self.driver_name())
+
+    def _path_to_image_diff(self):
+        return self._build_path('bin/ImageDiff')
+
+    def _path_to_webcore_library(self):
+        if self.operating_system() == 'mac':
+            return self._build_path('lib/QtWebKitWidgets.framework/QtWebKitWidgets')
+        else:
+            return self._build_path('lib/libQtWebKitWidgets.so')
+
+    def _modules_to_search_for_symbols(self):
+        # We search in every library to be reliable in the case of building with CONFIG+=force_static_libs_as_shared.
+        if self.operating_system() == 'mac':
+            frameworks = glob.glob(os.path.join(self._build_path('lib'), '*.framework'))
+            return [os.path.join(framework, os.path.splitext(os.path.basename(framework))[0]) for framework in frameworks]
+        else:
+            suffix = 'dll' if self.operating_system() == 'win' else 'so'
+            return glob.glob(os.path.join(self._build_path('lib'), 'lib*.' + suffix))
+
+    @memoized
+    def qt_version(self):
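+        # Parse the output of 'qmake -v' (e.g. "Using Qt version 5.0.0 in ...")
+        # into a major.minor string; fall back to '4.8' if qmake is not installed.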
+        version = ''
+        try:
+            for line in self._executive.run_command(['qmake', '-v']).split('\n'):
+                match = re.search('Qt\sversion\s(?P<version>\d\.\d)', line)
+                if match:
+                    version = match.group('version')
+                    break
+        except OSError:
+            version = '4.8'
+        return version
+
+    def _search_paths(self):
+        # qt-5.0-wk1    qt-5.0-wk2
+        #            \/
+        #         qt-5.0    qt-4.8
+        #                \/
+        #    (qt-linux|qt-mac|qt-win)
+        #                |
+        #               qt
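+        #
+        # For example, Qt 5.0 with WebKitTestRunner on Linux yields
+        # ['qt-5.0-wk2', 'qt-5.0', 'qt-linux', 'qt'].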
+        search_paths = []
+        version = self.qt_version()
+        if '5.0' in version:
+            if self.get_option('webkit_test_runner'):
+                search_paths.append('qt-5.0-wk2')
+            else:
+                search_paths.append('qt-5.0-wk1')
+        if '4.8' in version:
+            search_paths.append('qt-4.8')
+        elif version:
+            search_paths.append('qt-5.0')
+        search_paths.append(self.port_name + '-' + self.operating_system())
+        search_paths.append(self.port_name)
+        return search_paths
+
+    def default_baseline_search_path(self):
+        return map(self._webkit_baseline_path, self._search_paths())
+
+    def expectations_files(self):
+        paths = self._search_paths()
+        if self.get_option('webkit_test_runner'):
+            paths.append('wk2')
+
+        # expectations_files() uses the directories from _search_paths() in reverse order,
+        # e.g. qt -> qt-linux -> qt-4.8.
+        return list(reversed([self._filesystem.join(self._webkit_baseline_path(p), 'TestExpectations') for p in paths]))
+
+    def setup_environ_for_server(self, server_name=None):
+        clean_env = super(QtPort, self).setup_environ_for_server(server_name)
+        clean_env['QTWEBKIT_PLUGIN_PATH'] = self._build_path('lib/plugins')
+        self._copy_value_from_environ_if_set(clean_env, 'QT_DRT_WEBVIEW_MODE')
+        self._copy_value_from_environ_if_set(clean_env, 'DYLD_IMAGE_SUFFIX')
+        self._copy_value_from_environ_if_set(clean_env, 'QT_WEBKIT_LOG')
+        self._copy_value_from_environ_if_set(clean_env, 'DISABLE_NI_WARNING')
+        self._copy_value_from_environ_if_set(clean_env, 'QT_WEBKIT_PAUSE_UI_PROCESS')
+        self._copy_value_from_environ_if_set(clean_env, 'QT_QPA_PLATFORM_PLUGIN_PATH')
+        self._copy_value_from_environ_if_set(clean_env, 'QT_WEBKIT_DISABLE_UIPROCESS_DUMPPIXELS')
+        return clean_env
+
+    # FIXME: We should find a way to share this implementation with Gtk,
+    # or teach run-launcher how to call run-safari and move this down to Port.
+    def show_results_html_file(self, results_filename):
+        run_launcher_args = []
+        if self.get_option('webkit_test_runner'):
+            run_launcher_args.append('-2')
+        run_launcher_args.append("file://%s" % results_filename)
+        self._run_script("run-launcher", run_launcher_args)
+
+    def operating_system(self):
+        return self._operating_system
+
+    def check_sys_deps(self, needs_http):
+        result = super(QtPort, self).check_sys_deps(needs_http)
+        if 'WEBKIT_TESTFONTS' not in os.environ:
+            _log.error('\nThe WEBKIT_TESTFONTS environment variable is not defined or not set properly.')
+            _log.error('You must set it before running the tests.')
+            _log.error('Use git to grab the actual fonts from http://gitorious.org/qtwebkit/testfonts')
+            return False
+        return result
+
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/qt_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/qt_unittest.py
new file mode 100644
index 0000000..cf09bd8
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/qt_unittest.py
@@ -0,0 +1,132 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+import os
+from copy import deepcopy
+
+from webkitpy.common.system.executive_mock import MockExecutive, MockExecutive2
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+from webkitpy.layout_tests.port import port_testcase
+from webkitpy.layout_tests.port.qt import QtPort
+from webkitpy.tool.mocktool import MockOptions
+
+
+class QtPortTest(port_testcase.PortTestCase):
+    port_name = 'qt-mac'
+    port_maker = QtPort
+    search_paths_cases = [
+        {'search_paths':['qt-4.8', 'qt-mac', 'qt'], 'os_name':'mac', 'use_webkit2':False, 'qt_version':'4.8'},
+        {'search_paths':['qt-4.8', 'qt-win', 'qt'], 'os_name':'win', 'use_webkit2':False, 'qt_version':'4.8'},
+        {'search_paths':['qt-4.8', 'qt-linux', 'qt'], 'os_name':'linux', 'use_webkit2':False, 'qt_version':'4.8'},
+
+        {'search_paths':['qt-4.8', 'qt-mac', 'qt'], 'os_name':'mac', 'use_webkit2':False},
+        {'search_paths':['qt-4.8', 'qt-win', 'qt'], 'os_name':'win', 'use_webkit2':False},
+        {'search_paths':['qt-4.8', 'qt-linux', 'qt'], 'os_name':'linux', 'use_webkit2':False},
+
+        {'search_paths':['qt-5.0-wk2', 'qt-5.0', 'qt-mac', 'qt'], 'os_name':'mac', 'use_webkit2':True, 'qt_version':'5.0'},
+        {'search_paths':['qt-5.0-wk2', 'qt-5.0', 'qt-win', 'qt'], 'os_name':'win', 'use_webkit2':True, 'qt_version':'5.0'},
+        {'search_paths':['qt-5.0-wk2', 'qt-5.0', 'qt-linux', 'qt'], 'os_name':'linux', 'use_webkit2':True, 'qt_version':'5.0'},
+
+        {'search_paths':['qt-5.0-wk1', 'qt-5.0', 'qt-mac', 'qt'], 'os_name':'mac', 'use_webkit2':False, 'qt_version':'5.0'},
+        {'search_paths':['qt-5.0-wk1', 'qt-5.0', 'qt-win', 'qt'], 'os_name':'win', 'use_webkit2':False, 'qt_version':'5.0'},
+        {'search_paths':['qt-5.0-wk1', 'qt-5.0', 'qt-linux', 'qt'], 'os_name':'linux', 'use_webkit2':False, 'qt_version':'5.0'},
+    ]
+
+    def _assert_search_path(self, search_paths, os_name, use_webkit2=False, qt_version='4.8'):
+        # FIXME: Port constructors should not "parse" the port name, but
+        # rather be passed components (directly or via setters).  Once
+        # we fix that, this method will need a re-write.
+        host = MockSystemHost(os_name=os_name)
+        host.executive = MockExecutive2(self._qt_version(qt_version))
+        port_name = 'qt-' + os_name
+        port = self.make_port(host=host, qt_version=qt_version, port_name=port_name,
+                              options=MockOptions(webkit_test_runner=use_webkit2, platform='qt'))
+        absolute_search_paths = map(port._webkit_baseline_path, search_paths)
+        self.assertEquals(port.baseline_search_path(), absolute_search_paths)
+
+    def _assert_expectations_files(self, search_paths, os_name, use_webkit2=False, qt_version='4.8'):
+        # FIXME: Port constructors should not "parse" the port name, but
+        # rather be passed components (directly or via setters).  Once
+        # we fix that, this method will need a re-write.
+        host = MockSystemHost(os_name=os_name)
+        host.executive = MockExecutive2(self._qt_version(qt_version))
+        port_name = 'qt-' + os_name
+        port = self.make_port(host=host, qt_version=qt_version, port_name=port_name,
+                              options=MockOptions(webkit_test_runner=use_webkit2, platform='qt'))
+        self.assertEquals(port.expectations_files(), search_paths)
+
+    def _qt_version(self, qt_version):
+        if qt_version == '4.8':
+            return 'QMake version 2.01a\nUsing Qt version 4.8.0 in /usr/local/Trolltech/Qt-4.8.2/lib'
+        if qt_version == '5.0':
+            return 'QMake version 2.01a\nUsing Qt version 5.0.0 in /usr/local/Trolltech/Qt-5.0.0/lib'
+
+    def test_baseline_search_path(self):
+        for case in self.search_paths_cases:
+            self._assert_search_path(**case)
+
+    def test_expectations_files(self):
+        for case in self.search_paths_cases:
+            expectations_case = deepcopy(case)
+            if expectations_case['use_webkit2']:
+                expectations_case['search_paths'].append("wk2")
+            expectations_case['search_paths'].reverse()
+            expectations_case['search_paths'] = map(lambda path: '/mock-checkout/LayoutTests/platform/%s/TestExpectations' % (path), expectations_case['search_paths'])
+            self._assert_expectations_files(**expectations_case)
+
+    def test_show_results_html_file(self):
+        port = self.make_port()
+        port._executive = MockExecutive(should_log=True)
+        expected_stderr = "MOCK run_command: ['Tools/Scripts/run-launcher', '--release', '--qt', 'file://test.html'], cwd=/mock-checkout\n"
+        OutputCapture().assert_outputs(self, port.show_results_html_file, ["test.html"], expected_stderr=expected_stderr)
+
+    def test_setup_environ_for_server(self):
+        port = self.make_port()
+        env = port.setup_environ_for_server(port.driver_name())
+        self.assertEquals(env['QTWEBKIT_PLUGIN_PATH'], '/mock-build/lib/plugins')
+
+    def test_operating_system(self):
+        self.assertEqual('linux', self.make_port(port_name='qt-linux', os_name='linux').operating_system())
+        self.assertEqual('mac', self.make_port(os_name='mac').operating_system())
+        self.assertEqual('win', self.make_port(port_name='qt-win', os_name='win').operating_system())
+
+    def test_check_sys_deps(self):
+        port = self.make_port()
+
+        # Success
+        os.environ['WEBKIT_TESTFONTS'] = '/tmp/foo'
+        port._executive = MockExecutive2(exit_code=0)
+        self.assertTrue(port.check_sys_deps(needs_http=False))
+
+        # Failure
+        del os.environ['WEBKIT_TESTFONTS']
+        port._executive = MockExecutive2(exit_code=1,
+            output='testing output failure')
+        self.assertFalse(port.check_sys_deps(needs_http=False))
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/server_process.py b/Tools/Scripts/webkitpy/layout_tests/port/server_process.py
new file mode 100644
index 0000000..8f0cda9
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/server_process.py
@@ -0,0 +1,380 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Package that implements the ServerProcess wrapper class"""
+
+import errno
+import logging
+import signal
+import sys
+import time
+
+# Note that although win32 python does provide an implementation of
+# the win32 select API, it only works on sockets, and not on the named pipes
+# used by subprocess, so we have to use the native APIs directly.
+if sys.platform == 'win32':
+    import msvcrt
+    import win32pipe
+    import win32file
+else:
+    import fcntl
+    import os
+    import select
+
+from webkitpy.common.system.executive import ScriptError
+
+
+_log = logging.getLogger(__name__)
+
+
+class ServerProcess(object):
+    """This class provides a wrapper around a subprocess that
+    implements a simple request/response usage model. The primary benefit
+    is that reading responses takes a deadline, so that we don't ever block
+    indefinitely. The class also handles transparently restarting processes
+    as necessary to keep issuing commands."""
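+
+    # A minimal usage sketch (the command, input and deadline below are purely
+    # illustrative, not taken from any real caller):
+    #
+    #   proc = ServerProcess(port, 'DumpRenderTree', [port._path_to_driver(), '-'])
+    #   proc.write('some-test-input\n')   # write() starts the process on first use
+    #   line = proc.read_stdout_line(deadline=time.time() + 5)
+    #   out, err = proc.stop()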
+
+    def __init__(self, port_obj, name, cmd, env=None, universal_newlines=False, treat_no_data_as_crash=False):
+        self._port = port_obj
+        self._name = name  # Should be the command name (e.g. DumpRenderTree, ImageDiff)
+        self._cmd = cmd
+        self._env = env
+        # Set if the process outputs non-standard newlines like '\r\n' or '\r'.
+        # Don't set if there will be binary data or the data must be ASCII encoded.
+        self._universal_newlines = universal_newlines
+        self._treat_no_data_as_crash = treat_no_data_as_crash
+        self._host = self._port.host
+        self._pid = None
+        self._reset()
+
+        # See comment in imports for why we need the win32 APIs and can't just use select.
+        # FIXME: there should be a way to get win32 vs. cygwin from platforminfo.
+        self._use_win32_apis = sys.platform == 'win32'
+
+    def name(self):
+        return self._name
+
+    def pid(self):
+        return self._pid
+
+    def _reset(self):
+        if getattr(self, '_proc', None):
+            if self._proc.stdin:
+                self._proc.stdin.close()
+                self._proc.stdin = None
+            if self._proc.stdout:
+                self._proc.stdout.close()
+                self._proc.stdout = None
+            if self._proc.stderr:
+                self._proc.stderr.close()
+                self._proc.stderr = None
+
+        self._proc = None
+        self._output = str()  # bytearray() once we require Python 2.6
+        self._error = str()  # bytearray() once we require Python 2.6
+        self._crashed = False
+        self.timed_out = False
+
+    def process_name(self):
+        return self._name
+
+    def _start(self):
+        if self._proc:
+            raise ValueError("%s already running" % self._name)
+        self._reset()
+        # close_fds is a workaround for http://bugs.python.org/issue2320
+        close_fds = not self._host.platform.is_win()
+        self._proc = self._host.executive.popen(self._cmd, stdin=self._host.executive.PIPE,
+            stdout=self._host.executive.PIPE,
+            stderr=self._host.executive.PIPE,
+            close_fds=close_fds,
+            env=self._env,
+            universal_newlines=self._universal_newlines)
+        self._pid = self._proc.pid
+        fd = self._proc.stdout.fileno()
+        if not self._use_win32_apis:
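+            # Put stdout/stderr into non-blocking mode so that the read() calls in
+            # _wait_for_data_and_update_buffers_using_select() never block.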
+            fl = fcntl.fcntl(fd, fcntl.F_GETFL)
+            fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
+            fd = self._proc.stderr.fileno()
+            fl = fcntl.fcntl(fd, fcntl.F_GETFL)
+            fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
+
+    def _handle_possible_interrupt(self):
+        """This routine checks to see if the process crashed or exited
+        because of a keyboard interrupt and raises KeyboardInterrupt
+        accordingly."""
+        # FIXME: Linux and Mac set the returncode to -signal.SIGINT if a
+        # subprocess is killed with a ctrl^C.  Previous comments in this
+        # routine said that supposedly Windows returns 0xc000001d, but that's not what
+        # -1073741510 evaluates to. Figure out what the right value is
+        # for win32 and cygwin here ...
+        if self._proc.returncode in (-1073741510, -signal.SIGINT):
+            raise KeyboardInterrupt
+
+    def poll(self):
+        """Check to see if the underlying process is running; returns None
+        if it still is (wrapper around subprocess.poll)."""
+        if self._proc:
+            return self._proc.poll()
+        return None
+
+    def write(self, bytes):
+        """Write a request to the subprocess. The subprocess is (re-)start()'ed
+        if is not already running."""
+        if not self._proc:
+            self._start()
+        try:
+            self._proc.stdin.write(bytes)
+        except IOError, e:
+            self.stop(0.0)
+            # stop() calls _reset(), so we have to set crashed to True after calling stop().
+            self._crashed = True
+
+    def _pop_stdout_line_if_ready(self):
+        index_after_newline = self._output.find('\n') + 1
+        if index_after_newline > 0:
+            return self._pop_output_bytes(index_after_newline)
+        return None
+
+    def _pop_stderr_line_if_ready(self):
+        index_after_newline = self._error.find('\n') + 1
+        if index_after_newline > 0:
+            return self._pop_error_bytes(index_after_newline)
+        return None
+
+    def pop_all_buffered_stderr(self):
+        return self._pop_error_bytes(len(self._error))
+
+    def read_stdout_line(self, deadline):
+        return self._read(deadline, self._pop_stdout_line_if_ready)
+
+    def read_stderr_line(self, deadline):
+        return self._read(deadline, self._pop_stderr_line_if_ready)
+
+    def read_either_stdout_or_stderr_line(self, deadline):
+        def retrieve_bytes_from_buffers():
+            stdout_line = self._pop_stdout_line_if_ready()
+            if stdout_line:
+                return stdout_line, None
+            stderr_line = self._pop_stderr_line_if_ready()
+            if stderr_line:
+                return None, stderr_line
+            return None  # Instructs the caller to keep waiting.
+
+        return_value = self._read(deadline, retrieve_bytes_from_buffers)
+        # FIXME: This is a bit of a hack around the fact that _read normally only returns one value, but this caller wants it to return two.
+        if return_value is None:
+            return None, None
+        return return_value
+
+    def read_stdout(self, deadline, size):
+        if size <= 0:
+            raise ValueError('ServerProcess.read() called with a non-positive size: %d ' % size)
+
+        def retrieve_bytes_from_stdout_buffer():
+            if len(self._output) >= size:
+                return self._pop_output_bytes(size)
+            return None
+
+        return self._read(deadline, retrieve_bytes_from_stdout_buffer)
+
+    def _log(self, message):
+        # This is a bit of a hack, but we first log a blank line to avoid
+        # messing up the master process's output.
+        _log.info('')
+        _log.info(message)
+
+    def _handle_timeout(self):
+        self.timed_out = True
+        self._port.sample_process(self._name, self._proc.pid)
+
+    def _split_string_after_index(self, string, index):
+        return string[:index], string[index:]
+
+    def _pop_output_bytes(self, bytes_count):
+        output, self._output = self._split_string_after_index(self._output, bytes_count)
+        return output
+
+    def _pop_error_bytes(self, bytes_count):
+        output, self._error = self._split_string_after_index(self._error, bytes_count)
+        return output
+
+    def _wait_for_data_and_update_buffers_using_select(self, deadline, stopping=False):
+        if self._proc.stdout.closed or self._proc.stderr.closed:
+            # If the process crashed and is using FIFOs, like Chromium Android, the
+            # stdout and stderr pipes will be closed.
+            return
+
+        out_fd = self._proc.stdout.fileno()
+        err_fd = self._proc.stderr.fileno()
+        select_fds = (out_fd, err_fd)
+        try:
+            read_fds, _, _ = select.select(select_fds, [], select_fds, max(deadline - time.time(), 0))
+        except select.error, e:
+            # We can ignore EINVAL since it's likely the process just crashed and we'll
+            # figure that out the next time through the loop in _read().
+            if e.args[0] == errno.EINVAL:
+                return
+            raise
+
+        try:
+            # Note that we may get no data during read() even though
+            # select says we got something; see the select() man page
+            # on linux. I don't know if this happens on Mac OS and
+            # other Unixen as well, but we don't bother special-casing
+            # Linux because it's relatively harmless either way.
+            if out_fd in read_fds:
+                data = self._proc.stdout.read()
+                if not data and not stopping and (self._treat_no_data_as_crash or self._proc.poll()):
+                    self._crashed = True
+                self._output += data
+
+            if err_fd in read_fds:
+                data = self._proc.stderr.read()
+                if not data and not stopping and (self._treat_no_data_as_crash or self._proc.poll()):
+                    self._crashed = True
+                self._error += data
+        except IOError, e:
+            # We can ignore the IOErrors because we will detect if the subprocess crashed
+            # the next time through the loop in _read().
+            pass
+
+    def _wait_for_data_and_update_buffers_using_win32_apis(self, deadline):
+        # See http://code.activestate.com/recipes/440554-module-to-allow-asynchronous-subprocess-use-on-win/
+        # and http://docs.activestate.com/activepython/2.6/pywin32/modules.html
+        # for documentation on all of these win32-specific modules.
+        now = time.time()
+        out_fh = msvcrt.get_osfhandle(self._proc.stdout.fileno())
+        err_fh = msvcrt.get_osfhandle(self._proc.stderr.fileno())
+        while (self._proc.poll() is None) and (now < deadline):
+            output = self._non_blocking_read_win32(out_fh)
+            error = self._non_blocking_read_win32(err_fh)
+            if output or error:
+                if output:
+                    self._output += output
+                if error:
+                    self._error += error
+                return
+            time.sleep(0.01)
+            now = time.time()
+        return
+
+    def _non_blocking_read_win32(self, handle):
+        try:
+            _, avail, _ = win32pipe.PeekNamedPipe(handle, 0)
+            if avail > 0:
+                _, buf = win32file.ReadFile(handle, avail, None)
+                return buf
+        except Exception, e:
+            if e[0] not in (109, errno.ESHUTDOWN):  # 109 == win32 ERROR_BROKEN_PIPE
+                raise
+        return None
+
+    def has_crashed(self):
+        if not self._crashed and self.poll():
+            self._crashed = True
+            self._handle_possible_interrupt()
+        return self._crashed
+
+    # This read function is a bit oddly-designed, as it polls both stdout and stderr, yet
+    # only reads/returns from one of them (buffering both in local self._output/self._error).
+    # It might be cleaner to pass in the file descriptor to poll instead.
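+    # The callback examines the buffered self._output/self._error and returns
+    # bytes once enough data is available, or None to tell _read() to keep waiting.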
+    def _read(self, deadline, fetch_bytes_from_buffers_callback):
+        while True:
+            if self.has_crashed():
+                return None
+
+            if time.time() > deadline:
+                self._handle_timeout()
+                return None
+
+            bytes = fetch_bytes_from_buffers_callback()
+            if bytes is not None:
+                return bytes
+
+            if self._use_win32_apis:
+                self._wait_for_data_and_update_buffers_using_win32_apis(deadline)
+            else:
+                self._wait_for_data_and_update_buffers_using_select(deadline)
+
+    def start(self):
+        if not self._proc:
+            self._start()
+
+    def stop(self, timeout_secs=3.0):
+        if not self._proc:
+            return (None, None)
+
+        # Only bother to check for leaks or stderr if the process is still running.
+        if self.poll() is None:
+            self._port.check_for_leaks(self.name(), self.pid())
+
+        now = time.time()
+        if self._proc.stdin:
+            self._proc.stdin.close()
+            self._proc.stdin = None
+        killed = False
+        if timeout_secs:
+            deadline = now + timeout_secs
+            while self._proc.poll() is None and time.time() < deadline:
+                time.sleep(0.01)
+            if self._proc.poll() is None:
+                _log.warning('stopping %s(pid %d) timed out, killing it' % (self._name, self._proc.pid))
+
+        if self._proc.poll() is None:
+            self._kill()
+            killed = True
+            _log.debug('killed pid %d' % self._proc.pid)
+
+        # read any remaining data on the pipes and return it.
+        if not killed:
+            if self._use_win32_apis:
+                self._wait_for_data_and_update_buffers_using_win32_apis(now)
+            else:
+                self._wait_for_data_and_update_buffers_using_select(now, stopping=True)
+        out, err = self._output, self._error
+        self._reset()
+        return (out, err)
+
+    def kill(self):
+        self.stop(0.0)
+
+    def _kill(self):
+        self._host.executive.kill_process(self._proc.pid)
+        if self._proc.poll() is not None:
+            self._proc.wait()
+
+    def replace_outputs(self, stdout, stderr):
+        assert self._proc
+        if stdout:
+            self._proc.stdout.close()
+            self._proc.stdout = stdout
+        if stderr:
+            self._proc.stderr.close()
+            self._proc.stderr = stderr
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/server_process_mock.py b/Tools/Scripts/webkitpy/layout_tests/port/server_process_mock.py
new file mode 100644
index 0000000..d234ebd
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/server_process_mock.py
@@ -0,0 +1,77 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class MockServerProcess(object):
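+    # 'lines' supplies canned stdout content handed back by read_stdout_line() and
+    # read_stdout(); 'crashed' fixes the value returned by has_crashed().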
+    def __init__(self, port_obj=None, name=None, cmd=None, env=None, universal_newlines=False, lines=None, crashed=False):
+        self.timed_out = False
+        self.lines = lines or []
+        self.crashed = crashed
+        self.writes = []
+        self.cmd = cmd
+        self.env = env
+        self.started = False
+        self.stopped = False
+
+    def write(self, bytes):
+        self.writes.append(bytes)
+
+    def has_crashed(self):
+        return self.crashed
+
+    def read_stdout_line(self, deadline):
+        return self.lines.pop(0) + "\n"
+
+    def read_stdout(self, deadline, size):
+        first_line = self.lines[0]
+        if size > len(first_line):
+            self.lines.pop(0)
+            remaining_size = size - len(first_line) - 1
+            if not remaining_size:
+                return first_line + "\n"
+            return first_line + "\n" + self.read_stdout(deadline, remaining_size)
+        result = self.lines[0][:size]
+        self.lines[0] = self.lines[0][size:]
+        return result
+
+    def pop_all_buffered_stderr(self):
+        return ''
+
+    def read_either_stdout_or_stderr_line(self, deadline):
+        # FIXME: We should have tests which intermix stderr and stdout lines.
+        return self.read_stdout_line(deadline), None
+
+    def start(self):
+        self.started = True
+
+    def stop(self, kill_directly=False):
+        self.stopped = True
+        return
+
+    def kill(self):
+        return
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py
new file mode 100644
index 0000000..7a5ac45
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py
@@ -0,0 +1,152 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+import time
+import unittest
+
+from webkitpy.layout_tests.port.factory import PortFactory
+from webkitpy.layout_tests.port import server_process
+from webkitpy.common.system.systemhost import SystemHost
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+from webkitpy.common.system.outputcapture import OutputCapture
+
+
+class TrivialMockPort(object):
+    def __init__(self):
+        self.host = MockSystemHost()
+        self.host.executive.kill_process = lambda x: None
+
+    def results_directory(self):
+        return "/mock-results"
+
+    def check_for_leaks(self, process_name, process_pid):
+        pass
+
+    def process_kill_time(self):
+        return 1
+
+
+class MockFile(object):
+    def __init__(self, server_process):
+        self._server_process = server_process
+        self.closed = False
+
+    def fileno(self):
+        return 1
+
+    def write(self, line):
+        self._server_process.broken_pipes.append(self)
+        raise IOError
+
+    def close(self):
+        self.closed = True
+
+
+class MockProc(object):
+    def __init__(self, server_process):
+        self.stdin = MockFile(server_process)
+        self.stdout = MockFile(server_process)
+        self.stderr = MockFile(server_process)
+        self.pid = 1
+
+    def poll(self):
+        return 1
+
+    def wait(self):
+        return 0
+
+
+class FakeServerProcess(server_process.ServerProcess):
+    def _start(self):
+        self._proc = MockProc(self)
+        self.stdin = self._proc.stdin
+        self.stdout = self._proc.stdout
+        self.stderr = self._proc.stderr
+        self._pid = self._proc.pid
+        self.broken_pipes = []
+
+
+class TestServerProcess(unittest.TestCase):
+    def test_basic(self):
+        cmd = [sys.executable, '-c', 'import sys; import time; time.sleep(0.02); print "stdout"; sys.stdout.flush(); print >>sys.stderr, "stderr"']
+        host = SystemHost()
+        factory = PortFactory(host)
+        port = factory.get()
+        now = time.time()
+        proc = server_process.ServerProcess(port, 'python', cmd)
+        proc.write('')
+
+        self.assertEquals(proc.poll(), None)
+        self.assertFalse(proc.has_crashed())
+
+        # check that doing a read after an expired deadline returns
+        # nothing immediately.
+        line = proc.read_stdout_line(now - 1)
+        self.assertEquals(line, None)
+
+        # FIXME: This part appears to be flaky. line should always be non-None.
+        # FIXME: https://bugs.webkit.org/show_bug.cgi?id=88280
+        line = proc.read_stdout_line(now + 1.0)
+        if line:
+            self.assertEquals(line.strip(), "stdout")
+
+        line = proc.read_stderr_line(now + 1.0)
+        if line:
+            self.assertEquals(line.strip(), "stderr")
+
+        proc.stop(0)
+
+    def test_cleanup(self):
+        port_obj = TrivialMockPort()
+        server_process = FakeServerProcess(port_obj=port_obj, name="test", cmd=["test"])
+        server_process._start()
+        server_process.stop()
+        self.assertTrue(server_process.stdin.closed)
+        self.assertTrue(server_process.stdout.closed)
+        self.assertTrue(server_process.stderr.closed)
+
+    def test_broken_pipe(self):
+        port_obj = TrivialMockPort()
+
+        port_obj.host.platform.os_name = 'win'
+        server_process = FakeServerProcess(port_obj=port_obj, name="test", cmd=["test"])
+        server_process.write("should break")
+        self.assertTrue(server_process.has_crashed())
+        self.assertNotEquals(server_process.pid(), None)
+        self.assertEquals(server_process._proc, None)
+        self.assertEquals(server_process.broken_pipes, [server_process.stdin])
+
+        port_obj.host.platform.os_name = 'mac'
+        server_process = FakeServerProcess(port_obj=port_obj, name="test", cmd=["test"])
+        server_process.write("should break")
+        self.assertTrue(server_process.has_crashed())
+        self.assertEquals(server_process._proc, None)
+        self.assertEquals(server_process.broken_pipes, [server_process.stdin])
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/test.py b/Tools/Scripts/webkitpy/layout_tests/port/test.py
new file mode 100644
index 0000000..f7dd291
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/test.py
@@ -0,0 +1,598 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import base64
+import sys
+import time
+
+from webkitpy.layout_tests.port import Port, Driver, DriverOutput
+from webkitpy.layout_tests.port.base import VirtualTestSuite
+from webkitpy.layout_tests.models.test_configuration import TestConfiguration
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.crashlogs import CrashLogs
+
+
+# This sets basic expectations for a test. Each individual expectation
+# can be overridden by a keyword argument in TestList.add().
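+# For example, tests.add('failures/expected/crash.html', crash=True) below marks
+# that test as one whose driver invocation is expected to crash.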
+class TestInstance(object):
+    def __init__(self, name):
+        self.name = name
+        self.base = name[(name.rfind("/") + 1):name.rfind(".")]
+        self.crash = False
+        self.web_process_crash = False
+        self.exception = False
+        self.hang = False
+        self.keyboard = False
+        self.error = ''
+        self.timeout = False
+        self.is_reftest = False
+
+        # The values of each field are treated as raw byte strings. They
+        # will be converted to unicode strings where appropriate using
+        # FileSystem.read_text_file().
+        self.actual_text = self.base + '-txt'
+        self.actual_checksum = self.base + '-checksum'
+
+        # We add the '\x8a' for the image file to prevent the value from
+        # being treated as UTF-8 (the character is invalid)
+        self.actual_image = self.base + '\x8a' + '-png' + 'tEXtchecksum\x00' + self.actual_checksum
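+        # The 'tEXtchecksum\x00<checksum>' marker above imitates the PNG tEXt chunk
+        # that real pixel dumps carry, presumably so checksum-extraction helpers can
+        # find an embedded checksum in this fake image data.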
+
+        self.expected_text = self.actual_text
+        self.expected_image = self.actual_image
+
+        self.actual_audio = None
+        self.expected_audio = None
+
+
+# This is an in-memory list of tests, what we want them to produce, and
+# what we want to claim are the expected results.
+class TestList(object):
+    def __init__(self):
+        self.tests = {}
+
+    def add(self, name, **kwargs):
+        test = TestInstance(name)
+        for key, value in kwargs.items():
+            test.__dict__[key] = value
+        self.tests[name] = test
+
+    def add_reftest(self, name, reference_name, same_image):
+        self.add(name, actual_checksum='xxx', actual_image='XXX', is_reftest=True)
+        if same_image:
+            self.add(reference_name, actual_checksum='xxx', actual_image='XXX', is_reftest=True)
+        else:
+            self.add(reference_name, actual_checksum='yyy', actual_image='YYY', is_reftest=True)
+
+    def keys(self):
+        return self.tests.keys()
+
+    def __contains__(self, item):
+        return item in self.tests
+
+    def __getitem__(self, item):
+        return self.tests[item]
+
+
+def unit_test_list():
+    tests = TestList()
+    tests.add('failures/expected/crash.html', crash=True)
+    tests.add('failures/expected/exception.html', exception=True)
+    tests.add('failures/expected/timeout.html', timeout=True)
+    tests.add('failures/expected/hang.html', hang=True)
+    tests.add('failures/expected/missing_text.html', expected_text=None)
+    tests.add('failures/expected/image.html',
+              actual_image='image_fail-pngtEXtchecksum\x00checksum_fail',
+              expected_image='image-pngtEXtchecksum\x00checksum-png')
+    tests.add('failures/expected/image_checksum.html',
+              actual_checksum='image_checksum_fail-checksum',
+              actual_image='image_checksum_fail-png')
+    tests.add('failures/expected/audio.html',
+              actual_audio=base64.b64encode('audio_fail-wav'), expected_audio='audio-wav',
+              actual_text=None, expected_text=None,
+              actual_image=None, expected_image=None,
+              actual_checksum=None)
+    tests.add('failures/expected/keyboard.html', keyboard=True)
+    tests.add('failures/expected/missing_check.html',
+              expected_image='missing_check-png')
+    tests.add('failures/expected/missing_image.html', expected_image=None)
+    tests.add('failures/expected/missing_audio.html', expected_audio=None,
+              actual_text=None, expected_text=None,
+              actual_image=None, expected_image=None,
+              actual_checksum=None)
+    tests.add('failures/expected/newlines_leading.html',
+              expected_text="\nfoo\n", actual_text="foo\n")
+    tests.add('failures/expected/newlines_trailing.html',
+              expected_text="foo\n\n", actual_text="foo\n")
+    tests.add('failures/expected/newlines_with_excess_CR.html',
+              expected_text="foo\r\r\r\n", actual_text="foo\n")
+    tests.add('failures/expected/text.html', actual_text='text_fail-png')
+    tests.add('failures/expected/skip_text.html', actual_text='text diff')
+    tests.add('failures/flaky/text.html')
+    tests.add('failures/unexpected/missing_text.html', expected_text=None)
+    tests.add('failures/unexpected/missing_check.html', expected_image='missing-check-png')
+    tests.add('failures/unexpected/missing_image.html', expected_image=None)
+    tests.add('failures/unexpected/missing_render_tree_dump.html', actual_text="""layer at (0,0) size 800x600
+  RenderView at (0,0) size 800x600
+layer at (0,0) size 800x34
+  RenderBlock {HTML} at (0,0) size 800x34
+    RenderBody {BODY} at (8,8) size 784x18
+      RenderText {#text} at (0,0) size 133x18
+        text run at (0,0) width 133: "This is an image test!"
+""", expected_text=None)
+    tests.add('failures/unexpected/crash.html', crash=True)
+    tests.add('failures/unexpected/crash-with-stderr.html', crash=True,
+              error="mock-std-error-output")
+    tests.add('failures/unexpected/web-process-crash-with-stderr.html', web_process_crash=True,
+              error="mock-std-error-output")
+    tests.add('failures/unexpected/pass.html')
+    tests.add('failures/unexpected/text-checksum.html',
+              actual_text='text-checksum_fail-txt',
+              actual_checksum='text-checksum_fail-checksum')
+    tests.add('failures/unexpected/text-image-checksum.html',
+              actual_text='text-image-checksum_fail-txt',
+              actual_image='text-image-checksum_fail-pngtEXtchecksum\x00checksum_fail',
+              actual_checksum='text-image-checksum_fail-checksum')
+    tests.add('failures/unexpected/checksum-with-matching-image.html',
+              actual_checksum='text-image-checksum_fail-checksum')
+    tests.add('failures/unexpected/skip_pass.html')
+    tests.add('failures/unexpected/text.html', actual_text='text_fail-txt')
+    tests.add('failures/unexpected/timeout.html', timeout=True)
+    tests.add('http/tests/passes/text.html')
+    tests.add('http/tests/passes/image.html')
+    tests.add('http/tests/ssl/text.html')
+    tests.add('passes/args.html')
+    tests.add('passes/error.html', error='stuff going to stderr')
+    tests.add('passes/image.html')
+    tests.add('passes/audio.html',
+              actual_audio=base64.b64encode('audio-wav'), expected_audio='audio-wav',
+              actual_text=None, expected_text=None,
+              actual_image=None, expected_image=None,
+              actual_checksum=None)
+    tests.add('passes/platform_image.html')
+    tests.add('passes/checksum_in_image.html',
+              expected_image='tEXtchecksum\x00checksum_in_image-checksum')
+    tests.add('passes/skipped/skip.html')
+
+    # Note that here the checksums don't match but the images do, so this test passes "unexpectedly".
+    # See https://bugs.webkit.org/show_bug.cgi?id=69444 .
+    tests.add('failures/unexpected/checksum.html', actual_checksum='checksum_fail-checksum')
+
+    # Text output files contain "\r\n" on Windows.  This may be
+    # helpfully filtered to "\r\r\n" by our Python/Cygwin tooling.
+    tests.add('passes/text.html',
+              expected_text='\nfoo\n\n', actual_text='\nfoo\r\n\r\r\n')
+
+    # For reftests.
+    tests.add_reftest('passes/reftest.html', 'passes/reftest-expected.html', same_image=True)
+    tests.add_reftest('passes/mismatch.html', 'passes/mismatch-expected-mismatch.html', same_image=False)
+    tests.add_reftest('passes/svgreftest.svg', 'passes/svgreftest-expected.svg', same_image=True)
+    tests.add_reftest('passes/xhtreftest.xht', 'passes/xhtreftest-expected.html', same_image=True)
+    tests.add_reftest('passes/phpreftest.php', 'passes/phpreftest-expected-mismatch.svg', same_image=False)
+    tests.add_reftest('failures/expected/reftest.html', 'failures/expected/reftest-expected.html', same_image=False)
+    tests.add_reftest('failures/expected/mismatch.html', 'failures/expected/mismatch-expected-mismatch.html', same_image=True)
+    tests.add_reftest('failures/unexpected/reftest.html', 'failures/unexpected/reftest-expected.html', same_image=False)
+    tests.add_reftest('failures/unexpected/mismatch.html', 'failures/unexpected/mismatch-expected-mismatch.html', same_image=True)
+    tests.add('failures/unexpected/reftest-nopixel.html', actual_checksum=None, actual_image=None, is_reftest=True)
+    tests.add('failures/unexpected/reftest-nopixel-expected.html', actual_checksum=None, actual_image=None, is_reftest=True)
+    # FIXME: Add a reftest which crashes.
+    tests.add('reftests/foo/test.html')
+    tests.add('reftests/foo/test-ref.html')
+
+    tests.add('reftests/foo/multiple-match-success.html', actual_checksum='abc', actual_image='abc')
+    tests.add('reftests/foo/multiple-match-failure.html', actual_checksum='abc', actual_image='abc')
+    tests.add('reftests/foo/multiple-mismatch-success.html', actual_checksum='abc', actual_image='abc')
+    tests.add('reftests/foo/multiple-mismatch-failure.html', actual_checksum='abc', actual_image='abc')
+    tests.add('reftests/foo/multiple-both-success.html', actual_checksum='abc', actual_image='abc')
+    tests.add('reftests/foo/multiple-both-failure.html', actual_checksum='abc', actual_image='abc')
+
+    tests.add('reftests/foo/matching-ref.html', actual_checksum='abc', actual_image='abc')
+    tests.add('reftests/foo/mismatching-ref.html', actual_checksum='def', actual_image='def')
+    tests.add('reftests/foo/second-mismatching-ref.html', actual_checksum='ghi', actual_image='ghi')
+
+    # The following files shouldn't be treated as reftests
+    tests.add_reftest('reftests/foo/unlistedtest.html', 'reftests/foo/unlistedtest-expected.html', same_image=True)
+    tests.add('reftests/foo/reference/bar/common.html')
+    tests.add('reftests/foo/reftest/bar/shared.html')
+
+    tests.add('websocket/tests/passes/text.html')
+
+    # For testing that tests are properly included from platform directories.
+    tests.add('platform/test-mac-leopard/http/test.html')
+    tests.add('platform/test-win-win7/http/test.html')
+
+    # For --no-http tests, check that platform-specific HTTP tests are properly skipped.
+    tests.add('platform/test-snow-leopard/http/test.html')
+    tests.add('platform/test-snow-leopard/websocket/test.html')
+
+    # For testing if perf tests are running in a locked shard.
+    tests.add('perf/foo/test.html')
+    tests.add('perf/foo/test-ref.html')
+
+    # For testing --pixel-test-directories.
+    tests.add('failures/unexpected/pixeldir/image_in_pixeldir.html',
+        actual_image='image_in_pixeldir-pngtEXtchecksum\x00checksum_fail',
+        expected_image='image_in_pixeldir-pngtEXtchecksum\x00checksum-png')
+    tests.add('failures/unexpected/image_not_in_pixeldir.html',
+        actual_image='image_not_in_pixeldir-pngtEXtchecksum\x00checksum_fail',
+        expected_image='image_not_in_pixeldir-pngtEXtchecksum\x00checksum-png')
+
+    # For testing that virtual test suites don't expand names containing themselves
+    # See webkit.org/b/97925 and base_unittest.PortTest.test_tests().
+    tests.add('passes/test-virtual-passes.html')
+    tests.add('passes/passes/test-virtual-passes.html')
+
+    return tests
+
+
+# Here we use a non-standard location for the layout tests, to ensure that
+# this works. The path contains a '.' in the name because we've seen bugs
+# related to this before.
+
+LAYOUT_TEST_DIR = '/test.checkout/LayoutTests'
+PERF_TEST_DIR = '/test.checkout/PerformanceTests'
+
+
+# Here we synthesize an in-memory filesystem from the test list
+# in order to fully control the test output and to demonstrate that
+# we don't need a real filesystem to run the tests.
+def add_unit_tests_to_mock_filesystem(filesystem):
+    # Add the test_expectations file.
+    filesystem.maybe_make_directory(LAYOUT_TEST_DIR + '/platform/test')
+    if not filesystem.exists(LAYOUT_TEST_DIR + '/platform/test/TestExpectations'):
+        filesystem.write_text_file(LAYOUT_TEST_DIR + '/platform/test/TestExpectations', """
+Bug(test) failures/expected/crash.html [ Crash ]
+Bug(test) failures/expected/image.html [ ImageOnlyFailure ]
+Bug(test) failures/expected/audio.html [ Failure ]
+Bug(test) failures/expected/image_checksum.html [ ImageOnlyFailure ]
+Bug(test) failures/expected/mismatch.html [ ImageOnlyFailure ]
+Bug(test) failures/expected/missing_check.html [ Missing Pass ]
+Bug(test) failures/expected/missing_image.html [ Missing Pass ]
+Bug(test) failures/expected/missing_audio.html [ Missing Pass ]
+Bug(test) failures/expected/missing_text.html [ Missing Pass ]
+Bug(test) failures/expected/newlines_leading.html [ Failure ]
+Bug(test) failures/expected/newlines_trailing.html [ Failure ]
+Bug(test) failures/expected/newlines_with_excess_CR.html [ Failure ]
+Bug(test) failures/expected/reftest.html [ ImageOnlyFailure ]
+Bug(test) failures/expected/text.html [ Failure ]
+Bug(test) failures/expected/timeout.html [ Timeout ]
+Bug(test) failures/expected/hang.html [ WontFix ]
+Bug(test) failures/expected/keyboard.html [ WontFix ]
+Bug(test) failures/expected/exception.html [ WontFix ]
+Bug(test) failures/unexpected/pass.html [ Failure ]
+Bug(test) passes/skipped/skip.html [ Skip ]
+""")
+
+    filesystem.maybe_make_directory(LAYOUT_TEST_DIR + '/reftests/foo')
+    filesystem.write_text_file(LAYOUT_TEST_DIR + '/reftests/foo/reftest.list', """
+== test.html test-ref.html
+
+== multiple-match-success.html mismatching-ref.html
+== multiple-match-success.html matching-ref.html
+== multiple-match-failure.html mismatching-ref.html
+== multiple-match-failure.html second-mismatching-ref.html
+!= multiple-mismatch-success.html mismatching-ref.html
+!= multiple-mismatch-success.html second-mismatching-ref.html
+!= multiple-mismatch-failure.html mismatching-ref.html
+!= multiple-mismatch-failure.html matching-ref.html
+== multiple-both-success.html matching-ref.html
+== multiple-both-success.html mismatching-ref.html
+!= multiple-both-success.html second-mismatching-ref.html
+== multiple-both-failure.html matching-ref.html
+!= multiple-both-failure.html second-mismatching-ref.html
+!= multiple-both-failure.html matching-ref.html
+""")
+
+    # FIXME: This test was only being ignored because it was missing a leading '/'.
+    # Fixing the typo causes several tests to assert, so the test is disabled entirely.
+    # Add in a file that should be ignored by port.find_test_files().
+    #files[LAYOUT_TEST_DIR + '/userscripts/resources/iframe.html'] = 'iframe'
+
+    def add_file(test, suffix, contents):
+        dirname = filesystem.join(LAYOUT_TEST_DIR, test.name[0:test.name.rfind('/')])
+        base = test.base
+        filesystem.maybe_make_directory(dirname)
+        filesystem.write_binary_file(filesystem.join(dirname, base + suffix), contents)
+
+    # Add each test and the expected output, if any.
+    test_list = unit_test_list()
+    for test in test_list.tests.values():
+        add_file(test, test.name[test.name.rfind('.'):], '')
+        if test.is_reftest:
+            continue
+        if test.actual_audio:
+            add_file(test, '-expected.wav', test.expected_audio)
+            continue
+        add_file(test, '-expected.txt', test.expected_text)
+        add_file(test, '-expected.png', test.expected_image)
+
+    filesystem.write_text_file(filesystem.join(LAYOUT_TEST_DIR, 'virtual', 'passes', 'args-expected.txt'), 'args-txt --virtual-arg')
+    # Clear the list of written files so that we can watch what happens during testing.
+    filesystem.clear_written_files()
+
+
+class TestPort(Port):
+    """Test implementation of the Port interface."""
+    port_name = 'test'
+
+    ALL_BASELINE_VARIANTS = (
+        'test-linux-x86_64',
+        'test-mac-snowleopard', 'test-mac-leopard',
+        'test-win-vista', 'test-win-win7', 'test-win-xp',
+    )
+
+    @classmethod
+    def determine_full_port_name(cls, host, options, port_name):
+        if port_name == 'test':
+            return 'test-mac-leopard'
+        return port_name
+
+    def __init__(self, host, port_name=None, **kwargs):
+        # FIXME: Consider updating all of the callers to pass in a port_name so it can be a
+        # required parameter like all of the other Port objects.
+        port_name = port_name or 'test-mac-leopard'
+        Port.__init__(self, host, port_name, **kwargs)
+        self._tests = unit_test_list()
+        self._flakes = set()
+        self._expectations_path = LAYOUT_TEST_DIR + '/platform/test/TestExpectations'
+        self._results_directory = None
+
+        self._operating_system = 'mac'
+        if port_name.startswith('test-win'):
+            self._operating_system = 'win'
+        elif port_name.startswith('test-linux'):
+            self._operating_system = 'linux'
+
+        version_map = {
+            'test-win-xp': 'xp',
+            'test-win-win7': 'win7',
+            'test-win-vista': 'vista',
+            'test-mac-leopard': 'leopard',
+            'test-mac-snowleopard': 'snowleopard',
+            'test-linux-x86_64': 'lucid',
+        }
+        self._version = version_map[port_name]
+
+    def default_pixel_tests(self):
+        return True
+
+    def _path_to_driver(self):
+        # This routine shouldn't normally be called, but it is called by
+        # the mock_drt Driver. We return something, but make sure it's useless.
+        return 'MOCK _path_to_driver'
+
+    def baseline_search_path(self):
+        search_paths = {
+            'test-mac-snowleopard': ['test-mac-snowleopard'],
+            'test-mac-leopard': ['test-mac-leopard', 'test-mac-snowleopard'],
+            'test-win-win7': ['test-win-win7'],
+            'test-win-vista': ['test-win-vista', 'test-win-win7'],
+            'test-win-xp': ['test-win-xp', 'test-win-vista', 'test-win-win7'],
+            'test-linux-x86_64': ['test-linux', 'test-win-win7'],
+        }
+        return [self._webkit_baseline_path(d) for d in search_paths[self.name()]]
+
+    def default_child_processes(self):
+        return 1
+
+    def worker_startup_delay_secs(self):
+        return 0
+
+    def check_build(self, needs_http):
+        return True
+
+    def check_sys_deps(self, needs_http):
+        return True
+
+    def default_configuration(self):
+        return 'Release'
+
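+    # Overrides Port.diff_image() with a trivial in-memory comparison: the first element of
+    # the returned tuple is None when both contents are equal (or both empty), True when only
+    # one side is present, and a textual diff when the contents differ.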
+    def diff_image(self, expected_contents, actual_contents, tolerance=None):
+        diffed = actual_contents != expected_contents
+        if not actual_contents and not expected_contents:
+            return (None, 0, None)
+        if not actual_contents or not expected_contents:
+            return (True, 0, None)
+        if 'ref' in expected_contents:
+            assert tolerance == 0
+        if diffed:
+            return ("< %s\n---\n> %s\n" % (expected_contents, actual_contents), 1, None)
+        return (None, 0, None)
+
+    def layout_tests_dir(self):
+        return LAYOUT_TEST_DIR
+
+    def perf_tests_dir(self):
+        return PERF_TEST_DIR
+
+    def webkit_base(self):
+        return '/test.checkout'
+
+    def skipped_layout_tests(self, test_list):
+        # This allows us to test the handling of Skipped files, both with a test
+        # that actually passes and one that actually fails.
+        return set(['failures/expected/skip_text.html',
+                    'failures/unexpected/skip_pass.html',
+                    'virtual/skipped'])
+
+    def name(self):
+        return self._name
+
+    def operating_system(self):
+        return self._operating_system
+
+    def _path_to_wdiff(self):
+        return None
+
+    def default_results_directory(self):
+        return '/tmp/layout-test-results'
+
+    def setup_test_run(self):
+        pass
+
+    def _driver_class(self):
+        return TestDriver
+
+    def start_http_server(self, additional_dirs=None, number_of_servers=None):
+        pass
+
+    def start_websocket_server(self):
+        pass
+
+    def acquire_http_lock(self):
+        pass
+
+    def stop_http_server(self):
+        pass
+
+    def stop_websocket_server(self):
+        pass
+
+    def release_http_lock(self):
+        pass
+
+    def _path_to_lighttpd(self):
+        return "/usr/sbin/lighttpd"
+
+    def _path_to_lighttpd_modules(self):
+        return "/usr/lib/lighttpd"
+
+    def _path_to_lighttpd_php(self):
+        return "/usr/bin/php-cgi"
+
+    def _path_to_apache(self):
+        return "/usr/sbin/httpd"
+
+    def _path_to_apache_config_file(self):
+        return self._filesystem.join(self.layout_tests_dir(), 'http', 'conf', 'httpd.conf')
+
+    def path_to_test_expectations_file(self):
+        return self._expectations_path
+
+    def all_test_configurations(self):
+        """Returns a sequence of the TestConfigurations the port supports."""
+        # By default, we assume we want to test every graphics type in
+        # every configuration on every system.
+        test_configurations = []
+        for version, architecture in self._all_systems():
+            for build_type in self._all_build_types():
+                test_configurations.append(TestConfiguration(
+                    version=version,
+                    architecture=architecture,
+                    build_type=build_type))
+        return test_configurations
+
+    def _all_systems(self):
+        return (('leopard', 'x86'),
+                ('snowleopard', 'x86'),
+                ('xp', 'x86'),
+                ('vista', 'x86'),
+                ('win7', 'x86'),
+                ('lucid', 'x86'),
+                ('lucid', 'x86_64'))
+
+    def _all_build_types(self):
+        return ('debug', 'release')
+
+    def configuration_specifier_macros(self):
+        """To avoid surprises when introducing new macros, these are intentionally fixed in time."""
+        return {'mac': ['leopard', 'snowleopard'], 'win': ['xp', 'vista', 'win7'], 'linux': ['lucid']}
+
+    def all_baseline_variants(self):
+        return self.ALL_BASELINE_VARIANTS
+
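+    # Each VirtualTestSuite maps a virtual prefix onto a base test directory plus extra
+    # per-test driver arguments; e.g. tests under 'virtual/passes' run the corresponding
+    # 'passes' tests with --virtual-arg appended (see the args-expected.txt baseline above).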
+    def virtual_test_suites(self):
+        return [
+            VirtualTestSuite('virtual/passes', 'passes', ['--virtual-arg']),
+            VirtualTestSuite('virtual/skipped', 'failures/expected', ['--virtual-arg2']),
+        ]
+
+
+class TestDriver(Driver):
+    """Test/Dummy implementation of the DumpRenderTree interface."""
+
+    def cmd_line(self, pixel_tests, per_test_args):
+        pixel_tests_flag = '-p' if pixel_tests else ''
+        return [self._port._path_to_driver()] + [pixel_tests_flag] + self._port.get_option('additional_drt_flag', []) + per_test_args
+
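+    # Simulates a single test run: looks the test up in the port's canned unit_test_list(),
+    # raises or sleeps for the keyboard/exception/hang cases, fakes crash data for the crash
+    # cases, and packages the canned text/image/audio results into a DriverOutput.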
+    def run_test(self, test_input, stop_when_done):
+        start_time = time.time()
+        test_name = test_input.test_name
+        test_args = test_input.args or []
+        test = self._port._tests[test_name]
+        if test.keyboard:
+            raise KeyboardInterrupt
+        if test.exception:
+            raise ValueError('exception from ' + test_name)
+        if test.hang:
+            time.sleep((float(test_input.timeout) * 4) / 1000.0 + 1.0)  # The 1.0 comes from thread_padding_sec in layout_test_runner.
+
+        audio = None
+        actual_text = test.actual_text
+
+        if 'flaky' in test_name and test_name not in self._port._flakes:
+            self._port._flakes.add(test_name)
+            actual_text = 'flaky text failure'
+
+        if actual_text and test_args and test_name == 'passes/args.html':
+            actual_text = actual_text + ' ' + ' '.join(test_args)
+
+        if test.actual_audio:
+            audio = base64.b64decode(test.actual_audio)
+        crashed_process_name = None
+        crashed_pid = None
+        if test.crash:
+            crashed_process_name = self._port.driver_name()
+            crashed_pid = 1
+        elif test.web_process_crash:
+            crashed_process_name = 'WebProcess'
+            crashed_pid = 2
+
+        crash_log = ''
+        if crashed_process_name:
+            crash_logs = CrashLogs(self._port.host)
+            crash_log = crash_logs.find_newest_log(crashed_process_name, None) or ''
+
+        if stop_when_done:
+            self.stop()
+
+        if test.actual_checksum == test_input.image_hash:
+            image = None
+        else:
+            image = test.actual_image
+        return DriverOutput(actual_text, image, test.actual_checksum, audio,
+            crash=test.crash or test.web_process_crash, crashed_process_name=crashed_process_name,
+            crashed_pid=crashed_pid, crash_log=crash_log,
+            test_time=time.time() - start_time, timeout=test.timeout, error=test.error)
+
+    def start(self, pixel_tests, per_test_args):
+        pass
+
+    def stop(self):
+        pass
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/win.py b/Tools/Scripts/webkitpy/layout_tests/port/win.py
new file mode 100644
index 0000000..ff473fe
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/win.py
@@ -0,0 +1,99 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import re
+import sys
+
+from webkitpy.common.system.systemhost import SystemHost
+from webkitpy.common.system.executive import ScriptError, Executive
+from webkitpy.common.system.path import abspath_to_uri
+from webkitpy.layout_tests.port.apple import ApplePort
+
+
+_log = logging.getLogger(__name__)
+
+
+class WinPort(ApplePort):
+    port_name = "win"
+
+    VERSION_FALLBACK_ORDER = ["win-xp", "win-vista", "win-7sp0", "win-win7"]
+
+    ARCHITECTURES = ['x86']
+
+    def do_text_results_differ(self, expected_text, actual_text):
+        # Sanity was restored in WK2, so we don't need this hack there.
+        if self.get_option('webkit_test_runner'):
+            return ApplePort.do_text_results_differ(self, expected_text, actual_text)
+
+        # This is a hack (which dates back to ORWT).
+        # Windows does not have an EDITING DELEGATE, so we strip any EDITING DELEGATE
+        # messages to make more of the tests pass.
+        # It's possible more of the ports might want this and this could move down into WebKitPort.
+        delegate_regexp = re.compile("^EDITING DELEGATE: .*?\n", re.MULTILINE)
+        expected_text = delegate_regexp.sub("", expected_text)
+        actual_text = delegate_regexp.sub("", actual_text)
+        return expected_text != actual_text
+
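+    # Builds the baseline fallback chain: the versioned win-* directories from this version
+    # onward, then the generic 'win' directory, with win-wk2/mac-wk2 spliced in when running
+    # WebKitTestRunner, and the mac directories appended as a last resort.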
+    def default_baseline_search_path(self):
+        if self._name.endswith(self.FUTURE_VERSION):
+            fallback_names = [self.port_name]
+        else:
+            fallback_names = self.VERSION_FALLBACK_ORDER[self.VERSION_FALLBACK_ORDER.index(self._name):-1] + [self.port_name]
+        # FIXME: The AppleWin port falls back to AppleMac for some results.  Eventually we'll have a shared 'apple' port.
+        if self.get_option('webkit_test_runner'):
+            fallback_names.insert(0, 'win-wk2')
+            fallback_names.append('mac-wk2')
+            # Note we do not add 'wk2' here, even though it's included in _skipped_search_paths().
+        # FIXME: Perhaps we should get this list from MacPort?
+        fallback_names.extend(['mac-lion', 'mac'])
+        return map(self._webkit_baseline_path, fallback_names)
+
+    def operating_system(self):
+        return 'win'
+
+    def show_results_html_file(self, results_filename):
+        self._run_script('run-safari', [abspath_to_uri(SystemHost().platform, results_filename)])
+
+    # FIXME: webkitperl/httpd.pm installs /usr/lib/apache/libphp4.dll on cygwin automatically
+    # as part of running old-run-webkit-tests.  That's bad design, but we may need some similar hack.
+    # We might use setup_environ_for_server for such a hack (or modify apache_http_server.py).
+
+    def _runtime_feature_list(self):
+        supported_features_command = [self._path_to_driver(), '--print-supported-features']
+        try:
+            output = self._executive.run_command(supported_features_command, error_handler=Executive.ignore_error)
+        except OSError, e:
+            _log.warn("Exception running driver: %s, %s.  Driver must be built before calling WebKitPort.test_expectations()." % (supported_features_command, e))
+            return None
+
+        # Note: win/DumpRenderTree.cpp does not print a leading space before the features_string.
+        match_object = re.match("SupportedFeatures:\s*(?P<features_string>.*)\s*", output)
+        if not match_object:
+            return None
+        return match_object.group('features_string').split(' ')
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/win_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/win_unittest.py
new file mode 100644
index 0000000..930dcd8
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/win_unittest.py
@@ -0,0 +1,109 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import StringIO
+import unittest
+
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.common.system.executive_mock import MockExecutive, MockExecutive2
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+from webkitpy.layout_tests.port import port_testcase
+from webkitpy.layout_tests.port.win import WinPort
+from webkitpy.tool.mocktool import MockOptions
+
+
+class WinPortTest(port_testcase.PortTestCase):
+    os_name = 'win'
+    os_version = 'xp'
+    port_name = 'win-xp'
+    port_maker = WinPort
+
+    def test_show_results_html_file(self):
+        port = self.make_port()
+        port._executive = MockExecutive(should_log=True)
+        capture = OutputCapture()
+        capture.capture_output()
+        port.show_results_html_file('test.html')
+        _, stderr, _ = capture.restore_output()
+        # We can't know for sure what path will be produced by cygpath, but we can assert about
+        # everything else.
+        self.assertTrue(stderr.startswith("MOCK run_command: ['Tools/Scripts/run-safari', '--release', '"))
+        self.assertTrue(stderr.endswith("test.html'], cwd=/mock-checkout\n"))
+
+    def _assert_search_path(self, expected_search_paths, version, use_webkit2=False):
+        port = self.make_port(port_name='win', os_version=version, options=MockOptions(webkit_test_runner=use_webkit2))
+        absolute_search_paths = map(port._webkit_baseline_path, expected_search_paths)
+        self.assertEquals(port.baseline_search_path(), absolute_search_paths)
+
+    def test_baseline_search_path(self):
+        self._assert_search_path(['win-xp', 'win-vista', 'win-7sp0', 'win', 'mac-lion', 'mac'], 'xp')
+        self._assert_search_path(['win-vista', 'win-7sp0', 'win', 'mac-lion', 'mac'], 'vista')
+        self._assert_search_path(['win-7sp0', 'win', 'mac-lion', 'mac'], '7sp0')
+
+        self._assert_search_path(['win-wk2', 'win-xp', 'win-vista', 'win-7sp0', 'win', 'mac-wk2', 'mac-lion', 'mac'], 'xp', use_webkit2=True)
+        self._assert_search_path(['win-wk2', 'win-vista', 'win-7sp0', 'win', 'mac-wk2', 'mac-lion', 'mac'], 'vista', use_webkit2=True)
+        self._assert_search_path(['win-wk2', 'win-7sp0', 'win', 'mac-wk2', 'mac-lion', 'mac'], '7sp0', use_webkit2=True)
+
+    def _assert_version(self, port_name, expected_version):
+        host = MockSystemHost(os_name='win', os_version=expected_version)
+        port = WinPort(host, port_name=port_name)
+        self.assertEquals(port.version(), expected_version)
+
+    def test_versions(self):
+        self._assert_version('win-xp', 'xp')
+        self._assert_version('win-vista', 'vista')
+        self._assert_version('win-7sp0', '7sp0')
+        self.assertRaises(AssertionError, self._assert_version, 'win-me', 'xp')
+
+    def test_compare_text(self):
+        expected = "EDITING DELEGATE: webViewDidChangeSelection:WebViewDidChangeSelectionNotification\nfoo\nEDITING DELEGATE: webViewDidChangeSelection:WebViewDidChangeSelectionNotification\n"
+        port = self.make_port()
+        self.assertFalse(port.do_text_results_differ(expected, "foo\n"))
+        self.assertTrue(port.do_text_results_differ(expected, "foo"))
+        self.assertTrue(port.do_text_results_differ(expected, "bar"))
+
+        # This hack doesn't exist in WK2.
+        port._options = MockOptions(webkit_test_runner=True)
+        self.assertTrue(port.do_text_results_differ(expected, "foo\n"))
+
+    def test_operating_system(self):
+        self.assertEqual('win', self.make_port().operating_system())
+
+    def test_runtime_feature_list(self):
+        port = self.make_port()
+        port._executive.run_command = lambda command, cwd=None, error_handler=None: "Nonsense"
+        # _runtime_feature_list returns None when its results are meaningless (it couldn't run DRT or parse the output, etc.).
+        self.assertEquals(port._runtime_feature_list(), None)
+        port._executive.run_command = lambda command, cwd=None, error_handler=None: "SupportedFeatures:foo bar"
+        self.assertEquals(port._runtime_feature_list(), ['foo', 'bar'])
+
+    def test_expectations_files(self):
+        self.assertEquals(len(self.make_port().expectations_files()), 2)
+        self.assertEquals(len(self.make_port(options=MockOptions(webkit_test_runner=True)).expectations_files()), 4)
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/xvfbdriver.py b/Tools/Scripts/webkitpy/layout_tests/port/xvfbdriver.py
new file mode 100644
index 0000000..b98c039
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/xvfbdriver.py
@@ -0,0 +1,104 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import os
+import re
+import time
+
+from webkitpy.layout_tests.port.server_process import ServerProcess
+from webkitpy.layout_tests.port.driver import Driver
+from webkitpy.common.system.file_lock import FileLock
+
+_log = logging.getLogger(__name__)
+
+
+class XvfbDriver(Driver):
+    def __init__(self, *args, **kwargs):
+        Driver.__init__(self, *args, **kwargs)
+        self._guard_lock = None
+        self._startup_delay_secs = 1.0
+
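+    # Scans the 'ps' output for running X, Xvfb or Xorg servers, collects the display numbers
+    # already in use, and claims the first free display below 99 by acquiring a per-display
+    # lock file under /tmp, so that concurrent workers never pick the same display.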
+    def _next_free_display(self):
+        running_pids = self._port.host.executive.run_command(['ps', '-eo', 'comm,command'])
+        reserved_screens = set()
+        for pid in running_pids.split('\n'):
+            match = re.match('(X|Xvfb|Xorg)\s+.*\s:(?P<screen_number>\d+)', pid)
+            if match:
+                reserved_screens.add(int(match.group('screen_number')))
+        for i in range(99):
+            if i not in reserved_screens:
+                _guard_lock_file = self._port.host.filesystem.join('/tmp', 'WebKitXvfb.lock.%i' % i)
+                self._guard_lock = FileLock(_guard_lock_file)
+                if self._guard_lock.acquire_lock():
+                    return i
+
+    def _start(self, pixel_tests, per_test_args):
+        # Use even displays for pixel tests and odd ones otherwise. When pixel tests are disabled,
+        # DriverProxy creates two drivers, one for normal and the other for ref tests. Both have
+        # the same worker number, so this prevents them from using the same Xvfb instance.
+        display_id = self._next_free_display()
+        self._lock_file = "/tmp/.X%d-lock" % display_id
+
+        run_xvfb = ["Xvfb", ":%d" % display_id, "-screen",  "0", "800x600x24", "-nolisten", "tcp"]
+        with open(os.devnull, 'w') as devnull:
+            self._xvfb_process = self._port.host.executive.popen(run_xvfb, stderr=devnull)
+
+        # Crashes tend to occur occasionally in the first few tests run through each
+        # worker because the Xvfb display isn't ready yet. Pausing briefly here should avoid that.
+        time.sleep(self._startup_delay_secs)
+
+        server_name = self._port.driver_name()
+        environment = self._port.setup_environ_for_server(server_name)
+        # We must do this here because the DISPLAY value depends on the display_id chosen above.
+        environment['DISPLAY'] = ":%d" % display_id
+        self._driver_tempdir = self._port._filesystem.mkdtemp(prefix='%s-' % self._port.driver_name())
+        environment['DUMPRENDERTREE_TEMP'] = str(self._driver_tempdir)
+        environment['LOCAL_RESOURCE_ROOT'] = self._port.layout_tests_dir()
+
+        # Currently on WebKit2, there is no API for setting the application
+        # cache directory. Each worker should have its own, and it should be
+        # cleaned up afterwards, so we point it inside the temporary folder by
+        # basing XDG_CACHE_HOME on DUMPRENDERTREE_TEMP.
+        environment['XDG_CACHE_HOME'] = self._port.host.filesystem.join(str(self._driver_tempdir), 'appcache')
+
+        self._crashed_process_name = None
+        self._crashed_pid = None
+        self._server_process = self._port._server_process_constructor(self._port, server_name, self.cmd_line(pixel_tests, per_test_args), environment)
+        self._server_process.start()
+
+    def stop(self):
+        super(XvfbDriver, self).stop()
+        if self._guard_lock:
+            self._guard_lock.release_lock()
+            self._guard_lock = None
+        if getattr(self, '_xvfb_process', None):
+            self._port.host.executive.kill_process(self._xvfb_process.pid)
+            self._xvfb_process = None
+            if self._port.host.filesystem.exists(self._lock_file):
+                self._port.host.filesystem.remove(self._lock_file)
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/xvfbdriver_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/xvfbdriver_unittest.py
new file mode 100644
index 0000000..220dd35
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/xvfbdriver_unittest.py
@@ -0,0 +1,132 @@
+# Copyright (C) 2012 Zan Dobersek <zandobersek@gmail.com>
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.deprecated_logging import log
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.executive_mock import MockExecutive2
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+from webkitpy.layout_tests.port import Port
+from webkitpy.layout_tests.port.config_mock import MockConfig
+from webkitpy.layout_tests.port.server_process_mock import MockServerProcess
+from webkitpy.layout_tests.port.xvfbdriver import XvfbDriver
+
+
+class XvfbDriverTest(unittest.TestCase):
+    def make_driver(self, worker_number=0, xorg_running=False, executive=None):
+        port = Port(host=MockSystemHost(log_executive=True, executive=executive), config=MockConfig())
+        port._server_process_constructor = MockServerProcess
+        if xorg_running:
+            port._executive._running_pids['Xorg'] = 108
+
+        driver = XvfbDriver(port, worker_number=worker_number, pixel_tests=True)
+        driver._startup_delay_secs = 0
+        return driver
+
+    def cleanup_driver(self, driver):
+        # Setting the _xvfb_process member to None is necessary because the Driver object is stopped on deletion,
+        # which would kill the Xvfb process if one were present. Thus, this method should only be called from tests
+        # that do not intend to test the behavior of XvfbDriver.stop.
+        driver._xvfb_process = None
+
+    def assertDriverStartSuccessful(self, driver, expected_stderr, expected_display, pixel_tests=False):
+        OutputCapture().assert_outputs(self, driver.start, [pixel_tests, []], expected_stderr=expected_stderr)
+        self.assertTrue(driver._server_process.started)
+        self.assertEqual(driver._server_process.env["DISPLAY"], expected_display)
+
+    def test_start_no_pixel_tests(self):
+        driver = self.make_driver()
+        expected_stderr = "MOCK run_command: ['ps', '-eo', 'comm,command'], cwd=None\nMOCK popen: ['Xvfb', ':0', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
+        self.assertDriverStartSuccessful(driver, expected_stderr=expected_stderr, expected_display=":0")
+        self.cleanup_driver(driver)
+
+    def test_start_pixel_tests(self):
+        driver = self.make_driver()
+        expected_stderr = "MOCK run_command: ['ps', '-eo', 'comm,command'], cwd=None\nMOCK popen: ['Xvfb', ':0', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
+        self.assertDriverStartSuccessful(driver, expected_stderr=expected_stderr, expected_display=":0", pixel_tests=True)
+        self.cleanup_driver(driver)
+
+    def test_start_arbitrary_worker_number(self):
+        driver = self.make_driver(worker_number=17)
+        expected_stderr = "MOCK run_command: ['ps', '-eo', 'comm,command'], cwd=None\nMOCK popen: ['Xvfb', ':0', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
+        self.assertDriverStartSuccessful(driver, expected_stderr=expected_stderr, expected_display=":0", pixel_tests=True)
+        self.cleanup_driver(driver)
+
+    def disabled_test_next_free_display(self):
+        output = "Xorg            /usr/bin/X :0 -auth /var/run/lightdm/root/:0 -nolisten tcp vt7 -novtswitch -background none\nXvfb            Xvfb :1 -screen 0 800x600x24 -nolisten tcp"
+        executive = MockExecutive2(output)
+        driver = self.make_driver(executive=executive)
+        self.assertEqual(driver._next_free_display(), 2)
+        self.cleanup_driver(driver)
+        output = "X               /usr/bin/X :0 vt7 -nolisten tcp -auth /var/run/xauth/A:0-8p7Ybb"
+        executive = MockExecutive2(output)
+        driver = self.make_driver(executive=executive)
+        self.assertEqual(driver._next_free_display(), 1)
+        self.cleanup_driver(driver)
+        output = "Xvfb            Xvfb :0 -screen 0 800x600x24 -nolisten tcp"
+        executive = MockExecutive2(output)
+        driver = self.make_driver(executive=executive)
+        self.assertEqual(driver._next_free_display(), 1)
+        self.cleanup_driver(driver)
+        output = "Xvfb            Xvfb :1 -screen 0 800x600x24 -nolisten tcp\nXvfb            Xvfb :0 -screen 0 800x600x24 -nolisten tcp\nXvfb            Xvfb :3 -screen 0 800x600x24 -nolisten tcp"
+        executive = MockExecutive2(output)
+        driver = self.make_driver(executive=executive)
+        self.assertEqual(driver._next_free_display(), 2)
+        self.cleanup_driver(driver)
+
+    def test_start_next_worker(self):
+        driver = self.make_driver()
+        driver._next_free_display = lambda: 0
+        expected_stderr = "MOCK popen: ['Xvfb', ':0', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
+        self.assertDriverStartSuccessful(driver, expected_stderr=expected_stderr, expected_display=":0", pixel_tests=True)
+        self.cleanup_driver(driver)
+        driver = self.make_driver()
+        driver._next_free_display = lambda: 3
+        expected_stderr = "MOCK popen: ['Xvfb', ':3', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
+        self.assertDriverStartSuccessful(driver, expected_stderr=expected_stderr, expected_display=":3", pixel_tests=True)
+        self.cleanup_driver(driver)
+
+    def test_stop(self):
+        filesystem = MockFileSystem(files={'/tmp/.X42-lock': '1234\n'})
+        port = Port(host=MockSystemHost(log_executive=True, filesystem=filesystem), config=MockConfig())
+        port._executive.kill_process = lambda x: log("MOCK kill_process pid: " + str(x))
+        driver = XvfbDriver(port, worker_number=0, pixel_tests=True)
+
+        class FakeXvfbProcess(object):
+            pid = 1234
+
+        driver._xvfb_process = FakeXvfbProcess()
+        driver._lock_file = '/tmp/.X42-lock'
+
+        expected_stderr = "MOCK kill_process pid: 1234\n"
+        OutputCapture().assert_outputs(self, driver.stop, [], expected_stderr=expected_stderr)
+
+        self.assertEqual(driver._xvfb_process, None)
+        self.assertFalse(port._filesystem.exists(driver._lock_file))
diff --git a/Tools/Scripts/webkitpy/layout_tests/reftests/__init__.py b/Tools/Scripts/webkitpy/layout_tests/reftests/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/reftests/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/Tools/Scripts/webkitpy/layout_tests/reftests/extract_reference_link.py b/Tools/Scripts/webkitpy/layout_tests/reftests/extract_reference_link.py
new file mode 100644
index 0000000..e21d73d
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/reftests/extract_reference_link.py
@@ -0,0 +1,63 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Utility module for reftests."""
+
+
+from HTMLParser import HTMLParser
+
+
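+# Collects the href values of <link rel="match"> and <link rel="mismatch"> elements, the
+# convention reftests use to declare their match and mismatch references.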
+class ExtractReferenceLinkParser(HTMLParser):
+
+    def __init__(self):
+        HTMLParser.__init__(self)
+        self.matches = []
+        self.mismatches = []
+
+    def handle_starttag(self, tag, attrs):
+        if tag != "link":
+            return
+        attrs = dict(attrs)
+        if not "rel" in attrs:
+            return
+        if not "href" in attrs:
+            return
+        if attrs["rel"] == "match":
+            self.matches.append(attrs["href"])
+        if attrs["rel"] == "mismatch":
+            self.mismatches.append(attrs["href"])
+
+
+def get_reference_link(html_string):
+    """Returns reference links in the given html_string.
+
+    Returns:
+        a tuple of two URL lists, (matches, mismatches).
+    """
+    parser = ExtractReferenceLinkParser()
+    parser.feed(html_string)
+    parser.close()
+
+    return parser.matches, parser.mismatches
diff --git a/Tools/Scripts/webkitpy/layout_tests/reftests/extract_reference_link_unittest.py b/Tools/Scripts/webkitpy/layout_tests/reftests/extract_reference_link_unittest.py
new file mode 100644
index 0000000..717bc7c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/reftests/extract_reference_link_unittest.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.layout_tests.reftests import extract_reference_link
+
+
+class ExtractLinkMatchTest(unittest.TestCase):
+
+    def test_getExtractMatch(self):
+        html_1 = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
+"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<title>CSS Test: DESCRIPTION OF TEST</title>
+<link rel="author" title="NAME_OF_AUTHOR"
+href="mailto:EMAIL OR http://CONTACT_PAGE"/>
+<link rel="help" href="RELEVANT_SPEC_SECTION"/>
+<link rel="match" href="green-box-ref.xht" />
+<link rel="match" href="blue-box-ref.xht" />
+<link rel="mismatch" href="red-box-notref.xht" />
+<link rel="mismatch" href="red-box-notref.xht" />
+<meta name="flags" content="TOKENS" />
+<meta name="assert" content="TEST ASSERTION"/>
+<style type="text/css"><![CDATA[
+CSS FOR TEST
+]]></style>
+</head>
+<body>
+CONTENT OF TEST
+</body>
+</html>
+"""
+        matches, mismatches = extract_reference_link.get_reference_link(html_1)
+        self.assertEqual(matches,
+                         ["green-box-ref.xht", "blue-box-ref.xht"])
+        self.assertEqual(mismatches,
+                         ["red-box-notref.xht", "red-box-notref.xht"])
+
+        html_2 = ""
+        empty_tuple_1 = extract_reference_link.get_reference_link(html_2)
+        self.assertEqual(empty_tuple_1, ([], []))
+
+        # Link does not have a "ref" attribute.
+        html_3 = """<link href="RELEVANT_SPEC_SECTION"/>"""
+        empty_tuple_2 = extract_reference_link.get_reference_link(html_3)
+        self.assertEqual(empty_tuple_2, ([], []))
+
+        # Link does not have a "href" attribute.
+        html_4 = """<link rel="match"/>"""
+        empty_tuple_3 = extract_reference_link.get_reference_link(html_4)
+        self.assertEqual(empty_tuple_3, ([], []))
+
+        # Link does not have a "/" at the end.
+        html_5 = """<link rel="help" href="RELEVANT_SPEC_SECTION">"""
+        empty_tuple_4 = extract_reference_link.get_reference_link(html_5)
+        self.assertEqual(empty_tuple_4, ([], []))
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
new file mode 100755
index 0000000..1c8e732
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
@@ -0,0 +1,495 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import errno
+import logging
+import optparse
+import os
+import signal
+import sys
+import traceback
+
+from webkitpy.common.host import Host
+from webkitpy.common.system import stack_utils
+from webkitpy.layout_tests.controllers.manager import Manager, WorkerException, TestRunInterruptedException
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.port import configuration_options, platform_options
+from webkitpy.layout_tests.views import printing
+
+
+_log = logging.getLogger(__name__)
+
+
+# This mirrors what the shell normally does.
+INTERRUPTED_EXIT_STATUS = signal.SIGINT + 128
+
+# This is a randomly chosen exit code that can be tested against to
+# indicate that an unexpected exception occurred.
+EXCEPTIONAL_EXIT_STATUS = 254
+
+
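+# Lints the TestExpectations files for the port given on the command line (when --platform
+# is passed) or for every known port, logging any parse errors and returning -1 on failure
+# and 0 on success.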
+def lint(port, options):
+    host = port.host
+    if options.platform:
+        ports_to_lint = [port]
+    else:
+        ports_to_lint = [host.port_factory.get(name) for name in host.port_factory.all_port_names()]
+
+    files_linted = set()
+    lint_failed = False
+
+    for port_to_lint in ports_to_lint:
+        expectations_dict = port_to_lint.expectations_dict()
+        for expectations_file in expectations_dict.keys():
+            if expectations_file in files_linted:
+                continue
+
+            try:
+                test_expectations.TestExpectations(port_to_lint, expectations_to_lint={expectations_file: expectations_dict[expectations_file]})
+            except test_expectations.ParseError, e:
+                lint_failed = True
+                _log.error('')
+                for warning in e.warnings:
+                    _log.error(warning)
+                _log.error('')
+            files_linted.add(expectations_file)
+
+    if lint_failed:
+        _log.error('Lint failed.')
+        return -1
+    _log.info('Lint succeeded.')
+    return 0
+
+
+def run(port, options, args, regular_output=sys.stderr, buildbot_output=sys.stdout):
+    try:
+        warnings = _set_up_derived_options(port, options)
+
+        printer = printing.Printer(port, options, regular_output, buildbot_output, logger=logging.getLogger())
+
+        for warning in warnings:
+            _log.warning(warning)
+
+        if options.lint_test_files:
+            return lint(port, options)
+
+        # We wrap any parts of the run that are slow or likely to raise exceptions
+        # in a try/finally to ensure that we clean up the logging configuration.
+        unexpected_result_count = -1
+
+        manager = Manager(port, options, printer)
+        printer.print_config(port.results_directory())
+
+        unexpected_result_count = manager.run(args)
+        _log.debug("Testing completed, Exit status: %d" % unexpected_result_count)
+    except Exception:
+        exception_type, exception_value, exception_traceback = sys.exc_info()
+        if exception_type not in (KeyboardInterrupt, TestRunInterruptedException, WorkerException):
+            print >> sys.stderr, '\n%s raised: %s' % (exception_type.__name__, exception_value)
+            stack_utils.log_traceback(_log.error, exception_traceback)
+        raise
+    finally:
+        printer.cleanup()
+
+    return unexpected_result_count
+
+
+def _set_up_derived_options(port, options):
+    """Sets the options values that depend on other options values."""
+    # We return a list of warnings to print after the printer is initialized.
+    warnings = []
+
+    if not options.child_processes:
+        options.child_processes = os.environ.get("WEBKIT_TEST_CHILD_PROCESSES",
+                                                 str(port.default_child_processes()))
+    if not options.max_locked_shards:
+        options.max_locked_shards = int(os.environ.get("WEBKIT_TEST_MAX_LOCKED_SHARDS",
+                                                       str(port.default_max_locked_shards())))
+
+    if not options.configuration:
+        options.configuration = port.default_configuration()
+
+    if options.pixel_tests is None:
+        options.pixel_tests = port.default_pixel_tests()
+
+    if not options.time_out_ms:
+        options.time_out_ms = str(port.default_timeout_ms())
+
+    options.slow_time_out_ms = str(5 * int(options.time_out_ms))
+
+    if options.additional_platform_directory:
+        additional_platform_directories = []
+        for path in options.additional_platform_directory:
+            additional_platform_directories.append(port.host.filesystem.abspath(path))
+        options.additional_platform_directory = additional_platform_directories
+
+    if not options.http and options.skipped in ('ignore', 'only'):
+        warnings.append("--force/--skipped=%s overrides --no-http." % (options.skipped))
+        options.http = True
+
+    if options.ignore_metrics and (options.new_baseline or options.reset_results):
+        warnings.append("--ignore-metrics has no effect with --new-baselines or with --reset-results")
+
+    if options.new_baseline:
+        options.reset_results = True
+        options.add_platform_exceptions = True
+
+    if options.pixel_test_directories:
+        options.pixel_tests = True
+        verified_dirs = set()
+        pixel_test_directories = options.pixel_test_directories
+        for directory in pixel_test_directories:
+            # FIXME: we should support specifying the directories all the ways we support it for additional
+            # arguments specifying which tests and directories to run. We should also move the logic for that
+            # to Port.
+            filesystem = port.host.filesystem
+            if not filesystem.isdir(filesystem.join(port.layout_tests_dir(), directory)):
+                warnings.append("'%s' was passed to --pixel-test-directories, which doesn't seem to be a directory" % str(directory))
+            else:
+                verified_dirs.add(directory)
+
+        options.pixel_test_directories = list(verified_dirs)
+
+    if options.run_singly:
+        options.verbose = True
+
+    return warnings
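+
+
+# Hedged worked example of the derived options above (the timeout value is
+# illustrative): on a port whose default timeout is 6000 ms and with no flags,
+#   options.time_out_ms == '6000'
+#   options.slow_time_out_ms == '30000'    # always 5 * time_out_ms
+# and passing --new-baseline additionally turns on --reset-results and
+# --add-platform-exceptions.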
+
+
+def _compat_shim_callback(option, opt_str, value, parser):
+    print "Ignoring unsupported option: %s" % opt_str
+
+
+def _compat_shim_option(option_name, **kwargs):
+    return optparse.make_option(option_name, action="callback",
+        callback=_compat_shim_callback,
+        help="Ignored, for old-run-webkit-tests compat only.", **kwargs)
+
+
+def parse_args(args=None):
+    """Provides a default set of command line args.
+
+    Returns a tuple of options, args from optparse"""
+
+    option_group_definitions = []
+
+    option_group_definitions.append(("Platform options", platform_options()))
+    option_group_definitions.append(("Configuration options", configuration_options()))
+    option_group_definitions.append(("Printing Options", printing.print_options()))
+
+    # FIXME: These options should move onto the ChromiumPort.
+    option_group_definitions.append(("Chromium-specific Options", [
+        optparse.make_option("--startup-dialog", action="store_true",
+            default=False, help="create a dialog on DumpRenderTree startup"),
+        optparse.make_option("--gp-fault-error-box", action="store_true",
+            default=False, help="enable Windows GP fault error box"),
+        optparse.make_option("--js-flags",
+            type="string", help="JavaScript flags to pass to tests"),
+        optparse.make_option("--stress-opt", action="store_true",
+            default=False,
+            help="Enable additional stress test to JavaScript optimization"),
+        optparse.make_option("--stress-deopt", action="store_true",
+            default=False,
+            help="Enable additional stress test to JavaScript optimization"),
+        optparse.make_option("--nocheck-sys-deps", action="store_true",
+            default=False,
+            help="Don't check the system dependencies (themes)"),
+        optparse.make_option("--accelerated-video",
+            action="store_true",
+            help="Use hardware-accelerated compositing for video"),
+        optparse.make_option("--no-accelerated-video",
+            action="store_false",
+            dest="accelerated_video",
+            help="Don't use hardware-accelerated compositing for video"),
+        optparse.make_option("--threaded-compositing",
+            action="store_true",
+            help="Use threaded compositing for rendering"),
+        optparse.make_option("--accelerated-2d-canvas",
+            action="store_true",
+            help="Use hardware-accelerated 2D Canvas calls"),
+        optparse.make_option("--no-accelerated-2d-canvas",
+            action="store_false",
+            dest="accelerated_2d_canvas",
+            help="Don't use hardware-accelerated 2D Canvas calls"),
+        optparse.make_option("--accelerated-painting",
+            action="store_true",
+            default=False,
+            help="Use hardware accelerated painting of composited pages"),
+        optparse.make_option("--per-tile-painting",
+            action="store_true",
+            help="Use per-tile painting of composited pages"),
+        optparse.make_option("--adb-device",
+            action="append", default=[],
+            help="Run Android layout tests on these devices."),
+    ]))
+
+    option_group_definitions.append(("EFL-specific Options", [
+        optparse.make_option("--webprocess-cmd-prefix", type="string",
+            default=False, help="Prefix used when spawning the Web process (Debug mode only)"),
+    ]))
+
+    option_group_definitions.append(("WebKit Options", [
+        optparse.make_option("--gc-between-tests", action="store_true", default=False,
+            help="Force garbage collection between each test"),
+        optparse.make_option("--complex-text", action="store_true", default=False,
+            help="Use the complex text code path for all text (Mac OS X and Windows only)"),
+        optparse.make_option("-l", "--leaks", action="store_true", default=False,
+            help="Enable leaks checking (Mac OS X only)"),
+        optparse.make_option("-g", "--guard-malloc", action="store_true", default=False,
+            help="Enable Guard Malloc (Mac OS X only)"),
+        optparse.make_option("--threaded", action="store_true", default=False,
+            help="Run a concurrent JavaScript thread with each test"),
+        optparse.make_option("--webkit-test-runner", "-2", action="store_true",
+            help="Use WebKitTestRunner rather than DumpRenderTree."),
+        # FIXME: We should merge this w/ --build-directory and only have one flag.
+        optparse.make_option("--root", action="store",
+            help="Path to a directory containing the executables needed to run tests."),
+    ]))
+
+    option_group_definitions.append(("ORWT Compatibility Options", [
+        # FIXME: Remove this option once the bots don't refer to it.
+        # results.html is smart enough to figure this out itself.
+        _compat_shim_option("--use-remote-links-to-tests"),
+    ]))
+
+    option_group_definitions.append(("Results Options", [
+        optparse.make_option("-p", "--pixel-tests", action="store_true",
+            dest="pixel_tests", help="Enable pixel-to-pixel PNG comparisons"),
+        optparse.make_option("--no-pixel-tests", action="store_false",
+            dest="pixel_tests", help="Disable pixel-to-pixel PNG comparisons"),
+        optparse.make_option("--no-sample-on-timeout", action="store_false",
+            dest="sample_on_timeout", help="Don't run sample on timeout (Mac OS X only)"),
+        optparse.make_option("--no-ref-tests", action="store_true",
+            dest="no_ref_tests", help="Skip all ref tests"),
+        optparse.make_option("--tolerance",
+            help="Ignore image differences less than this percentage (some "
+                "ports may ignore this option)", type="float"),
+        optparse.make_option("--results-directory", help="Location of test results"),
+        optparse.make_option("--build-directory",
+            help="Path to the directory under which build files are kept (should not include configuration)"),
+        optparse.make_option("--add-platform-exceptions", action="store_true", default=False,
+            help="Save generated results into the *most-specific-platform* directory rather than the *generic-platform* directory"),
+        optparse.make_option("--new-baseline", action="store_true",
+            default=False, help="Save generated results as new baselines "
+                 "into the *most-specific-platform* directory, overwriting whatever's "
+                 "already there. Equivalent to --reset-results --add-platform-exceptions"),
+        optparse.make_option("--reset-results", action="store_true",
+            default=False, help="Reset expectations to the "
+                 "generated results in their existing location."),
+        optparse.make_option("--no-new-test-results", action="store_false",
+            dest="new_test_results", default=True,
+            help="Don't create new baselines when no expected results exist"),
+
+        # FIXME: We should support a comma-separated list with --pixel-test-directory as well.
+        optparse.make_option("--pixel-test-directory", action="append", default=[], dest="pixel_test_directories",
+            help="A directory where it is allowed to execute tests as pixel tests. "
+                 "Specify multiple times to add multiple directories. "
+                 "This option implies --pixel-tests. If specified, only those tests "
+                 "will be executed as pixel tests that are located in one of the "
+                 "directories enumerated with the option. Some ports may ignore this "
+                 "option while others can have a default value that can be overridden here."),
+
+        optparse.make_option("--skip-failing-tests", action="store_true",
+            default=False, help="Skip tests that are expected to fail. "
+                 "Note: When using this option, you might miss new crashes "
+                 "in these tests."),
+        optparse.make_option("--additional-drt-flag", action="append",
+            default=[], help="Additional command line flag to pass to DumpRenderTree "
+                 "Specify multiple times to add multiple flags."),
+        optparse.make_option("--driver-name", type="string",
+            help="Alternative DumpRenderTree binary to use"),
+        optparse.make_option("--additional-platform-directory", action="append",
+            default=[], help="Additional directory where to look for test "
+                 "baselines (will take precendence over platform baselines). "
+                 "Specify multiple times to add multiple search path entries."),
+        optparse.make_option("--additional-expectations", action="append", default=[],
+            help="Path to a test_expectations file that will override previous expectations. "
+                 "Specify multiple times for multiple sets of overrides."),
+        optparse.make_option("--compare-port", action="store", default=None,
+            help="Use the specified port's baselines first"),
+        optparse.make_option("--no-show-results", action="store_false",
+            default=True, dest="show_results",
+            help="Don't launch a browser with results after the tests "
+                 "are done"),
+        # FIXME: We should have a helper function to do this sort of
+        # deprecated mapping and automatically log, etc.
+        optparse.make_option("--noshow-results", action="store_false", dest="show_results", help="Deprecated, same as --no-show-results."),
+        optparse.make_option("--no-launch-safari", action="store_false", dest="show_results", help="Deprecated, same as --no-show-results."),
+        optparse.make_option("--full-results-html", action="store_true",
+            default=False,
+            help="Show all failures in results.html, rather than only regressions"),
+        optparse.make_option("--clobber-old-results", action="store_true",
+            default=False, help="Clobbers test results from previous runs."),
+        optparse.make_option("--no-record-results", action="store_false",
+            default=True, dest="record_results",
+            help="Don't record the results."),
+        optparse.make_option("--http", action="store_true", dest="http",
+            default=True, help="Run HTTP and WebSocket tests (default)"),
+        optparse.make_option("--no-http", action="store_false", dest="http",
+            help="Don't run HTTP and WebSocket tests"),
+        optparse.make_option("--ignore-metrics", action="store_true", dest="ignore_metrics",
+            default=False, help="Ignore rendering metrics related information from test "
+            "output, only compare the structure of the rendertree."),
+    ]))
+
+    option_group_definitions.append(("Testing Options", [
+        optparse.make_option("--build", dest="build",
+            action="store_true", default=True,
+            help="Check to ensure the DumpRenderTree build is up-to-date "
+                 "(default)."),
+        optparse.make_option("--no-build", dest="build",
+            action="store_false", help="Don't check to see if the "
+                                       "DumpRenderTree build is up-to-date."),
+        optparse.make_option("-n", "--dry-run", action="store_true",
+            default=False,
+            help="Do everything but actually run the tests or upload results."),
+        optparse.make_option("--wrapper",
+            help="wrapper command to insert before invocations of "
+                 "DumpRenderTree; option is split on whitespace before "
+                 "running. (Example: --wrapper='valgrind --smc-check=all')"),
+        optparse.make_option("-i", "--ignore-tests", action="append", default=[],
+            help="directories or test to ignore (may specify multiple times)"),
+        optparse.make_option("--test-list", action="append",
+            help="read list of tests to run from file", metavar="FILE"),
+        optparse.make_option("--skipped", action="store", default="default",
+            help=("control how tests marked SKIP are run. "
+                 "'default' == Skip tests unless explicitly listed on the command line, "
+                 "'ignore' == Run them anyway, "
+                 "'only' == only run the SKIP tests, "
+                 "'always' == always skip, even if listed on the command line.")),
+        optparse.make_option("--force", dest="skipped", action="store_const", const='ignore',
+            help="Run all tests, even those marked SKIP in the test list (same as --skipped=ignore)"),
+        optparse.make_option("--time-out-ms",
+            help="Set the timeout for each test"),
+        optparse.make_option("--randomize-order", action="store_true",
+            default=False, help=("Run tests in random order (useful "
+                                "for tracking down corruption)")),
+        optparse.make_option("--run-chunk",
+            help=("Run a specified chunk (n:l), the nth of len l, "
+                 "of the layout tests")),
+        optparse.make_option("--run-part", help=("Run a specified part (n:m), "
+                  "the nth of m parts, of the layout tests")),
+        optparse.make_option("--batch-size",
+            help=("Run a the tests in batches (n), after every n tests, "
+                  "DumpRenderTree is relaunched."), type="int", default=None),
+        optparse.make_option("--run-singly", action="store_true",
+            default=False, help="run a separate DumpRenderTree for each test (implies --verbose)"),
+        optparse.make_option("--child-processes",
+            help="Number of DumpRenderTrees to run in parallel."),
+        # FIXME: Display default number of child processes that will run.
+        optparse.make_option("-f", "--fully-parallel", action="store_true",
+            help="run all tests in parallel"),
+        optparse.make_option("--exit-after-n-failures", type="int", default=None,
+            help="Exit after the first N failures instead of running all "
+            "tests"),
+        optparse.make_option("--exit-after-n-crashes-or-timeouts", type="int",
+            default=None, help="Exit after the first N crashes instead of "
+            "running all tests"),
+        optparse.make_option("--iterations", type="int", default=1, help="Number of times to run the set of tests (e.g. ABCABCABC)"),
+        optparse.make_option("--repeat-each", type="int", default=1, help="Number of times to run each test (e.g. AAABBBCCC)"),
+        optparse.make_option("--retry-failures", action="store_true",
+            default=True,
+            help="Re-try any tests that produce unexpected results (default)"),
+        optparse.make_option("--no-retry-failures", action="store_false",
+            dest="retry_failures",
+            help="Don't re-try any tests that produce unexpected results."),
+        optparse.make_option("--max-locked-shards", type="int", default=0,
+            help="Set the maximum number of locked shards"),
+        optparse.make_option("--additional-env-var", type="string", action="append", default=[],
+            help="Passes that environment variable to the tests (--additional-env-var=NAME=VALUE)"),
+    ]))
+
+    option_group_definitions.append(("Miscellaneous Options", [
+        optparse.make_option("--lint-test-files", action="store_true",
+        default=False, help=("Makes sure the test files parse for all "
+                            "configurations. Does not run any tests.")),
+    ]))
+
+    # FIXME: Move these into json_results_generator.py
+    option_group_definitions.append(("Result JSON Options", [
+        optparse.make_option("--master-name", help="The name of the buildbot master."),
+        optparse.make_option("--builder-name", default="",
+            help=("The name of the builder shown on the waterfall running "
+                  "this script e.g. WebKit.")),
+        optparse.make_option("--build-name", default="DUMMY_BUILD_NAME",
+            help=("The name of the builder used in its path, e.g. "
+                  "webkit-rel.")),
+        optparse.make_option("--build-number", default="DUMMY_BUILD_NUMBER",
+            help=("The build number of the builder running this script.")),
+        optparse.make_option("--test-results-server", default="",
+            help=("If specified, upload results json files to this appengine "
+                  "server.")),
+    ]))
+
+    option_parser = optparse.OptionParser()
+
+    for group_name, group_options in option_group_definitions:
+        option_group = optparse.OptionGroup(option_parser, group_name)
+        option_group.add_options(group_options)
+        option_parser.add_option_group(option_group)
+
+    return option_parser.parse_args(args)
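+
+
+# Illustrative call of parse_args() (hedged; the argument values are made up):
+#
+#   options, args = parse_args(['--platform', 'test', '--child-processes', '2', 'fast/css'])
+#   # options.child_processes == '2' (optparse stores untyped values as strings)
+#   # args == ['fast/css']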
+
+
+def main(argv=None):
+    try:
+        options, args = parse_args(argv)
+        if options.platform and 'test' in options.platform:
+            # It's a bit lame to import mocks into real code, but this allows the user
+            # to run tests against the test platform interactively, which is useful for
+            # debugging test failures.
+            from webkitpy.common.host_mock import MockHost
+            host = MockHost()
+        else:
+            host = Host()
+        port = host.port_factory.get(options.platform, options)
+    except NotImplementedError, e:
+        # FIXME: is this the best way to handle unsupported port names?
+        print >> sys.stderr, str(e)
+        return EXCEPTIONAL_EXIT_STATUS
+    except Exception, e:
+        print >> sys.stderr, '\n%s raised: %s' % (e.__class__.__name__, str(e))
+        traceback.print_exc(file=sys.stderr)
+        raise
+
+    logging.getLogger().setLevel(logging.DEBUG if options.debug_rwt_logging else logging.INFO)
+    return run(port, options, args)
+
+
+if '__main__' == __name__:
+    try:
+        return_code = main()
+    except BaseException, e:
+        if e.__class__ in (KeyboardInterrupt, TestRunInterruptedException):
+            sys.exit(INTERRUPTED_EXIT_STATUS)
+        sys.exit(EXCEPTIONAL_EXIT_STATUS)
+
+    sys.exit(return_code)
diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
new file mode 100755
index 0000000..8e35747
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
@@ -0,0 +1,1091 @@
+#!/usr/bin/python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import codecs
+import itertools
+import json
+import logging
+import os
+import platform
+import Queue
+import re
+import StringIO
+import sys
+import thread
+import time
+import threading
+import unittest
+
+from webkitpy.common.system import outputcapture, path
+from webkitpy.common.system.crashlogs_unittest import make_mock_crash_report_darwin
+from webkitpy.common.system.systemhost import SystemHost
+from webkitpy.common.host import Host
+from webkitpy.common.host_mock import MockHost
+
+from webkitpy.layout_tests import port
+from webkitpy.layout_tests import run_webkit_tests
+from webkitpy.layout_tests.controllers.manager import WorkerException
+from webkitpy.layout_tests.port import Port
+from webkitpy.layout_tests.port.test import TestPort, TestDriver
+from webkitpy.test.skip import skip_if
+from webkitpy.tool.mocktool import MockOptions
+
+
+def parse_args(extra_args=None, record_results=False, tests_included=False, new_results=False, print_nothing=True):
+    extra_args = extra_args or []
+    args = []
+    if not '--platform' in extra_args:
+        args.extend(['--platform', 'test'])
+    if not record_results:
+        args.append('--no-record-results')
+    if not new_results:
+        args.append('--no-new-test-results')
+
+    if not '--child-processes' in extra_args:
+        args.extend(['--child-processes', 1])
+    args.extend(extra_args)
+    if not tests_included:
+        # We use the glob to test that globbing works.
+        args.extend(['passes',
+                     'http/tests',
+                     'websocket/tests',
+                     'failures/expected/*'])
+    return run_webkit_tests.parse_args(args)
+
+
+def passing_run(extra_args=None, port_obj=None, record_results=False, tests_included=False, host=None, shared_port=True):
+    options, parsed_args = parse_args(extra_args, record_results, tests_included)
+    if not port_obj:
+        host = host or MockHost()
+        port_obj = host.port_factory.get(port_name=options.platform, options=options)
+
+    if shared_port:
+        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj
+
+    buildbot_output = StringIO.StringIO()
+    regular_output = StringIO.StringIO()
+    res = run_webkit_tests.run(port_obj, options, parsed_args, buildbot_output=buildbot_output, regular_output=regular_output)
+    return res == 0
+
+
+def logging_run(extra_args=None, port_obj=None, record_results=False, tests_included=False, host=None, new_results=False, shared_port=True):
+    options, parsed_args = parse_args(extra_args=extra_args,
+                                      record_results=record_results,
+                                      tests_included=tests_included,
+                                      print_nothing=False, new_results=new_results)
+    host = host or MockHost()
+    if not port_obj:
+        port_obj = host.port_factory.get(port_name=options.platform, options=options)
+
+    res, buildbot_output, regular_output = run_and_capture(port_obj, options, parsed_args, shared_port)
+    return (res, buildbot_output, regular_output, host.user)
+
+
+def run_and_capture(port_obj, options, parsed_args, shared_port=True):
+    if shared_port:
+        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj
+    oc = outputcapture.OutputCapture()
+    try:
+        oc.capture_output()
+        buildbot_output = StringIO.StringIO()
+        regular_output = StringIO.StringIO()
+        res = run_webkit_tests.run(port_obj, options, parsed_args,
+                                   buildbot_output=buildbot_output,
+                                   regular_output=regular_output)
+    finally:
+        oc.restore_output()
+    return (res, buildbot_output, regular_output)
+
+
+def get_tests_run(extra_args=None, tests_included=False, flatten_batches=False,
+                  host=None, include_reference_html=False):
+    extra_args = extra_args or []
+    if not tests_included:
+        # Not including http tests since they get run out of order (that
+        # behavior has its own test, see test_get_test_file_queue)
+        extra_args = ['passes', 'failures'] + extra_args
+    options, parsed_args = parse_args(extra_args, tests_included=True)
+
+    host = host or MockHost()
+    test_batches = []
+
+    class RecordingTestDriver(TestDriver):
+        def __init__(self, port, worker_number):
+            TestDriver.__init__(self, port, worker_number, pixel_tests=port.get_option('pixel_test'), no_timeout=False)
+            self._current_test_batch = None
+
+        def start(self):
+            pass
+
+        def stop(self):
+            self._current_test_batch = None
+
+        def run_test(self, test_input, stop_when_done):
+            if self._current_test_batch is None:
+                self._current_test_batch = []
+                test_batches.append(self._current_test_batch)
+            test_name = test_input.test_name
+            # In the case of a reftest, a single test calls the driver's run_test() twice.
+            # We should not add the reference HTML used by a reftest to the list of tests
+            # unless the include_reference_html parameter is explicitly set.
+            filesystem = self._port.host.filesystem
+            dirname, filename = filesystem.split(test_name)
+            if include_reference_html or not Port.is_reference_html_file(filesystem, dirname, filename):
+                self._current_test_batch.append(test_name)
+            return TestDriver.run_test(self, test_input, stop_when_done)
+
+    class RecordingTestPort(TestPort):
+        def create_driver(self, worker_number):
+            return RecordingTestDriver(self, worker_number)
+
+    recording_port = RecordingTestPort(host, options=options)
+    run_and_capture(recording_port, options, parsed_args)
+
+    if flatten_batches:
+        return list(itertools.chain(*test_batches))
+
+    return test_batches
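+
+
+# Hedged illustration of the shapes returned above: a batched run might yield
+#   [['passes/text.html'], ['passes/image.html', 'passes/error.html']]
+# whereas the same run with flatten_batches=True comes back as one flat list:
+#   ['passes/text.html', 'passes/image.html', 'passes/error.html']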
+
+
+# Update this magic number if you add an unexpected test to webkitpy.layout_tests.port.test.
+# FIXME: It would be nice to have a routine in port/test.py that returns this number.
+unexpected_failures = 12
+unexpected_tests_count = unexpected_failures + 4
+
+
+class StreamTestingMixin(object):
+    def assertContains(self, stream, string):
+        self.assertTrue(string in stream.getvalue())
+
+    def assertEmpty(self, stream):
+        self.assertFalse(stream.getvalue())
+
+    def assertNotEmpty(self, stream):
+        self.assertTrue(stream.getvalue())
+
+
+class LintTest(unittest.TestCase, StreamTestingMixin):
+    def test_all_configurations(self):
+
+        class FakePort(object):
+            def __init__(self, host, name, path):
+                self.host = host
+                self.name = name
+                self.path = path
+
+            def test_configuration(self):
+                return None
+
+            def expectations_dict(self):
+                self.host.ports_parsed.append(self.name)
+                return {self.path: ''}
+
+            def skipped_layout_tests(self, tests):
+                return set([])
+
+            def all_test_configurations(self):
+                return []
+
+            def configuration_specifier_macros(self):
+                return []
+
+            def path_from_webkit_base(self):
+                return ''
+
+            def get_option(self, name, val):
+                return val
+
+        class FakeFactory(object):
+            def __init__(self, host, ports):
+                self.host = host
+                self.ports = {}
+                for port in ports:
+                    self.ports[port.name] = port
+
+            def get(self, port_name, *args, **kwargs):
+                return self.ports[port_name]
+
+            def all_port_names(self):
+                return sorted(self.ports.keys())
+
+        host = MockHost()
+        host.ports_parsed = []
+        host.port_factory = FakeFactory(host, (FakePort(host, 'a', 'path-to-a'),
+                                               FakePort(host, 'b', 'path-to-b'),
+                                               FakePort(host, 'b-win', 'path-to-b')))
+
+        self.assertEquals(run_webkit_tests.lint(host.port_factory.ports['a'], MockOptions(platform=None)), 0)
+        self.assertEquals(host.ports_parsed, ['a', 'b', 'b-win'])
+
+        host.ports_parsed = []
+        self.assertEquals(run_webkit_tests.lint(host.port_factory.ports['a'], MockOptions(platform='a')), 0)
+        self.assertEquals(host.ports_parsed, ['a'])
+
+    def test_lint_test_files(self):
+        res, out, err, user = logging_run(['--lint-test-files'])
+        self.assertEqual(res, 0)
+        self.assertEmpty(out)
+        self.assertContains(err, 'Lint succeeded')
+
+    def test_lint_test_files__errors(self):
+        options, parsed_args = parse_args(['--lint-test-files'])
+        host = MockHost()
+        port_obj = host.port_factory.get(options.platform, options=options)
+        port_obj.expectations_dict = lambda: {'': '-- syntax error'}
+        res, out, err = run_and_capture(port_obj, options, parsed_args)
+
+        self.assertEqual(res, -1)
+        self.assertEmpty(out)
+        self.assertTrue(any(['Lint failed' in msg for msg in err.buflist]))
+
+        # ensure we lint *all* of the files in the cascade.
+        port_obj.expectations_dict = lambda: {'foo': '-- syntax error1', 'bar': '-- syntax error2'}
+        res, out, err = run_and_capture(port_obj, options, parsed_args)
+
+        self.assertEqual(res, -1)
+        self.assertEmpty(out)
+        self.assertTrue(any(['foo:1' in msg for msg in err.buflist]))
+        self.assertTrue(any(['bar:1' in msg for msg in err.buflist]))
+
+
+class MainTest(unittest.TestCase, StreamTestingMixin):
+    def setUp(self):
+        # A real PlatformInfo object is used here instead of a
+        # MockPlatformInfo because we need to actually check for
+        # Windows and Mac to skip some tests.
+        self._platform = SystemHost().platform
+
+        # FIXME: Remove this when we fix test-webkitpy to work
+        # properly on cygwin (bug 63846).
+        self.should_test_processes = not self._platform.is_win()
+
+    def test_accelerated_compositing(self):
+        # This just tests that we recognize the command line args
+        self.assertTrue(passing_run(['--accelerated-video']))
+        self.assertTrue(passing_run(['--no-accelerated-video']))
+
+    def test_accelerated_2d_canvas(self):
+        # This just tests that we recognize the command line args
+        self.assertTrue(passing_run(['--accelerated-2d-canvas']))
+        self.assertTrue(passing_run(['--no-accelerated-2d-canvas']))
+
+    def test_all(self):
+        res, out, err, user = logging_run([], tests_included=True)
+        self.assertEquals(res, unexpected_tests_count)
+
+    def test_basic(self):
+        self.assertTrue(passing_run())
+
+    def test_batch_size(self):
+        batch_tests_run = get_tests_run(['--batch-size', '2'])
+        for batch in batch_tests_run:
+            self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))
+
+    def test_max_locked_shards(self):
+        # Tests for the default of using one locked shard even in the case of more than one child process.
+        if not self.should_test_processes:
+            return
+        save_env_webkit_test_max_locked_shards = None
+        if "WEBKIT_TEST_MAX_LOCKED_SHARDS" in os.environ:
+            save_env_webkit_test_max_locked_shards = os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
+            del os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
+        _, _, regular_output, _ = logging_run(['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
+        try:
+            self.assertTrue(any(['(1 locked)' in line for line in regular_output.buflist]))
+        finally:
+            if save_env_webkit_test_max_locked_shards:
+                os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"] = save_env_webkit_test_max_locked_shards
+
+    def test_child_processes_2(self):
+        if self.should_test_processes:
+            _, _, regular_output, _ = logging_run(
+                ['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
+            self.assertTrue(any(['Running 2 ' in line for line in regular_output.buflist]))
+
+    def test_child_processes_min(self):
+        if self.should_test_processes:
+            _, _, regular_output, _ = logging_run(
+                ['--debug-rwt-logging', '--child-processes', '2', '-i', 'passes/passes', 'passes'],
+                tests_included=True, shared_port=False)
+            self.assertTrue(any(['Running 1 ' in line for line in regular_output.buflist]))
+
+    def test_dryrun(self):
+        batch_tests_run = get_tests_run(['--dry-run'])
+        self.assertEqual(batch_tests_run, [])
+
+        batch_tests_run = get_tests_run(['-n'])
+        self.assertEqual(batch_tests_run, [])
+
+    def test_exception_raised(self):
+        # Exceptions raised by a worker are treated differently depending on
+        # whether they are raised in-process or out-of-process. In-process
+        # exceptions propagate as normal, which allows us to get the full stack
+        # trace and traceback from the worker. The downside is that the error
+        # could be of any type, but this is actually useful in testing.
+        #
+        # Exceptions raised in a separate process are re-packaged into
+        # WorkerExceptions, which carry a string capture of the stack that can
+        # be printed but doesn't display properly in the unit test exception handlers.
+        self.assertRaises(ValueError, logging_run,
+            ['failures/expected/exception.html', '--child-processes', '1'], tests_included=True)
+
+        if self.should_test_processes:
+            self.assertRaises(WorkerException, logging_run,
+                ['--child-processes', '2', '--force', 'failures/expected/exception.html', 'passes/text.html'], tests_included=True, shared_port=False)
+
+    def test_full_results_html(self):
+        # FIXME: verify html?
+        res, out, err, user = logging_run(['--full-results-html'])
+        self.assertEqual(res, 0)
+
+    def test_hung_thread(self):
+        res, out, err, user = logging_run(['--run-singly', '--time-out-ms=50',
+                                          'failures/expected/hang.html'],
+                                          tests_included=True)
+        # Note that hang.html is marked as WontFix and all WontFix tests are
+        # expected to Pass, so that actually running them generates an "unexpected" error.
+        self.assertEqual(res, 1)
+        self.assertNotEmpty(out)
+        self.assertNotEmpty(err)
+
+    def test_keyboard_interrupt(self):
+        # Note that this also tests running a test marked as SKIP if
+        # you specify it explicitly.
+        self.assertRaises(KeyboardInterrupt, logging_run,
+            ['failures/expected/keyboard.html', '--child-processes', '1'],
+            tests_included=True)
+
+        if self.should_test_processes:
+            self.assertRaises(KeyboardInterrupt, logging_run,
+                ['failures/expected/keyboard.html', 'passes/text.html', '--child-processes', '2', '--force'], tests_included=True, shared_port=False)
+
+    def test_no_tests_found(self):
+        res, out, err, user = logging_run(['resources'], tests_included=True)
+        self.assertEqual(res, -1)
+        self.assertEmpty(out)
+        self.assertContains(err, 'No tests to run.\n')
+
+    def test_no_tests_found_2(self):
+        res, out, err, user = logging_run(['foo'], tests_included=True)
+        self.assertEqual(res, -1)
+        self.assertEmpty(out)
+        self.assertContains(err, 'No tests to run.\n')
+
+    def test_randomize_order(self):
+        # FIXME: verify order was shuffled
+        self.assertTrue(passing_run(['--randomize-order']))
+
+    def test_gc_between_tests(self):
+        self.assertTrue(passing_run(['--gc-between-tests']))
+
+    def test_complex_text(self):
+        self.assertTrue(passing_run(['--complex-text']))
+
+    def test_threaded(self):
+        self.assertTrue(passing_run(['--threaded']))
+
+    def test_repeat_each(self):
+        tests_to_run = ['passes/image.html', 'passes/text.html']
+        tests_run = get_tests_run(['--repeat-each', '2'] + tests_to_run, tests_included=True, flatten_batches=True)
+        self.assertEquals(tests_run, ['passes/image.html', 'passes/image.html', 'passes/text.html', 'passes/text.html'])
+
+    def test_ignore_flag(self):
+        # Note that passes/image.html is expected to be run since we specified it directly.
+        tests_run = get_tests_run(['-i', 'passes', 'passes/image.html'], flatten_batches=True, tests_included=True)
+        self.assertFalse('passes/text.html' in tests_run)
+        self.assertTrue('passes/image.html' in tests_run)
+
+    def test_skipped_flag(self):
+        tests_run = get_tests_run(['passes'], tests_included=True, flatten_batches=True)
+        self.assertFalse('passes/skipped/skip.html' in tests_run)
+        num_tests_run_by_default = len(tests_run)
+
+        # Check that nothing changes when we specify skipped=default.
+        self.assertEquals(len(get_tests_run(['--skipped=default', 'passes'], tests_included=True, flatten_batches=True)),
+                          num_tests_run_by_default)
+
+        # Now check that we run one more test (the skipped one).
+        tests_run = get_tests_run(['--skipped=ignore', 'passes'], tests_included=True, flatten_batches=True)
+        self.assertTrue('passes/skipped/skip.html' in tests_run)
+        self.assertEquals(len(tests_run), num_tests_run_by_default + 1)
+
+        # Now check that we only run the skipped test.
+        self.assertEquals(get_tests_run(['--skipped=only', 'passes'], tests_included=True, flatten_batches=True),
+                          ['passes/skipped/skip.html'])
+
+        # Now check that we don't run anything.
+        self.assertEquals(get_tests_run(['--skipped=always', 'passes/skipped/skip.html'], tests_included=True, flatten_batches=True),
+                          [])
+
+    def test_iterations(self):
+        tests_to_run = ['passes/image.html', 'passes/text.html']
+        tests_run = get_tests_run(['--iterations', '2'] + tests_to_run, tests_included=True, flatten_batches=True)
+        self.assertEquals(tests_run, ['passes/image.html', 'passes/text.html', 'passes/image.html', 'passes/text.html'])
+
+    def test_repeat_each_iterations_num_tests(self):
+        # The total number of tests should be: number_of_tests *
+        # repeat_each * iterations
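+        # For example, the run below uses 2 tests with --repeat-each 4 and
+        # --iterations 2, so 2 * 4 * 2 == 16 tests run in total.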
+        host = MockHost()
+        res, out, err, _ = logging_run(['--iterations', '2',
+                                        '--repeat-each', '4',
+                                        '--debug-rwt-logging',
+                                        'passes/text.html', 'failures/expected/text.html'],
+                                       tests_included=True, host=host, record_results=True)
+        self.assertContains(out, "=> Results: 8/16 tests passed (50.0%)\n")
+        self.assertContains(err, "All 16 tests ran as expected.\n")
+
+    def test_run_chunk(self):
+        # Test that we actually select the right chunk
+        all_tests_run = get_tests_run(flatten_batches=True)
+        chunk_tests_run = get_tests_run(['--run-chunk', '1:4'], flatten_batches=True)
+        self.assertEquals(all_tests_run[4:8], chunk_tests_run)
+
+        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
+        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
+        chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run, tests_included=True, flatten_batches=True)
+        self.assertEquals(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)
+
+    def test_run_force(self):
+        # This raises an exception because we run
+        # failures/expected/exception.html, which is normally SKIPped.
+
+        # See also the comments in test_exception_raised() about ValueError vs. WorkerException.
+        self.assertRaises(ValueError, logging_run, ['--force'])
+
+    def test_run_part(self):
+        # Test that we actually select the right part
+        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
+        tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run, tests_included=True, flatten_batches=True)
+        self.assertEquals(['passes/error.html', 'passes/image.html'], tests_run)
+
+        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
+        # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
+        # last part repeats the first two tests).
+        chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run, tests_included=True, flatten_batches=True)
+        self.assertEquals(['passes/error.html', 'passes/image.html'], chunk_tests_run)
+
+    def test_run_singly(self):
+        batch_tests_run = get_tests_run(['--run-singly'])
+        for batch in batch_tests_run:
+            self.assertEquals(len(batch), 1, '%s had too many tests' % ', '.join(batch))
+
+    def test_skip_failing_tests(self):
+        # This tests that we skip both known failing and known flaky tests. Because there are
+        # no known flaky tests in the default test_expectations, we add additional expectations.
+        host = MockHost()
+        host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) passes/image.html [ ImageOnlyFailure Pass ]\n')
+
+        batches = get_tests_run(['--skip-failing-tests', '--additional-expectations', '/tmp/overrides.txt'], host=host)
+        has_passes_text = False
+        for batch in batches:
+            self.assertFalse('failures/expected/text.html' in batch)
+            self.assertFalse('passes/image.html' in batch)
+            has_passes_text = has_passes_text or ('passes/text.html' in batch)
+        self.assertTrue(has_passes_text)
+
+    def test_run_singly_actually_runs_tests(self):
+        res, _, _, _ = logging_run(['--run-singly', 'failures/unexpected'])
+        self.assertEquals(res, unexpected_failures)
+
+    def test_single_file(self):
+        # FIXME: We should consider replacing more of the get_tests_run()-style tests
+        # with tests that read the tests_run* files, like this one.
+        host = MockHost()
+        tests_run = passing_run(['passes/text.html'], tests_included=True, host=host)
+        self.assertEquals(host.filesystem.read_text_file('/tmp/layout-test-results/tests_run0.txt'),
+                          'passes/text.html\n')
+
+    def test_single_file_with_prefix(self):
+        tests_run = get_tests_run(['LayoutTests/passes/text.html'], tests_included=True, flatten_batches=True)
+        self.assertEquals(['passes/text.html'], tests_run)
+
+    def test_single_skipped_file(self):
+        tests_run = get_tests_run(['failures/expected/keybaord.html'], tests_included=True, flatten_batches=True)
+        self.assertEquals([], tests_run)
+
+    def test_stderr_is_saved(self):
+        host = MockHost()
+        self.assertTrue(passing_run(host=host))
+        self.assertEquals(host.filesystem.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt'),
+                          'stuff going to stderr')
+
+    def test_test_list(self):
+        host = MockHost()
+        filename = '/tmp/foo.txt'
+        host.filesystem.write_text_file(filename, 'passes/text.html')
+        tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True, host=host)
+        self.assertEquals(['passes/text.html'], tests_run)
+        host.filesystem.remove(filename)
+        res, out, err, user = logging_run(['--test-list=%s' % filename],
+                                          tests_included=True, host=host)
+        self.assertEqual(res, -1)
+        self.assertNotEmpty(err)
+
+    def test_test_list_with_prefix(self):
+        host = MockHost()
+        filename = '/tmp/foo.txt'
+        host.filesystem.write_text_file(filename, 'LayoutTests/passes/text.html')
+        tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True, host=host)
+        self.assertEquals(['passes/text.html'], tests_run)
+
+    def test_unexpected_failures(self):
+        # Run tests including the unexpected failures.
+        self._url_opened = None
+        res, out, err, user = logging_run(tests_included=True)
+
+        self.assertEqual(res, unexpected_tests_count)
+        self.assertNotEmpty(out)
+        self.assertNotEmpty(err)
+        self.assertEqual(user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])
+
+    def test_missing_and_unexpected_results(self):
+        # Test that we update expectations in place. If the expectation
+        # is missing, update the expected generic location.
+        host = MockHost()
+        res, out, err, _ = logging_run(['--no-show-results',
+            'failures/expected/missing_image.html',
+            'failures/unexpected/missing_text.html',
+            'failures/unexpected/text-image-checksum.html'],
+            tests_included=True, host=host, record_results=True)
+        file_list = host.filesystem.written_files.keys()
+        file_list.remove('/tmp/layout-test-results/tests_run0.txt')
+        self.assertEquals(res, 1)
+        expected_token = '"unexpected":{"text-image-checksum.html":{"expected":"PASS","actual":"IMAGE+TEXT","image_diff_percent":1},"missing_text.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING"}'
+        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
+        self.assertTrue(json_string.find(expected_token) != -1)
+        self.assertTrue(json_string.find('"num_regressions":1') != -1)
+        self.assertTrue(json_string.find('"num_flaky":0') != -1)
+        self.assertTrue(json_string.find('"num_missing":1') != -1)
+
+    def test_pixel_test_directories(self):
+        host = MockHost()
+
+        """Both tests have faling checksum. We include only the first in pixel tests so only that should fail."""
+        args = ['--pixel-tests', '--pixel-test-directory', 'failures/unexpected/pixeldir',
+                'failures/unexpected/pixeldir/image_in_pixeldir.html',
+                'failures/unexpected/image_not_in_pixeldir.html']
+        res, out, err, _ = logging_run(extra_args=args, host=host, record_results=True, tests_included=True)
+
+        self.assertEquals(res, 1)
+        expected_token = '"unexpected":{"pixeldir":{"image_in_pixeldir.html":{"expected":"PASS","actual":"IMAGE"'
+        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
+        self.assertTrue(json_string.find(expected_token) != -1)
+
+    def test_missing_and_unexpected_results_with_custom_exit_code(self):
+        # Test that we update expectations in place. If the expectation
+        # is missing, update the expected generic location.
+        class CustomExitCodePort(TestPort):
+            def exit_code_from_summarized_results(self, unexpected_results):
+                return unexpected_results['num_regressions'] + unexpected_results['num_missing']
+
+        host = MockHost()
+        options, parsed_args = run_webkit_tests.parse_args(['--pixel-tests', '--no-new-test-results'])
+        test_port = CustomExitCodePort(host, options=options)
+        res, out, err, _ = logging_run(['--no-show-results',
+            'failures/expected/missing_image.html',
+            'failures/unexpected/missing_text.html',
+            'failures/unexpected/text-image-checksum.html'],
+            tests_included=True, host=host, record_results=True, port_obj=test_port)
+        self.assertEquals(res, 2)
+
+    def test_crash_with_stderr(self):
+        host = MockHost()
+        res, buildbot_output, regular_output, user = logging_run([
+                'failures/unexpected/crash-with-stderr.html',
+            ],
+            tests_included=True,
+            record_results=True,
+            host=host)
+        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"expected":"PASS","actual":"CRASH","has_stderr":true}}') != -1)
+
+    def test_no_image_failure_with_image_diff(self):
+        host = MockHost()
+        res, buildbot_output, regular_output, user = logging_run([
+                'failures/unexpected/checksum-with-matching-image.html',
+            ],
+            tests_included=True,
+            record_results=True,
+            host=host)
+        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -1)
+
+    def test_crash_log(self):
+        # FIXME: Need to rewrite these tests to not be mac-specific, or move them elsewhere.
+        # Currently CrashLog uploading only works on Darwin.
+        if not self._platform.is_mac():
+            return
+        mock_crash_report = make_mock_crash_report_darwin('DumpRenderTree', 12345)
+        host = MockHost()
+        host.filesystem.write_text_file('/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150719_quadzen.crash', mock_crash_report)
+        res, buildbot_output, regular_output, user = logging_run([
+                'failures/unexpected/crash-with-stderr.html',
+            ],
+            tests_included=True,
+            record_results=True,
+            host=host)
+        expected_crash_log = mock_crash_report
+        self.assertEquals(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/crash-with-stderr-crash-log.txt'), expected_crash_log)
+
+    def test_web_process_crash_log(self):
+        # FIXME: Need to rewrite these tests to not be mac-specific, or move them elsewhere.
+        # Currently CrashLog uploading only works on Darwin.
+        if not self._platform.is_mac():
+            return
+        mock_crash_report = make_mock_crash_report_darwin('WebProcess', 12345)
+        host = MockHost()
+        host.filesystem.write_text_file('/Users/mock/Library/Logs/DiagnosticReports/WebProcess_2011-06-13-150719_quadzen.crash', mock_crash_report)
+        res, buildbot_output, regular_output, user = logging_run([
+                'failures/unexpected/web-process-crash-with-stderr.html',
+            ],
+            tests_included=True,
+            record_results=True,
+            host=host)
+        self.assertEquals(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/web-process-crash-with-stderr-crash-log.txt'), mock_crash_report)
+
+    def test_exit_after_n_failures_upload(self):
+        host = MockHost()
+        res, buildbot_output, regular_output, user = logging_run([
+                'failures/unexpected/text-image-checksum.html',
+                'passes/text.html',
+                '--exit-after-n-failures', '1',
+            ],
+            tests_included=True,
+            record_results=True,
+            host=host)
+
+        # Since incremental_results.json no longer exists, we know the incremental
+        # results were generated and then deleted.
+        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/incremental_results.json'))
+
+        # This checks that we report only the number of tests that actually failed.
+        self.assertEquals(res, 1)
+
+        # This checks that passes/text.html is considered SKIPped.
+        self.assertTrue('"skipped":1' in host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
+
+        # This checks that we told the user we bailed out.
+        self.assertTrue('Exiting early after 1 failures. 1 tests run.\n' in regular_output.getvalue())
+
+        # This checks that neither test ran as expected.
+        # FIXME: This log message is confusing; tests that were skipped should be called out separately.
+        self.assertTrue('0 tests ran as expected, 2 didn\'t:\n' in regular_output.getvalue())
+
+    def test_exit_after_n_failures(self):
+        # Unexpected failures should result in tests stopping.
+        tests_run = get_tests_run([
+                'failures/unexpected/text-image-checksum.html',
+                'passes/text.html',
+                '--exit-after-n-failures', '1',
+            ],
+            tests_included=True,
+            flatten_batches=True)
+        self.assertEquals(['failures/unexpected/text-image-checksum.html'], tests_run)
+
+        # But we'll keep going for expected ones.
+        tests_run = get_tests_run([
+                'failures/expected/text.html',
+                'passes/text.html',
+                '--exit-after-n-failures', '1',
+            ],
+            tests_included=True,
+            flatten_batches=True)
+        self.assertEquals(['failures/expected/text.html', 'passes/text.html'], tests_run)
+
+    def test_exit_after_n_crashes(self):
+        # Unexpected crashes should result in tests stopping.
+        tests_run = get_tests_run([
+                'failures/unexpected/crash.html',
+                'passes/text.html',
+                '--exit-after-n-crashes-or-timeouts', '1',
+            ],
+            tests_included=True,
+            flatten_batches=True)
+        self.assertEquals(['failures/unexpected/crash.html'], tests_run)
+
+        # Same with timeouts.
+        tests_run = get_tests_run([
+                'failures/unexpected/timeout.html',
+                'passes/text.html',
+                '--exit-after-n-crashes-or-timeouts', '1',
+            ],
+            tests_included=True,
+            flatten_batches=True)
+        self.assertEquals(['failures/unexpected/timeout.html'], tests_run)
+
+        # But we'll keep going for expected ones.
+        tests_run = get_tests_run([
+                'failures/expected/crash.html',
+                'passes/text.html',
+                '--exit-after-n-crashes-or-timeouts', '1',
+            ],
+            tests_included=True,
+            flatten_batches=True)
+        self.assertEquals(['failures/expected/crash.html', 'passes/text.html'], tests_run)
+
+    def test_results_directory_absolute(self):
+        # We run a configuration that should fail, to generate output, then
+        # look for what the output results url was.
+
+        host = MockHost()
+        with host.filesystem.mkdtemp() as tmpdir:
+            res, out, err, user = logging_run(['--results-directory=' + str(tmpdir)],
+                                              tests_included=True, host=host)
+            self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, host.filesystem.join(tmpdir, 'results.html'))])
+
+    def test_results_directory_default(self):
+        # We run a configuration that should fail, to generate output, then
+        # look for what the output results url was.
+
+        # This is the default location.
+        res, out, err, user = logging_run(tests_included=True)
+        self.assertEqual(user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])
+
+    def test_results_directory_relative(self):
+        # We run a configuration that should fail, to generate output, then
+        # look for what the output results url was.
+        host = MockHost()
+        host.filesystem.maybe_make_directory('/tmp/cwd')
+        host.filesystem.chdir('/tmp/cwd')
+        res, out, err, user = logging_run(['--results-directory=foo'],
+                                          tests_included=True, host=host)
+        self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, '/tmp/cwd/foo/results.html')])
+
+    def test_retrying_and_flaky_tests(self):
+        host = MockHost()
+        res, out, err, _ = logging_run(['--debug-rwt-logging', 'failures/flaky'], tests_included=True, host=host)
+        self.assertEquals(res, 0)
+        self.assertTrue('Retrying' in err.getvalue())
+        self.assertTrue('Unexpected flakiness' in out.getvalue())
+        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
+        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/tests_run0.txt'))
+        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retries/failures/flaky/text-actual.txt'))
+
+        # Now we test that --clobber-old-results does remove the old entries and the old retries,
+        # and that we don't retry again.
+        host = MockHost()
+        res, out, err, _ = logging_run(['--no-retry-failures', '--clobber-old-results', 'failures/flaky'], tests_included=True, host=host)
+        self.assertEquals(res, 1)
+        self.assertTrue('Clobbering old results' in err.getvalue())
+        self.assertTrue('flaky/text.html' in err.getvalue())
+        self.assertTrue('Unexpected text-only failures' in out.getvalue())
+        self.assertFalse('Unexpected flakiness' in out.getvalue())
+        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
+        self.assertFalse(host.filesystem.exists('retries'))
+
+    def test_run_order__inline(self):
+        # These tests check that we run the tests in ascending alphabetical
+        # order per directory. HTTP tests are sharded separately from other tests,
+        # so we have to check both.
+        tests_run = get_tests_run(['-i', 'passes/passes', 'passes'], tests_included=True, flatten_batches=True)
+        self.assertEquals(tests_run, sorted(tests_run))
+
+        tests_run = get_tests_run(['http/tests/passes'], tests_included=True, flatten_batches=True)
+        self.assertEquals(tests_run, sorted(tests_run))
+
+    def test_tolerance(self):
+        class ImageDiffTestPort(TestPort):
+            def diff_image(self, expected_contents, actual_contents, tolerance=None):
+                self.tolerance_used_for_diff_image = self._options.tolerance
+                return (True, 1, None)
+
+        def get_port_for_run(args):
+            options, parsed_args = run_webkit_tests.parse_args(args)
+            host = MockHost()
+            test_port = ImageDiffTestPort(host, options=options)
+            res = passing_run(args, port_obj=test_port, tests_included=True)
+            self.assertTrue(res)
+            return test_port
+
+        base_args = ['--pixel-tests', '--no-new-test-results', 'failures/expected/*']
+
+        # If we pass in an explicit tolerance argument, then that will be used.
+        test_port = get_port_for_run(base_args + ['--tolerance', '.1'])
+        self.assertEqual(0.1, test_port.tolerance_used_for_diff_image)
+        test_port = get_port_for_run(base_args + ['--tolerance', '0'])
+        self.assertEqual(0, test_port.tolerance_used_for_diff_image)
+
+        # Otherwise the port's default tolerance behavior (including ignoring it)
+        # should be used.
+        test_port = get_port_for_run(base_args)
+        self.assertEqual(None, test_port.tolerance_used_for_diff_image)
+
+    def test_virtual(self):
+        self.assertTrue(passing_run(['passes/text.html', 'passes/args.html',
+                                     'virtual/passes/text.html', 'virtual/passes/args.html']))
+
+    def test_reftest_run(self):
+        tests_run = get_tests_run(['passes/reftest.html'], tests_included=True, flatten_batches=True)
+        self.assertEquals(['passes/reftest.html'], tests_run)
+
+    def test_reftest_run_reftests_if_pixel_tests_are_disabled(self):
+        tests_run = get_tests_run(['--no-pixel-tests', 'passes/reftest.html'], tests_included=True, flatten_batches=True)
+        self.assertEquals(['passes/reftest.html'], tests_run)
+
+    def test_reftest_skip_reftests_if_no_ref_tests(self):
+        tests_run = get_tests_run(['--no-ref-tests', 'passes/reftest.html'], tests_included=True, flatten_batches=True)
+        self.assertEquals([], tests_run)
+        tests_run = get_tests_run(['--no-ref-tests', '--no-pixel-tests', 'passes/reftest.html'], tests_included=True, flatten_batches=True)
+        self.assertEquals([], tests_run)
+
+    def test_reftest_expected_html_should_be_ignored(self):
+        tests_run = get_tests_run(['passes/reftest-expected.html'], tests_included=True, flatten_batches=True)
+        self.assertEquals([], tests_run)
+
+    def test_reftest_driver_should_run_expected_html(self):
+        tests_run = get_tests_run(['passes/reftest.html'], tests_included=True, flatten_batches=True, include_reference_html=True)
+        self.assertEquals(['passes/reftest.html', 'passes/reftest-expected.html'], tests_run)
+
+    def test_reftest_driver_should_run_expected_mismatch_html(self):
+        tests_run = get_tests_run(['passes/mismatch.html'], tests_included=True, flatten_batches=True, include_reference_html=True)
+        self.assertEquals(['passes/mismatch.html', 'passes/mismatch-expected-mismatch.html'], tests_run)
+
+    def test_reftest_should_not_use_naming_convention_if_not_listed_in_reftestlist(self):
+        host = MockHost()
+        res, out, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host, record_results=True)
+        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
+        self.assertTrue(json_string.find('"unlistedtest.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING","is_missing_image":true}') != -1)
+        self.assertTrue(json_string.find('"num_regressions":4') != -1)
+        self.assertTrue(json_string.find('"num_flaky":0') != -1)
+        self.assertTrue(json_string.find('"num_missing":1') != -1)
+
+    def test_additional_platform_directory(self):
+        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo']))
+        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/../foo']))
+        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo', '--additional-platform-directory', '/tmp/bar']))
+        self.assertTrue(passing_run(['--additional-platform-directory', 'foo']))
+
+    def test_additional_expectations(self):
+        host = MockHost()
+        host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) failures/unexpected/mismatch.html [ ImageOnlyFailure ]\n')
+        self.assertTrue(passing_run(['--additional-expectations', '/tmp/overrides.txt', 'failures/unexpected/mismatch.html'],
+                                     tests_included=True, host=host))
+
+    def test_no_http_and_force(self):
+        # See test_run_force; using --force raises an exception.
+        # FIXME: We would like to check the warnings generated.
+        self.assertRaises(ValueError, logging_run, ['--force', '--no-http'])
+
+    @staticmethod
+    def has_test_of_type(tests, type):
+        return [test for test in tests if type in test]
+
+    def test_no_http_tests(self):
+        batch_tests_dryrun = get_tests_run(['LayoutTests/http', 'websocket/'], flatten_batches=True)
+        self.assertTrue(MainTest.has_test_of_type(batch_tests_dryrun, 'http'))
+        self.assertTrue(MainTest.has_test_of_type(batch_tests_dryrun, 'websocket'))
+
+        batch_tests_run_no_http = get_tests_run(['--no-http', 'LayoutTests/http', 'websocket/'], flatten_batches=True)
+        self.assertFalse(MainTest.has_test_of_type(batch_tests_run_no_http, 'http'))
+        self.assertFalse(MainTest.has_test_of_type(batch_tests_run_no_http, 'websocket'))
+
+        batch_tests_run_http = get_tests_run(['--http', 'LayoutTests/http', 'websocket/'], flatten_batches=True)
+        self.assertTrue(MainTest.has_test_of_type(batch_tests_run_http, 'http'))
+        self.assertTrue(MainTest.has_test_of_type(batch_tests_run_http, 'websocket'))
+
+    def test_platform_tests_are_found(self):
+        tests_run = get_tests_run(['--platform', 'test-mac-leopard', 'http'], tests_included=True, flatten_batches=True)
+        self.assertTrue('platform/test-mac-leopard/http/test.html' in tests_run)
+        self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)
+
+    def test_output_diffs(self):
+        # Test to ensure that we don't generate -wdiff.html or -pretty-diff.html if wdiff
+        # and PrettyPatch aren't available.
+        host = MockHost()
+        res, out, err, _ = logging_run(['--pixel-tests', 'failures/unexpected/text-image-checksum.html'],
+                                       tests_included=True, record_results=True, host=host)
+        written_files = host.filesystem.written_files
+        self.assertTrue(any(path.endswith('-diff.txt') for path in written_files.keys()))
+        self.assertFalse(any(path.endswith('-wdiff.html') for path in written_files.keys()))
+        self.assertFalse(any(path.endswith('-pretty-diff.html') for path in written_files.keys()))
+
+        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
+        full_results = json.loads(full_results_text.replace("ADD_RESULTS(", "").replace(");", ""))
+        self.assertEquals(full_results['has_wdiff'], False)
+        self.assertEquals(full_results['has_pretty_patch'], False)
+
+    def test_unsupported_platform(self):
+        oc = outputcapture.OutputCapture()
+        try:
+            oc.capture_output()
+            res = run_webkit_tests.main(['--platform', 'foo'])
+        finally:
+            stdout, stderr, logs = oc.restore_output()
+
+        self.assertEquals(res, run_webkit_tests.EXCEPTIONAL_EXIT_STATUS)
+        self.assertEquals(stdout, '')
+        self.assertTrue('unsupported platform' in stderr)
+
+        # This is empty because we don't even get a chance to configure the logger before failing.
+        self.assertEquals(logs, '')
+
+    def test_verbose_in_child_processes(self):
+        # When we actually run multiple processes, we may have to reconfigure logging in the
+        # child process (e.g., on win32) and we need to make sure that works and we still
+        # see the verbose log output. However, we can't use logging_run() because using
+        # outputcapture to capture stdout and stderr later results in a non-picklable host.
+
+        # Test is flaky on Windows: https://bugs.webkit.org/show_bug.cgi?id=98559
+        if not self.should_test_processes:
+            return
+
+        options, parsed_args = parse_args(['--verbose', '--fully-parallel', '--child-processes', '2', 'passes/text.html', 'passes/image.html'], tests_included=True, print_nothing=False)
+        host = MockHost()
+        port_obj = host.port_factory.get(port_name=options.platform, options=options)
+        buildbot_output = StringIO.StringIO()
+        regular_output = StringIO.StringIO()
+        res = run_webkit_tests.run(port_obj, options, parsed_args, buildbot_output=buildbot_output, regular_output=regular_output)
+        self.assertTrue('text.html passed' in regular_output.getvalue())
+        self.assertTrue('image.html passed' in regular_output.getvalue())
+
+
+class EndToEndTest(unittest.TestCase):
+    def parse_full_results(self, full_results_text):
+        json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
+        compressed_results = json.loads(json_to_eval)
+        return compressed_results
+
+    def test_end_to_end(self):
+        host = MockHost()
+        res, out, err, user = logging_run(record_results=True, tests_included=True, host=host)
+
+        self.assertEquals(res, unexpected_tests_count)
+        results = self.parse_full_results(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
+
+        # Check to ensure we're passing back the image diff percentage correctly.
+        self.assertEquals(results['tests']['failures']['expected']['image.html']['image_diff_percent'], 1)
+
+        # Check that we attempted to display the results page in a browser.
+        self.assertTrue(user.opened_urls)
+
+    def test_reftest_with_two_notrefs(self):
+        # Test that failing reftests report their reftest_type ("==" and/or "!=")
+        # in full_results.json, and that passing reftests are not listed at all.
+        host = MockHost()
+        res, out, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host, record_results=True)
+        file_list = host.filesystem.written_files.keys()
+        file_list.remove('/tmp/layout-test-results/tests_run0.txt')
+        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
+        json = self.parse_full_results(json_string)
+        self.assertTrue("multiple-match-success.html" not in json["tests"]["reftests"]["foo"])
+        self.assertTrue("multiple-mismatch-success.html" not in json["tests"]["reftests"]["foo"])
+        self.assertTrue("multiple-both-success.html" not in json["tests"]["reftests"]["foo"])
+        self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-match-failure.html"],
+            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["=="], "image_diff_percent": 1})
+        self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-mismatch-failure.html"],
+            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["!="]})
+        self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-both-failure.html"],
+            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["==", "!="]})
+
+
+class RebaselineTest(unittest.TestCase, StreamTestingMixin):
+    def assertBaselines(self, file_list, file, extensions, err):
+        "assert that the file_list contains the baselines."""
+        for ext in extensions:
+            baseline = file + "-expected" + ext
+            baseline_msg = 'Writing new expected result "%s"\n' % baseline
+            self.assertTrue(any(f.find(baseline) != -1 for f in file_list))
+            self.assertContains(err, baseline_msg)
+
+    # FIXME: Add tests to ensure that we're *not* writing baselines when we're not
+    # supposed to be.
+
+    def test_reset_results(self):
+        # Test that we update expectations in place. If the expectation
+        # is missing, update the expected generic location.
+        host = MockHost()
+        res, out, err, _ = logging_run(['--pixel-tests',
+                        '--reset-results',
+                        'passes/image.html',
+                        'failures/expected/missing_image.html'],
+                        tests_included=True, host=host, new_results=True)
+        file_list = host.filesystem.written_files.keys()
+        file_list.remove('/tmp/layout-test-results/tests_run0.txt')
+        self.assertEquals(res, 0)
+        self.assertEmpty(out)
+        self.assertEqual(len(file_list), 4)
+        self.assertBaselines(file_list, "passes/image", [".txt", ".png"], err)
+        self.assertBaselines(file_list, "failures/expected/missing_image", [".txt", ".png"], err)
+
+    def test_missing_results(self):
+        # Test that we update expectations in place. If the expectation
+        # is missing, update the expected generic location.
+        host = MockHost()
+        res, out, err, _ = logging_run(['--no-show-results',
+                     'failures/unexpected/missing_text.html',
+                     'failures/unexpected/missing_image.html',
+                     'failures/unexpected/missing_audio.html',
+                     'failures/unexpected/missing_render_tree_dump.html'],
+                     tests_included=True, host=host, new_results=True)
+        file_list = host.filesystem.written_files.keys()
+        file_list.remove('/tmp/layout-test-results/tests_run0.txt')
+        self.assertEquals(res, 0)
+        self.assertNotEmpty(out)
+        self.assertEqual(len(file_list), 6)
+        self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
+        self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_image", [".png"], err)
+        self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_render_tree_dump", [".txt"], err)
+
+    def test_new_baseline(self):
+        # Test that we update the platform expectations in the version-specific directories
+        # for both existing and new baselines.
+        host = MockHost()
+        res, out, err, _ = logging_run(['--pixel-tests',
+                        '--new-baseline',
+                        'passes/image.html',
+                        'failures/expected/missing_image.html'],
+                    tests_included=True, host=host, new_results=True)
+        file_list = host.filesystem.written_files.keys()
+        file_list.remove('/tmp/layout-test-results/tests_run0.txt')
+        self.assertEquals(res, 0)
+        self.assertEmpty(out)
+        self.assertEqual(len(file_list), 4)
+        self.assertBaselines(file_list,
+            "platform/test-mac-leopard/passes/image", [".txt", ".png"], err)
+        self.assertBaselines(file_list,
+            "platform/test-mac-leopard/failures/expected/missing_image", [".txt", ".png"], err)
+
+
+class PortTest(unittest.TestCase):
+    def assert_mock_port_works(self, port_name, args=[]):
+        self.assertTrue(passing_run(args + ['--platform', 'mock-' + port_name, 'fast/harness/results.html'], tests_included=True, host=Host()))
+
+    def disabled_test_chromium_mac_lion(self):
+        self.assert_mock_port_works('chromium-mac-lion')
+
+    def disabled_test_chromium_mac_lion_in_test_shell_mode(self):
+        self.assert_mock_port_works('chromium-mac-lion', args=['--additional-drt-flag=--test-shell'])
+
+    def disabled_test_qt_linux(self):
+        self.assert_mock_port_works('qt-linux')
+
+    def disabled_test_mac_lion(self):
+        self.assert_mock_port_works('mac-lion')
+
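+
+# A small helper sketch (illustrative only): several tests above strip the
+# "ADD_RESULTS(...);" JSONP wrapper from full_results.json by hand before
+# calling json.loads(). Factoring that into one place could look roughly like
+# this; it assumes the wrapper is always exactly "ADD_RESULTS(" ... ");".
+def _load_full_results(filesystem, path='/tmp/layout-test-results/full_results.json'):
+    # Strip the JSONP wrapper and parse the remaining JSON payload.
+    json_text = filesystem.read_text_file(path)
+    return json.loads(json_text.replace("ADD_RESULTS(", "").replace(");", ""))
+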
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/servers/__init__.py b/Tools/Scripts/webkitpy/layout_tests/servers/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/servers/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/Tools/Scripts/webkitpy/layout_tests/servers/apache_http_server.py b/Tools/Scripts/webkitpy/layout_tests/servers/apache_http_server.py
new file mode 100644
index 0000000..7dede92
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/servers/apache_http_server.py
@@ -0,0 +1,193 @@
+#!/usr/bin/env python
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A class to start/stop the apache http server used by layout tests."""
+
+
+import logging
+import os
+import re
+import socket
+import sys
+
+from webkitpy.layout_tests.servers import http_server_base
+
+
+_log = logging.getLogger(__name__)
+
+
+class LayoutTestApacheHttpd(http_server_base.HttpServerBase):
+    def __init__(self, port_obj, output_dir, additional_dirs=None, number_of_servers=None):
+        """Args:
+          port_obj: handle to the platform-specific routines
+          output_dir: the absolute path to the layout test result directory
+        """
+        http_server_base.HttpServerBase.__init__(self, port_obj, number_of_servers)
+        # We use the name "httpd" instead of "apache" to make our paths (e.g. the pid file: /tmp/WebKit/httpd.pid)
+        # match old-run-webkit-tests: https://bugs.webkit.org/show_bug.cgi?id=63956
+        self._name = 'httpd'
+        self._mappings = [{'port': 8000},
+                          {'port': 8080},
+                          {'port': 8443, 'sslcert': True}]
+        self._output_dir = output_dir
+        self._filesystem.maybe_make_directory(output_dir)
+
+        self._pid_file = self._filesystem.join(self._runtime_path, '%s.pid' % self._name)
+
+        test_dir = self._port_obj.layout_tests_dir()
+        js_test_resources_dir = self._filesystem.join(test_dir, "fast", "js", "resources")
+        media_resources_dir = self._filesystem.join(test_dir, "media")
+        mime_types_path = self._filesystem.join(test_dir, "http", "conf", "mime.types")
+        cert_file = self._filesystem.join(test_dir, "http", "conf", "webkit-httpd.pem")
+        access_log = self._filesystem.join(output_dir, "access_log.txt")
+        error_log = self._filesystem.join(output_dir, "error_log.txt")
+        document_root = self._filesystem.join(test_dir, "http", "tests")
+
+        # FIXME: We shouldn't be calling a protected method of _port_obj!
+        executable = self._port_obj._path_to_apache()
+
+        start_cmd = [executable,
+            '-f', "\"%s\"" % self._get_apache_config_file_path(test_dir, output_dir),
+            '-C', "\'DocumentRoot \"%s\"\'" % document_root,
+            '-c', "\'Alias /js-test-resources \"%s\"'" % js_test_resources_dir,
+            '-c', "\'Alias /media-resources \"%s\"'" % media_resources_dir,
+            '-c', "\'TypesConfig \"%s\"\'" % mime_types_path,
+            '-c', "\'CustomLog \"%s\" common\'" % access_log,
+            '-c', "\'ErrorLog \"%s\"\'" % error_log,
+            '-C', "\'User \"%s\"\'" % os.environ.get("USERNAME", os.environ.get("USER", "")),
+            '-c', "\'PidFile %s'" % self._pid_file,
+            '-k', "start"]
+
+        enable_ipv6 = self._port_obj.http_server_supports_ipv6()
+        # Perform part of the checks Apache's APR does when trying to listen to
+        # a specific host/port. This allows us to avoid trying to listen to
+        # IPV6 addresses when it fails on Apache. APR itself tries to call
+        # getaddrinfo() again without AI_ADDRCONFIG if the first call fails
+        # with EBADFLAGS, but that is not how it normally fails in our use
+        # cases, so ignore that for now.
+        # See https://bugs.webkit.org/show_bug.cgi?id=98602#c7
+        try:
+            socket.getaddrinfo('::1', 0, 0, 0, 0, socket.AI_ADDRCONFIG)
+        except:
+            enable_ipv6 = False
+
+        for mapping in self._mappings:
+            port = mapping['port']
+
+            start_cmd += ['-C', "\'Listen 127.0.0.1:%d\'" % port]
+
+            # We listen to both IPv4 and IPv6 loop-back addresses, but ignore
+            # requests to 8000 from random users on network.
+            # See https://bugs.webkit.org/show_bug.cgi?id=37104
+            if enable_ipv6:
+                start_cmd += ['-C', "\'Listen [::1]:%d\'" % port]
+
+        if additional_dirs:
+            for alias, path in additional_dirs.iteritems():
+                start_cmd += ['-c', "\'Alias %s \"%s\"\'" % (alias, path),
+                        # Disable CGI handler for additional dirs.
+                        '-c', "\'<Location %s>\'" % alias,
+                        '-c', "\'RemoveHandler .cgi .pl\'",
+                        '-c', "\'</Location>\'"]
+
+        if self._number_of_servers:
+            start_cmd += ['-c', "\'StartServers %d\'" % self._number_of_servers,
+                          '-c', "\'MinSpareServers %d\'" % self._number_of_servers,
+                          '-c', "\'MaxSpareServers %d\'" % self._number_of_servers]
+
+        stop_cmd = [executable,
+            '-f', "\"%s\"" % self._get_apache_config_file_path(test_dir, output_dir),
+            '-c', "\'PidFile %s'" % self._pid_file,
+            '-k', "stop"]
+
+        start_cmd.extend(['-c', "\'SSLCertificateFile %s\'" % cert_file])
+        # Join the arguments into a single string here so that Cygwin/Windows
+        # and Mac/Linux can use the same code. Otherwise, we could remove the single
+        # quotes above and keep cmd as a sequence.
+        # FIXME: It's unclear if this is still needed.
+        self._start_cmd = " ".join(start_cmd)
+        self._stop_cmd = " ".join(stop_cmd)
+
+    def _get_apache_config_file_path(self, test_dir, output_dir):
+        """Returns the path to the apache config file to use.
+        Args:
+          test_dir: absolute path to the LayoutTests directory.
+          output_dir: absolute path to the layout test results directory.
+        """
+        httpd_config = self._port_obj._path_to_apache_config_file()
+        httpd_config_copy = os.path.join(output_dir, "httpd.conf")
+        httpd_conf = self._filesystem.read_text_file(httpd_config)
+
+        # FIXME: Why do we need to copy the config file since we're not modifying it?
+        self._filesystem.write_text_file(httpd_config_copy, httpd_conf)
+
+        return httpd_config_copy
+
+    def _spawn_process(self):
+        _log.debug('Starting %s server, cmd="%s"' % (self._name, str(self._start_cmd)))
+        retval, err = self._run(self._start_cmd)
+        if retval or len(err):
+            raise http_server_base.ServerError('Failed to start %s: %s' % (self._name, err))
+
+        # For some reason apache isn't guaranteed to have created the pid file before
+        # the process exits, so we wait a little while longer.
+        if not self._wait_for_action(lambda: self._filesystem.exists(self._pid_file)):
+            raise http_server_base.ServerError('Failed to start %s: no pid file found' % self._name)
+
+        return int(self._filesystem.read_text_file(self._pid_file))
+
+    def _stop_running_server(self):
+        # If apache was forcefully killed, the pid file will not have been deleted, so check
+        # that the process specified by the pid_file no longer exists before deleting the file.
+        if self._pid and not self._executive.check_running_pid(self._pid):
+            self._filesystem.remove(self._pid_file)
+            return
+
+        retval, err = self._run(self._stop_cmd)
+        if retval or len(err):
+            raise http_server_base.ServerError('Failed to stop %s: %s' % (self._name, err))
+
+        # For some reason apache isn't guaranteed to have actually stopped after
+        # the stop command returns, so we wait a little while longer for the
+        # pid file to be removed.
+        if not self._wait_for_action(lambda: not self._filesystem.exists(self._pid_file)):
+            raise http_server_base.ServerError('Failed to stop %s: pid file still exists' % self._name)
+
+    def _run(self, cmd):
+        # Use shell=True because we join the arguments into a string for
+        # the sake of Windows/Cygwin, and it needs quoting that breaks
+        # shell=False.
+        # FIXME: We should not need to be joining shell arguments into strings.
+        # shell=True is a trail of tears.
+        # Note: Not thread safe: http://bugs.python.org/issue2320
+        process = self._executive.popen(cmd, shell=True, stderr=self._executive.PIPE)
+        process.wait()
+        retval = process.returncode
+        err = process.stderr.read()
+        return (retval, err)
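+
+
+# A minimal usage sketch (illustrative only). It mirrors the wiring in
+# apache_http_server_unittest.py: MockHost, MockExecutive, and test.TestPort
+# are test doubles, and the stubbed-out methods below are assumptions made so
+# that no real apache binary or socket is needed.
+def _example_start_stop():
+    from webkitpy.common.host_mock import MockHost
+    from webkitpy.common.system.executive_mock import MockExecutive
+    from webkitpy.layout_tests.port import test
+
+    host = MockHost()
+    host.executive = MockExecutive(should_log=True)
+    test_port = test.TestPort(host)
+    # The constructor reads the apache config file, so it has to exist.
+    host.filesystem.write_text_file(test_port._path_to_apache_config_file(), '')
+
+    server = LayoutTestApacheHttpd(test_port, '/mock/output_dir', number_of_servers=4)
+    server._check_that_all_ports_are_available = lambda: True
+    server._is_server_running_on_all_ports = lambda: True
+
+    def fake_wait_for_action(action):
+        # Pretend the pid file appeared and every wait condition was met.
+        host.filesystem.write_text_file('/tmp/WebKit/httpd.pid', '42')
+        return True
+    server._wait_for_action = fake_wait_for_action
+
+    server.start()  # issues the start command through the mock executive
+    server.stop()   # issues the stop command through the mock executive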
diff --git a/Tools/Scripts/webkitpy/layout_tests/servers/apache_http_server_unittest.py b/Tools/Scripts/webkitpy/layout_tests/servers/apache_http_server_unittest.py
new file mode 100644
index 0000000..34ab97b
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/servers/apache_http_server_unittest.py
@@ -0,0 +1,70 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import re
+import sys
+import unittest
+
+from webkitpy.common.system.executive_mock import MockExecutive
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.host_mock import MockHost
+from webkitpy.layout_tests.port import test
+from webkitpy.layout_tests.servers.apache_http_server import LayoutTestApacheHttpd
+from webkitpy.layout_tests.servers.http_server_base import ServerError
+
+
+class TestLayoutTestApacheHttpd(unittest.TestCase):
+    def test_start_cmd(self):
+        # Fails on win - see https://bugs.webkit.org/show_bug.cgi?id=84726
+        if sys.platform in ('cygwin', 'win32'):
+            return
+
+        def fake_pid(_):
+            host.filesystem.write_text_file('/tmp/WebKit/httpd.pid', '42')
+            return True
+
+        host = MockHost()
+        host.executive = MockExecutive(should_log=True)
+        test_port = test.TestPort(host)
+        host.filesystem.write_text_file(test_port._path_to_apache_config_file(), '')
+
+        server = LayoutTestApacheHttpd(test_port, "/mock/output_dir", number_of_servers=4)
+        server._check_that_all_ports_are_available = lambda: True
+        server._is_server_running_on_all_ports = lambda: True
+        server._wait_for_action = fake_pid
+        oc = OutputCapture()
+        try:
+            oc.capture_output()
+            server.start()
+            server.stop()
+        finally:
+            out, err, logs = oc.restore_output()
+        self.assertTrue("StartServers 4" in err)
+        self.assertTrue("MinSpareServers 4" in err)
+        self.assertTrue("MaxSpareServers 4" in err)
+        self.assertTrue(host.filesystem.exists("/mock/output_dir/httpd.conf"))
diff --git a/Tools/Scripts/webkitpy/layout_tests/servers/http_server.py b/Tools/Scripts/webkitpy/layout_tests/servers/http_server.py
new file mode 100755
index 0000000..107c242
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/servers/http_server.py
@@ -0,0 +1,220 @@
+#!/usr/bin/env python
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A class to help start/stop the lighttpd server used by layout tests."""
+
+import logging
+import os
+import time
+
+from webkitpy.layout_tests.servers import http_server_base
+
+
+_log = logging.getLogger(__name__)
+
+
+class Lighttpd(http_server_base.HttpServerBase):
+
+    def __init__(self, port_obj, output_dir, background=False, port=None,
+                 root=None, run_background=None, additional_dirs=None,
+                 layout_tests_dir=None, number_of_servers=None):
+        """Args:
+          output_dir: the absolute path to the layout test result directory
+        """
+        # Webkit tests
+        http_server_base.HttpServerBase.__init__(self, port_obj, number_of_servers)
+        self._name = 'lighttpd'
+        self._output_dir = output_dir
+        self._port = port
+        self._root = root
+        self._run_background = run_background
+        self._additional_dirs = additional_dirs
+        self._layout_tests_dir = layout_tests_dir
+
+        self._pid_file = self._filesystem.join(self._runtime_path, '%s.pid' % self._name)
+
+        if self._port:
+            self._port = int(self._port)
+
+        if not self._layout_tests_dir:
+            self._layout_tests_dir = self._port_obj.layout_tests_dir()
+
+        self._webkit_tests = os.path.join(self._layout_tests_dir, 'http', 'tests')
+        self._js_test_resource = os.path.join(self._layout_tests_dir, 'fast', 'js', 'resources')
+        self._media_resource = os.path.join(self._layout_tests_dir, 'media')
+
+        # Self-generated certificate for the SSL server (for a client cert, get
+        # <base-path>\chrome\test\data\ssl\certs\root_ca_cert.crt)
+        self._pem_file = os.path.join(
+            os.path.dirname(os.path.abspath(__file__)), 'httpd2.pem')
+
+        # One mapping where we can get to everything
+        self.VIRTUALCONFIG = []
+
+        if self._webkit_tests:
+            self.VIRTUALCONFIG.extend(
+               # Three mappings (one with SSL) for LayoutTests http tests
+               [{'port': 8000, 'docroot': self._webkit_tests},
+                {'port': 8080, 'docroot': self._webkit_tests},
+                {'port': 8443, 'docroot': self._webkit_tests,
+                 'sslcert': self._pem_file}])
+
+    def _prepare_config(self):
+        base_conf_file = self._port_obj.path_from_webkit_base('Tools',
+            'Scripts', 'webkitpy', 'layout_tests', 'servers', 'lighttpd.conf')
+        out_conf_file = os.path.join(self._output_dir, 'lighttpd.conf')
+        time_str = time.strftime("%d%b%Y-%H%M%S")
+        access_file_name = "access.log-" + time_str + ".txt"
+        access_log = os.path.join(self._output_dir, access_file_name)
+        log_file_name = "error.log-" + time_str + ".txt"
+        error_log = os.path.join(self._output_dir, log_file_name)
+
+        # Write out the config
+        base_conf = self._filesystem.read_text_file(base_conf_file)
+
+        # FIXME: This should be re-worked so that this block can
+        # use with open() instead of a manual file.close() call.
+        f = self._filesystem.open_text_file_for_writing(out_conf_file)
+        f.write(base_conf)
+
+        # Write out our cgi handlers.  Run perl through env so that it
+        # processes the #! line and runs perl with the proper command
+        # line arguments. Emulate apache's mod_asis with a cat cgi handler.
+        f.write(('cgi.assign = ( ".cgi"  => "/usr/bin/env",\n'
+                 '               ".pl"   => "/usr/bin/env",\n'
+                 '               ".asis" => "/bin/cat",\n'
+                 '               ".php"  => "%s" )\n\n') %
+                                     self._port_obj._path_to_lighttpd_php())
+
+        # Set up the log files
+        f.write(('server.errorlog = "%s"\n'
+                 'accesslog.filename = "%s"\n\n') % (error_log, access_log))
+
+        # Set up the upload folder, which holds temporary upload files and
+        # POST data. This is needed to support XHR layout tests that do POSTs.
+        f.write(('server.upload-dirs = ( "%s" )\n\n') % (self._output_dir))
+
+        # Set up an alias to where the JS test resources are stored
+        f.write(('alias.url = ( "/js-test-resources" => "%s" )\n\n') %
+                    (self._js_test_resource))
+
+        if self._additional_dirs:
+            for alias, path in self._additional_dirs.iteritems():
+                f.write(('alias.url += ( "%s" => "%s" )\n\n') % (alias, path))
+
+        # Set up an alias to where the media resources are stored.
+        f.write(('alias.url += ( "/media-resources" => "%s" )\n\n') %
+                    (self._media_resource))
+
+        # Dump out the virtual host config at the bottom.
+        if self._root:
+            if self._port:
+                # Have both port and root dir.
+                mappings = [{'port': self._port, 'docroot': self._root}]
+            else:
+                # Have only a root dir - set the ports as for LayoutTests.
+                # This is used in ui_tests to run http tests against a browser.
+
+                # default set of ports as for LayoutTests but with a
+                # specified root.
+                mappings = [{'port': 8000, 'docroot': self._root},
+                            {'port': 8080, 'docroot': self._root},
+                            {'port': 8443, 'docroot': self._root,
+                             'sslcert': self._pem_file}]
+        else:
+            mappings = self.VIRTUALCONFIG
+        for mapping in mappings:
+            ssl_setup = ''
+            if 'sslcert' in mapping:
+                ssl_setup = ('  ssl.engine = "enable"\n'
+                             '  ssl.pemfile = "%s"\n' % mapping['sslcert'])
+
+            f.write(('$SERVER["socket"] == "127.0.0.1:%d" {\n'
+                     '  server.document-root = "%s"\n' +
+                     ssl_setup +
+                     '}\n\n') % (mapping['port'], mapping['docroot']))
+        f.close()
+
+        executable = self._port_obj._path_to_lighttpd()
+        module_path = self._port_obj._path_to_lighttpd_modules()
+        start_cmd = [executable,
+                     # Newly written config file
+                     '-f', os.path.join(self._output_dir, 'lighttpd.conf'),
+                     # Where it can find its module dynamic libraries
+                     '-m', module_path]
+
+        if not self._run_background:
+            # Don't background.
+            start_cmd.append('-D')
+
+        # Copy liblightcomp.dylib to /tmp/lighttpd/lib to work around the
+        # bug where mod_alias.so loads it from a hard-coded path.
+        if self._port_obj.host.platform.is_mac():
+            tmp_module_path = '/tmp/lighttpd/lib'
+            if not self._filesystem.exists(tmp_module_path):
+                self._filesystem.maybe_make_directory(tmp_module_path)
+            lib_file = 'liblightcomp.dylib'
+            self._filesystem.copyfile(self._filesystem.join(module_path, lib_file),
+                                      self._filesystem.join(tmp_module_path, lib_file))
+
+        self._start_cmd = start_cmd
+        self._env = self._port_obj.setup_environ_for_server('lighttpd')
+        self._mappings = mappings
+
+    def _remove_stale_logs(self):
+        # Sometimes logs are open in other processes but they should clear eventually.
+        for log_prefix in ('access.log-', 'error.log-'):
+            try:
+                self._remove_log_files(self._output_dir, log_prefix)
+            except OSError, e:
+                _log.warning('Failed to remove old %s %s files' % (self._name, log_prefix))
+
+    def _spawn_process(self):
+        _log.debug('Starting %s server, cmd="%s"' % (self._name, self._start_cmd))
+        process = self._executive.popen(self._start_cmd, env=self._env, shell=False, stderr=self._executive.PIPE)
+        pid = process.pid
+        self._filesystem.write_text_file(self._pid_file, str(pid))
+        return pid
+
+    def _stop_running_server(self):
+        # FIXME: It would be nice if we had a cleaner way of killing this process.
+        # Currently we throw away the process object created in _spawn_process,
+        # since there doesn't appear to be any way to kill the server any more
+        # cleanly using it than just killing the pid, and we need to support
+        # killing a pid directly anyway for run-webkit-httpd and run-webkit-websocketserver.
+        self._wait_for_action(self._check_and_kill)
+        if self._filesystem.exists(self._pid_file):
+            self._filesystem.remove(self._pid_file)
+
+    def _check_and_kill(self):
+        if self._executive.check_running_pid(self._pid):
+            self._executive.kill_process(self._pid)
+            return False
+        return True
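+
+
+# Illustrative sketch only: the per-port $SERVER blocks written by
+# _prepare_config() above follow this template. The docroot and pem file
+# arguments below are placeholders, not the values the port object supplies.
+def _example_socket_block(port=8443, docroot='/LayoutTests/http/tests', pemfile='httpd2.pem'):
+    ssl_setup = ('  ssl.engine = "enable"\n'
+                 '  ssl.pemfile = "%s"\n' % pemfile)
+    return ('$SERVER["socket"] == "127.0.0.1:%d" {\n'
+            '  server.document-root = "%s"\n' +
+            ssl_setup +
+            '}\n\n') % (port, docroot)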
diff --git a/Tools/Scripts/webkitpy/layout_tests/servers/http_server_base.py b/Tools/Scripts/webkitpy/layout_tests/servers/http_server_base.py
new file mode 100755
index 0000000..c1c3da8
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/servers/http_server_base.py
@@ -0,0 +1,202 @@
+#!/usr/bin/env python
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Base class with common routines between the Apache, Lighttpd, and websocket servers."""
+
+import errno
+import logging
+import socket
+import sys
+import tempfile
+import time
+
+
+_log = logging.getLogger(__name__)
+
+
+class ServerError(Exception):
+    pass
+
+
+class HttpServerBase(object):
+    """A skeleton class for starting and stopping servers used by the layout tests."""
+
+    def __init__(self, port_obj, number_of_servers=None):
+        self._executive = port_obj._executive
+        self._filesystem = port_obj._filesystem
+        self._name = '<virtual>'
+        self._mappings = {}
+        self._pid = None
+        self._pid_file = None
+        self._port_obj = port_obj
+        self._number_of_servers = number_of_servers
+
+        # We need a non-checkout-dependent place to put lock files, etc. We
+        # don't use the Python default on the Mac because it defaults to a
+        # randomly-generated directory under /var/folders and no one would ever
+        # look there.
+        tmpdir = tempfile.gettempdir()
+        if port_obj.host.platform.is_mac():
+            tmpdir = '/tmp'
+
+        self._runtime_path = self._filesystem.join(tmpdir, "WebKit")
+        self._filesystem.maybe_make_directory(self._runtime_path)
+
+    def start(self):
+        """Starts the server. It is an error to start an already started server.
+
+        This method also stops any stale servers started by a previous instance."""
+        assert not self._pid, '%s server is already running' % self._name
+
+        # Stop any stale servers left over from previous instances.
+        if self._filesystem.exists(self._pid_file):
+            self._pid = int(self._filesystem.read_text_file(self._pid_file))
+            self._stop_running_server()
+            self._pid = None
+
+        self._remove_stale_logs()
+        self._prepare_config()
+        self._check_that_all_ports_are_available()
+
+        self._pid = self._spawn_process()
+
+        if self._wait_for_action(self._is_server_running_on_all_ports):
+            _log.debug("%s successfully started (pid = %d)" % (self._name, self._pid))
+        else:
+            self._stop_running_server()
+            raise ServerError('Failed to start %s server' % self._name)
+
+    def stop(self):
+        """Stops the server. Stopping a server that isn't started is harmless."""
+        actual_pid = None
+        if self._filesystem.exists(self._pid_file):
+            actual_pid = int(self._filesystem.read_text_file(self._pid_file))
+            if not self._pid:
+                self._pid = actual_pid
+
+        if not self._pid:
+            return
+
+        if not actual_pid:
+            _log.warning('Failed to stop %s: pid file is missing' % self._name)
+            return
+        if self._pid != actual_pid:
+            _log.warning('Failed to stop %s: pid file contains %d, not %d' %
+                         (self._name, actual_pid, self._pid))
+            # Try to kill the existing pid, anyway, in case it got orphaned.
+            self._executive.kill_process(self._pid)
+            self._pid = None
+            return
+
+        _log.debug("Attempting to shut down %s server at pid %d" % (self._name, self._pid))
+        self._stop_running_server()
+        _log.debug("%s server at pid %d stopped" % (self._name, self._pid))
+        self._pid = None
+
+    def _prepare_config(self):
+        """This routine can be overridden by subclasses to do any sort
+        of initialization required prior to starting the server that may fail."""
+        pass
+
+    def _remove_stale_logs(self):
+        """This routine can be overridden by subclasses to try and remove logs
+        left over from a prior run. This routine should log warnings if the
+        files cannot be deleted, but should not fail unless failure to
+        delete the logs will actually cause start() to fail."""
+        pass
+
+    def _spawn_process(self):
+        """This routine must be implemented by subclasses to actually start the server.
+
+        This routine returns the pid of the started process, and also ensures that that
+        pid has been written to self._pid_file."""
+        raise NotImplementedError()
+
+    def _stop_running_server(self):
+        """This routine must be implemented by subclasses to actually stop the running server listed in self._pid_file."""
+        raise NotImplementedError()
+
+    # Utility routines.
+
+    def _remove_log_files(self, folder, starts_with):
+        files = self._filesystem.listdir(folder)
+        for file in files:
+            if file.startswith(starts_with):
+                full_path = self._filesystem.join(folder, file)
+                self._filesystem.remove(full_path)
+
+    def _wait_for_action(self, action, wait_secs=20.0, sleep_secs=1.0):
+        """Repeat the action for wait_sec or until it succeeds, sleeping for sleep_secs
+        in between each attempt. Returns whether it succeeded."""
+        start_time = time.time()
+        while time.time() - start_time < wait_secs:
+            if action():
+                return True
+            _log.debug("Waiting for action: %s" % action)
+            time.sleep(sleep_secs)
+
+        return False
+
+    def _is_server_running_on_all_ports(self):
+        """Returns whether the server is running on all the desired ports."""
+        if not self._executive.check_running_pid(self._pid):
+            _log.debug("Server isn't running at all")
+            raise ServerError("Server exited")
+
+        for mapping in self._mappings:
+            s = socket.socket()
+            port = mapping['port']
+            try:
+                s.connect(('localhost', port))
+                _log.debug("Server running on %d" % port)
+            except IOError, e:
+                if e.errno not in (errno.ECONNREFUSED, errno.ECONNRESET):
+                    raise
+                _log.debug("Server NOT running on %d: %s" % (port, e))
+                return False
+            finally:
+                s.close()
+        return True
+
+    def _check_that_all_ports_are_available(self):
+        for mapping in self._mappings:
+            s = socket.socket()
+            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+            port = mapping['port']
+            try:
+                s.bind(('localhost', port))
+            except IOError, e:
+                if e.errno in (errno.EALREADY, errno.EADDRINUSE):
+                    raise ServerError('Port %d is already in use.' % port)
+                elif sys.platform == 'win32' and e.errno in (errno.WSAEACCES,):
+                    raise ServerError('Port %d is already in use.' % port)
+                else:
+                    raise
+            finally:
+                s.close()
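+
+
+# A hypothetical subclass sketch, for illustration only: concrete servers
+# override _spawn_process() and _stop_running_server() (and optionally
+# _prepare_config() / _remove_stale_logs()) as described in the docstrings
+# above. The 'example-httpd' binary name is made up.
+class _ExampleServer(HttpServerBase):
+    def __init__(self, port_obj):
+        HttpServerBase.__init__(self, port_obj)
+        self._name = 'example'
+        self._mappings = [{'port': 8000}]
+        self._pid_file = self._filesystem.join(self._runtime_path, '%s.pid' % self._name)
+
+    def _spawn_process(self):
+        # Start the real server and hand its pid back to the base class,
+        # making sure the pid also ends up in self._pid_file.
+        process = self._executive.popen(['example-httpd'], stderr=self._executive.PIPE)
+        self._filesystem.write_text_file(self._pid_file, str(process.pid))
+        return process.pid
+
+    def _stop_running_server(self):
+        # Kill the pid recorded by _spawn_process() and clean up the pid file.
+        self._executive.kill_process(self._pid)
+        if self._filesystem.exists(self._pid_file):
+            self._filesystem.remove(self._pid_file)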
diff --git a/Tools/Scripts/webkitpy/layout_tests/servers/http_server_integrationtest.py b/Tools/Scripts/webkitpy/layout_tests/servers/http_server_integrationtest.py
new file mode 100755
index 0000000..237d689
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/servers/http_server_integrationtest.py
@@ -0,0 +1,145 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Integration tests for the new-run-webkit-httpd and new-run-webkit-websocketserver scripts"""
+
+# FIXME: Rename this file to something more descriptive.
+
+import errno
+import os
+import socket
+import subprocess
+import sys
+import tempfile
+import unittest
+
+
+class BaseTest(unittest.TestCase):
+    """Basic framework for script tests."""
+    HOST = 'localhost'
+
+    # Override in actual test classes.
+    PORTS = None
+    SCRIPT_NAME = None
+
+    def assert_servers_are_down(self, ports=None):
+        ports = ports or self.PORTS
+        for port in ports:
+            try:
+                test_socket = socket.socket()
+                test_socket.connect((self.HOST, port))
+                self.fail()
+            except IOError, e:
+                self.assertTrue(e.errno in (errno.ECONNREFUSED, errno.ECONNRESET))
+            finally:
+                test_socket.close()
+
+    def assert_servers_are_up(self, ports=None):
+        ports = ports or self.PORTS
+        for port in ports:
+            try:
+                test_socket = socket.socket()
+                test_socket.connect((self.HOST, port))
+            except IOError, e:
+                self.fail('failed to connect to %s:%d' % (self.HOST, port))
+            finally:
+                test_socket.close()
+
+    def run_script(self, args):
+        script_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
+        script_path = os.path.join(script_dir, self.SCRIPT_NAME)
+        return subprocess.call([sys.executable, script_path] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+    def integration_test_server__normal(self):
+        if not self.SCRIPT_NAME:
+            return
+
+        self.assert_servers_are_down()
+        self.assertEquals(self.run_script(['--server', 'start']), 0)
+        self.assert_servers_are_up()
+        self.assertEquals(self.run_script(['--server', 'stop']), 0)
+        self.assert_servers_are_down()
+
+    def integration_test_server__fails(self):
+        if not self.SCRIPT_NAME:
+            return
+
+        # Test that if a port isn't available, the call fails.
+        for port_number in self.PORTS:
+            test_socket = socket.socket()
+            try:
+                try:
+                    test_socket.bind((self.HOST, port_number))
+                except socket.error, e:
+                    if e.errno in (errno.EADDRINUSE, errno.EALREADY):
+                        self.fail('could not bind to port %d: %s' % (port_number, str(e)))
+                    raise
+                self.assertEquals(self.run_script(['--server', 'start']), 1)
+            finally:
+                self.run_script(['--server', 'stop'])
+                test_socket.close()
+
+        # Test that calling stop() twice is harmless.
+        self.assertEquals(self.run_script(['--server', 'stop']), 0)
+
+    def maybe_make_dir(self, *comps):
+        try:
+            os.makedirs(os.path.join(*comps))
+        except OSError, e:
+            if e.errno != errno.EEXIST:
+                raise
+
+    def integration_test_port_and_root(self):
+        if not self.SCRIPT_NAME:
+            return
+
+        tmpdir = tempfile.mkdtemp(prefix='webkitpytest')
+        self.maybe_make_dir(tmpdir, 'http', 'tests', 'websocket')
+        self.maybe_make_dir(tmpdir, 'fast', 'js', 'resources')
+        self.maybe_make_dir(tmpdir, 'media')
+
+        self.assert_servers_are_down([18000])
+        self.assertEquals(self.run_script(['--server', 'start', '--port=18000', '--root', tmpdir]), 0)
+        self.assert_servers_are_up([18000])
+        self.assertEquals(self.run_script(['--server', 'stop']), 0)
+        self.assert_servers_are_down([18000])
+
+
+class HTTPServerTest(BaseTest):
+    """Tests that new-run-webkit-http must pass."""
+
+    PORTS = (8000, 8080, 8443)
+    SCRIPT_NAME = 'new-run-webkit-httpd'
+
+
+class WebsocketserverTest(BaseTest):
+    """Tests that new-run-webkit-websocketserver must pass."""
+
+    # FIXME: test TLS at some point?
+    PORTS = (8880, )
+    SCRIPT_NAME = 'new-run-webkit-websocketserver'
diff --git a/Tools/Scripts/webkitpy/layout_tests/servers/http_server_unittest.py b/Tools/Scripts/webkitpy/layout_tests/servers/http_server_unittest.py
new file mode 100644
index 0000000..7a14526
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/servers/http_server_unittest.py
@@ -0,0 +1,64 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+import re
+import sys
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.layout_tests.port import test
+from webkitpy.layout_tests.servers.http_server import Lighttpd
+from webkitpy.layout_tests.servers.http_server_base import ServerError
+
+
+class TestHttpServer(unittest.TestCase):
+    def test_start_cmd(self):
+        # Fails on win - see https://bugs.webkit.org/show_bug.cgi?id=84726
+        if sys.platform in ('cygwin', 'win32'):
+            return
+
+        host = MockHost()
+        test_port = test.TestPort(host)
+        host.filesystem.write_text_file(
+            "/mock-checkout/Tools/Scripts/webkitpy/layout_tests/servers/lighttpd.conf", "Mock Config\n")
+        host.filesystem.write_text_file(
+            "/usr/lib/lighttpd/liblightcomp.dylib", "Mock dylib")
+
+        server = Lighttpd(test_port, "/mock/output_dir",
+                          additional_dirs={
+                              "/mock/one-additional-dir": "/mock-checkout/one-additional-dir",
+                              "/mock/another-additional-dir": "/mock-checkout/one-additional-dir"})
+        self.assertRaises(ServerError, server.start)
+
+        config_file = host.filesystem.read_text_file("/mock/output_dir/lighttpd.conf")
+        self.assertEquals(re.findall(r"alias.url.+", config_file), [
+            'alias.url = ( "/js-test-resources" => "/test.checkout/LayoutTests/fast/js/resources" )',
+            'alias.url += ( "/mock/one-additional-dir" => "/mock-checkout/one-additional-dir" )',
+            'alias.url += ( "/mock/another-additional-dir" => "/mock-checkout/one-additional-dir" )',
+            'alias.url += ( "/media-resources" => "/test.checkout/LayoutTests/media" )',
+        ])
diff --git a/Tools/Scripts/webkitpy/layout_tests/servers/httpd2.pem b/Tools/Scripts/webkitpy/layout_tests/servers/httpd2.pem
new file mode 100644
index 0000000..6349b78
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/servers/httpd2.pem
@@ -0,0 +1,41 @@
+-----BEGIN CERTIFICATE-----
+MIIEZDCCAkygAwIBAgIBATANBgkqhkiG9w0BAQUFADBgMRAwDgYDVQQDEwdUZXN0
+IENBMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMN
+TW91bnRhaW4gVmlldzESMBAGA1UEChMJQ2VydCBUZXN0MB4XDTA4MDcyODIyMzIy
+OFoXDTEzMDcyNzIyMzIyOFowSjELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlm
+b3JuaWExEjAQBgNVBAoTCUNlcnQgVGVzdDESMBAGA1UEAxMJMTI3LjAuMC4xMIGf
+MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDQj2tPWPUgbuI4H3/3dnttqVbndwU3
+3BdRCd67DFM44GRrsjDSH4bY/EbFyX9D52d/iy6ZaAmDePcCz5k/fgP3DMujykYG
+qgNiV2ywxTlMj7NlN2C7SRt68fQMZr5iI7rypdxuaZt9lSMD3ENBffYtuLTyZd9a
+3JPJe1TaIab5GwIDAQABo4HCMIG/MAkGA1UdEwQCMAAwHQYDVR0OBBYEFCYLBv5K
+x5sLNVlpLh5FwTwhdDl7MIGSBgNVHSMEgYowgYeAFF3Of5nj1BlBMU/Gz7El9Vqv
+45cxoWSkYjBgMRAwDgYDVQQDEwdUZXN0IENBMQswCQYDVQQGEwJVUzETMBEGA1UE
+CBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNTW91bnRhaW4gVmlldzESMBAGA1UEChMJ
+Q2VydCBUZXN0ggkA1FGT1D/e2U4wDQYJKoZIhvcNAQEFBQADggIBAEtkVmLObUgk
+b2cIA2S+QDtifq1UgVfBbytvR2lFmnADOR55mo0gHQG3HHqq4g034LmoVXDHhUk8
+Gb6aFiv4QubmVhLXcUelTRXwiNvGzkW7pC6Jrq105hdPjzXMKTcmiLaopm5Fqfc7
+hj5Cn1Sjspc8pdeQjrbeMdvca7KlFrGP8YkwCU2xOOX9PiN9G0966BWfjnr/fZZp
++OQVuUFHdiAZwthEMuDpAAXHqYXIsermgdOpgJaA53cf8NqBV2QGhtFgtsJCRoiu
+7DKqhyRWBGyz19VIH2b7y+6qvQVxuHk19kKRM0nftw/yNcJnm7gtttespMUPsOMa
+a2SD1G0hm0TND6vxaBhgR3cVqpl/qIpAdFi00Tm7hTyYE7I43zPW03t+/DpCt3Um
+EMRZsQ90co5q+bcx/vQ7YAtwUh30uMb0wpibeyCwDp8cqNmSiRkEuc/FjTYes5t8
+5gR//WX1l0+qjrjusO9NmoLnq2Yk6UcioX+z+q6Z/dudGfqhLfeWD2Q0LWYA242C
+d7km5Y3KAt1PJdVsof/aiVhVdddY/OIEKTRQhWEdDbosy2eh16BCKXT2FFvhNDg1
+AYFvn6I8nj9IldMJiIc3DdhacEAEzRMeRgPdzAa1griKUGknxsyTyRii8ru0WS6w
+DCNrlDOVXdzYGEZooBI76BDVY0W0akjV
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIICXQIBAAKBgQDQj2tPWPUgbuI4H3/3dnttqVbndwU33BdRCd67DFM44GRrsjDS
+H4bY/EbFyX9D52d/iy6ZaAmDePcCz5k/fgP3DMujykYGqgNiV2ywxTlMj7NlN2C7
+SRt68fQMZr5iI7rypdxuaZt9lSMD3ENBffYtuLTyZd9a3JPJe1TaIab5GwIDAQAB
+AoGANHXu8z2YIzlhE+bwhGm8MGBpKL3qhRuKjeriqMA36tWezOw8lY4ymEAU+Ulv
+BsCdaxqydQoTYou57m4TyUHEcxq9pq3H0zB0qL709DdHi/t4zbV9XIoAzC5v0/hG
+9+Ca29TwC02FCw+qLkNrtwCpwOcQmc+bPxqvFu1iMiahURECQQD2I/Hi2413CMZz
+TBjl8fMiVO9GhA2J0sc8Qi+YcgJakaLD9xcbaiLkTzPZDlA389C1b6Ia+poAr4YA
+Ve0FFbxpAkEA2OobayyHE/QtPEqoy6NLR57jirmVBNmSWWd4lAyL5UIHIYVttJZg
+8CLvbzaU/iDGwR+wKsM664rKPHEmtlyo4wJBAMeSqYO5ZOCJGu9NWjrHjM3fdAsG
+8zs2zhiLya+fcU0iHIksBW5TBmt71Jw/wMc9R5J1K0kYvFml98653O5si1ECQBCk
+RV4/mE1rmlzZzYFyEcB47DQkcM5ictvxGEsje0gnfKyRtAz6zI0f4QbDRUMJ+LWw
+XK+rMsYHa+SfOb0b9skCQQCLdeonsIpFDv/Uv+flHISy0WA+AFkLXrRkBKh6G/OD
+dMHaNevkJgUnpceVEnkrdenp5CcEoFTI17pd+nBgDm/B
+-----END RSA PRIVATE KEY-----
diff --git a/Tools/Scripts/webkitpy/layout_tests/servers/lighttpd.conf b/Tools/Scripts/webkitpy/layout_tests/servers/lighttpd.conf
new file mode 100644
index 0000000..4360c37
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/servers/lighttpd.conf
@@ -0,0 +1,89 @@
+server.tag                  = "LightTPD/1.4.19 (Win32)"
+server.modules              = ( "mod_accesslog",
+                                "mod_alias",
+                                "mod_cgi",
+                                "mod_rewrite" )
+
+# default document root required
+server.document-root = "."
+
+# files to check for if .../ is requested
+index-file.names            = ( "index.php", "index.pl", "index.cgi",
+                                "index.html", "index.htm", "default.htm" )
+# mimetype mapping
+mimetype.assign             = (
+  ".gif"          =>      "image/gif",
+  ".jpg"          =>      "image/jpeg",
+  ".jpeg"         =>      "image/jpeg",
+  ".png"          =>      "image/png",
+  ".svg"          =>      "image/svg+xml",
+  ".css"          =>      "text/css",
+  ".html"         =>      "text/html",
+  ".htm"          =>      "text/html",
+  ".xhtml"        =>      "application/xhtml+xml",
+  ".js"           =>      "application/x-javascript",
+  ".log"          =>      "text/plain",
+  ".conf"         =>      "text/plain",
+  ".text"         =>      "text/plain",
+  ".txt"          =>      "text/plain",
+  ".dtd"          =>      "text/xml",
+  ".xml"          =>      "text/xml",
+  ".manifest"     =>      "text/cache-manifest",
+ )
+
+# Use the "Content-Type" extended attribute to obtain mime type if possible
+mimetype.use-xattr          = "enable"
+
+##
+# which extensions should not be handled via static-file transfer
+#
+# .php, .pl, .fcgi are most often handled by mod_fastcgi or mod_cgi
+static-file.exclude-extensions = ( ".php", ".pl", ".cgi" )
+
+server.bind = "localhost"
+server.port = 8001
+
+## virtual directory listings
+dir-listing.activate        = "enable"
+#dir-listing.encoding       = "iso-8859-2"
+#dir-listing.external-css   = "style/oldstyle.css"
+
+## enable debugging
+#debug.log-request-header   = "enable"
+#debug.log-response-header  = "enable"
+#debug.log-request-handling = "enable"
+#debug.log-file-not-found   = "enable"
+
+#### SSL engine
+#ssl.engine                 = "enable"
+#ssl.pemfile                = "server.pem"
+
+# Rewrite rule for utf-8 path test (LayoutTests/http/tests/uri/utf8-path.html)
+# See the apache rewrite rule at LayoutTests/http/tests/uri/intercept/.htaccess
+# Rewrite rule for LayoutTests/http/tests/appcache/cyrillic-uri.html.
+# See the apache rewrite rule at
+# LayoutTests/http/tests/appcache/resources/intercept/.htaccess
+url.rewrite-once = (
+  "^/uri/intercept/(.*)" => "/uri/resources/print-uri.php",
+  "^/appcache/resources/intercept/(.*)" => "/appcache/resources/print-uri.php"
+)
+
+# LayoutTests/http/tests/xmlhttprequest/response-encoding.html uses an htaccess
+# to override charset for reply2.txt, reply2.xml, and reply4.txt.
+$HTTP["url"] =~ "^/xmlhttprequest/resources/reply2.(txt|xml)" {
+  mimetype.assign = (
+    ".txt" => "text/plain; charset=windows-1251",
+    ".xml" => "text/xml; charset=windows-1251"
+  )
+}
+$HTTP["url"] =~ "^/xmlhttprequest/resources/reply4.txt" {
+  mimetype.assign = ( ".txt" => "text/plain; charset=koi8-r" )
+}
+
+# LayoutTests/http/tests/appcache/wrong-content-type.html uses an htaccess
+# to override mime type for wrong-content-type.manifest.
+$HTTP["url"] =~ "^/appcache/resources/wrong-content-type.manifest" {
+  mimetype.assign = ( ".manifest" => "text/plain" )
+}
+
+# Autogenerated test-specific config follows.
diff --git a/Tools/Scripts/webkitpy/layout_tests/servers/websocket_server.py b/Tools/Scripts/webkitpy/layout_tests/servers/websocket_server.py
new file mode 100644
index 0000000..93747f6
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/servers/websocket_server.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A class to help start/stop the PyWebSocket server used by layout tests."""
+
+import logging
+import os
+import sys
+import time
+
+from webkitpy.layout_tests.servers import http_server
+from webkitpy.layout_tests.servers import http_server_base
+
+_log = logging.getLogger(__name__)
+
+
+_WS_LOG_PREFIX = 'pywebsocket.ws.log-'
+_WSS_LOG_PREFIX = 'pywebsocket.wss.log-'
+
+
+_DEFAULT_WS_PORT = 8880
+_DEFAULT_WSS_PORT = 9323
+
+
+class PyWebSocket(http_server.Lighttpd):
+    def __init__(self, port_obj, output_dir, port=_DEFAULT_WS_PORT,
+                 root=None, use_tls=False,
+                 private_key=None, certificate=None, ca_certificate=None,
+                 pidfile=None):
+        """Args:
+          output_dir: the absolute path to the layout test result directory
+        """
+        http_server.Lighttpd.__init__(self, port_obj, output_dir,
+                                      port=_DEFAULT_WS_PORT,
+                                      root=root)
+        self._output_dir = output_dir
+        self._pid_file = pidfile
+        self._process = None
+
+        self._port = port
+        self._root = root
+        self._use_tls = use_tls
+
+        self._name = 'pywebsocket'
+        if self._use_tls:
+            self._name = 'pywebsocket_secure'
+
+        if private_key:
+            self._private_key = private_key
+        else:
+            self._private_key = self._pem_file
+        if certificate:
+            self._certificate = certificate
+        else:
+            self._certificate = self._pem_file
+        self._ca_certificate = ca_certificate
+        if self._port:
+            self._port = int(self._port)
+        self._wsin = None
+        self._wsout = None
+        self._mappings = [{'port': self._port}]
+
+        if not self._pid_file:
+            self._pid_file = self._filesystem.join(self._runtime_path, '%s.pid' % self._name)
+
+        # WebKit tests
+        # FIXME: This is the wrong way to detect if we're in Chrome vs. WebKit!
+        # The port objects are supposed to abstract this.
+        if self._root:
+            self._layout_tests = self._filesystem.abspath(self._root)
+            self._web_socket_tests = self._filesystem.abspath(self._filesystem.join(self._root, 'http', 'tests', 'websocket', 'tests'))
+        else:
+            try:
+                self._layout_tests = self._port_obj.layout_tests_dir()
+                self._web_socket_tests = self._filesystem.join(self._layout_tests, 'http', 'tests', 'websocket', 'tests')
+            except:
+                self._web_socket_tests = None
+
+        if self._use_tls:
+            self._log_prefix = _WSS_LOG_PREFIX
+        else:
+            self._log_prefix = _WS_LOG_PREFIX
+
+    def _prepare_config(self):
+        time_str = time.strftime('%d%b%Y-%H%M%S')
+        log_file_name = self._log_prefix + time_str
+        # FIXME: Doesn't Executive have a devnull, so that we don't have to use os.devnull directly?
+        self._wsin = open(os.devnull, 'r')
+
+        error_log = self._filesystem.join(self._output_dir, log_file_name + "-err.txt")
+        output_log = self._filesystem.join(self._output_dir, log_file_name + "-out.txt")
+        self._wsout = self._filesystem.open_text_file_for_writing(output_log)
+
+        from webkitpy.thirdparty import mod_pywebsocket
+        python_interp = sys.executable
+        # FIXME: Use self._filesystem.path_to_module(self.__module__) instead of __file__
+        # I think this is trying to get the chrome directory?  Doesn't the port object know that?
+        pywebsocket_base = self._filesystem.join(self._filesystem.dirname(self._filesystem.dirname(self._filesystem.dirname(self._filesystem.abspath(__file__)))), 'thirdparty')
+        pywebsocket_script = self._filesystem.join(pywebsocket_base, 'mod_pywebsocket', 'standalone.py')
+        start_cmd = [
+            python_interp, '-u', pywebsocket_script,
+            '--server-host', 'localhost',
+            '--port', str(self._port),
+            # FIXME: Don't we have a self._port_obj.layout_test_path?
+            '--document-root', self._filesystem.join(self._layout_tests, 'http', 'tests'),
+            '--scan-dir', self._web_socket_tests,
+            '--cgi-paths', '/websocket/tests',
+            '--log-file', error_log,
+        ]
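+        # The assembled command looks roughly like the following (paths and
+        # port are illustrative; the handler-map and TLS options are appended
+        # below when applicable):
+        #   python -u .../thirdparty/mod_pywebsocket/standalone.py \
+        #       --server-host localhost --port 8880 \
+        #       --document-root .../LayoutTests/http/tests \
+        #       --scan-dir .../LayoutTests/http/tests/websocket/tests \
+        #       --cgi-paths /websocket/tests --log-file .../pywebsocket.ws.log-<timestamp>-err.txt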
+
+        handler_map_file = self._filesystem.join(self._web_socket_tests, 'handler_map.txt')
+        if self._filesystem.exists(handler_map_file):
+            _log.debug('Using handler_map_file: %s' % handler_map_file)
+            start_cmd.append('--websock-handlers-map-file')
+            start_cmd.append(handler_map_file)
+        else:
+            _log.warning('No handler_map_file found')
+
+        if self._use_tls:
+            start_cmd.extend(['-t', '-k', self._private_key,
+                              '-c', self._certificate])
+            if self._ca_certificate:
+                start_cmd.append('--ca-certificate')
+                start_cmd.append(self._ca_certificate)
+
+        self._start_cmd = start_cmd
+        server_name = self._filesystem.basename(pywebsocket_script)
+        self._env = self._port_obj.setup_environ_for_server(server_name)
+        self._env['PYTHONPATH'] = (pywebsocket_base + os.path.pathsep + self._env.get('PYTHONPATH', ''))
+
+    def _remove_stale_logs(self):
+        try:
+            self._remove_log_files(self._output_dir, self._log_prefix)
+        except OSError, e:
+            _log.warning('Failed to remove stale %s log files: %s' % (self._name, str(e)))
+
+    def _spawn_process(self):
+        _log.debug('Starting %s server, cmd="%s"' % (self._name, self._start_cmd))
+        self._process = self._executive.popen(self._start_cmd, env=self._env, shell=False, stdin=self._wsin, stdout=self._wsout, stderr=self._executive.STDOUT)
+        self._filesystem.write_text_file(self._pid_file, str(self._process.pid))
+        return self._process.pid
+
+    def _stop_running_server(self):
+        super(PyWebSocket, self)._stop_running_server()
+
+        if self._wsin:
+            self._wsin.close()
+            self._wsin = None
+        if self._wsout:
+            self._wsout.close()
+            self._wsout = None
diff --git a/Tools/Scripts/webkitpy/layout_tests/views/__init__.py b/Tools/Scripts/webkitpy/layout_tests/views/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/views/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/Tools/Scripts/webkitpy/layout_tests/views/metered_stream.py b/Tools/Scripts/webkitpy/layout_tests/views/metered_stream.py
new file mode 100644
index 0000000..acea93e
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/views/metered_stream.py
@@ -0,0 +1,136 @@
+#!/usr/bin/env python
+# Copyright (C) 2010, 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import os
+import sys
+import time
+
+LOG_HANDLER_NAME = 'MeteredStreamLogHandler'
+
+
+class MeteredStream(object):
+    """
+    This class implements a stream wrapper that has 'meters' as well as
+    regular output. A 'meter' is a single line of text that can be erased
+    and rewritten repeatedly, without producing multiple lines of output. It
+    can be used to produce effects like progress bars.
+    """
+
+    @staticmethod
+    def _erasure(txt):
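+        # Backspace over the previous text, overwrite it with spaces, then
+        # backspace again so the cursor ends up at the start of the line.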
+        num_chars = len(txt)
+        return '\b' * num_chars + ' ' * num_chars + '\b' * num_chars
+
+    @staticmethod
+    def _ensure_newline(txt):
+        return txt if txt.endswith('\n') else txt + '\n'
+
+    def __init__(self, stream=None, verbose=False, logger=None, time_fn=None, pid=None, number_of_columns=None):
+        self._stream = stream or sys.stderr
+        self._verbose = verbose
+        self._time_fn = time_fn or time.time
+        self._pid = pid or os.getpid()
+        self._isatty = self._stream.isatty()
+        self._erasing = self._isatty and not verbose
+        self._last_partial_line = ''
+        self._last_write_time = 0.0
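+        # When the line can be erased in place (interactive tty, non-verbose),
+        # throttled updates may fire roughly 15 times a second; otherwise only
+        # once every 10 seconds so non-interactive logs are not flooded.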
+        self._throttle_delay_in_secs = 0.066 if self._erasing else 10.0
+        self._number_of_columns = sys.maxint
+        if self._isatty and number_of_columns:
+            self._number_of_columns = number_of_columns
+
+        self._logger = logger
+        self._log_handler = None
+        if self._logger:
+            log_level = logging.DEBUG if verbose else logging.INFO
+            self._log_handler = _LogHandler(self)
+            self._log_handler.setLevel(log_level)
+            self._logger.addHandler(self._log_handler)
+
+    def __del__(self):
+        self.cleanup()
+
+    def cleanup(self):
+        if self._logger:
+            self._logger.removeHandler(self._log_handler)
+            self._log_handler = None
+
+    def write_throttled_update(self, txt):
+        now = self._time_fn()
+        if now - self._last_write_time >= self._throttle_delay_in_secs:
+            self.write_update(txt, now)
+
+    def write_update(self, txt, now=None):
+        self.write(txt, now)
+        if self._erasing:
+            self._last_partial_line = txt[txt.rfind('\n') + 1:]
+
+    def write(self, txt, now=None, pid=None):
+        now = now or self._time_fn()
+        pid = pid or self._pid
+        self._last_write_time = now
+        if self._last_partial_line:
+            self._erase_last_partial_line()
+        if self._verbose:
+            now_tuple = time.localtime(now)
+            msg = '%02d:%02d:%02d.%03d %d %s' % (now_tuple.tm_hour, now_tuple.tm_min, now_tuple.tm_sec, int((now * 1000) % 1000), pid, self._ensure_newline(txt))
+        elif self._isatty:
+            msg = txt
+        else:
+            msg = self._ensure_newline(txt)
+
+        self._stream.write(msg)
+
+    def writeln(self, txt, now=None, pid=None):
+        self.write(self._ensure_newline(txt), now, pid)
+
+    def _erase_last_partial_line(self):
+        self._stream.write(self._erasure(self._last_partial_line))
+        self._last_partial_line = ''
+
+    def flush(self):
+        if self._last_partial_line:
+            self._stream.write('\n')
+            self._last_partial_line = ''
+            self._stream.flush()
+
+    def number_of_columns(self):
+        return self._number_of_columns
+
+
+class _LogHandler(logging.Handler):
+    def __init__(self, meter):
+        logging.Handler.__init__(self)
+        self._meter = meter
+        self.name = LOG_HANDLER_NAME
+
+    def emit(self, record):
+        self._meter.writeln(record.getMessage(), record.created, record.process)
diff --git a/Tools/Scripts/webkitpy/layout_tests/views/metered_stream_unittest.py b/Tools/Scripts/webkitpy/layout_tests/views/metered_stream_unittest.py
new file mode 100644
index 0000000..b388ec6
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/views/metered_stream_unittest.py
@@ -0,0 +1,158 @@
+#!/usr/bin/python
+# Copyright (C) 2010, 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import re
+import StringIO
+import unittest
+
+from webkitpy.layout_tests.views.metered_stream import MeteredStream
+
+
+class RegularTest(unittest.TestCase):
+    verbose = False
+    isatty = False
+
+    def setUp(self):
+        self.stream = StringIO.StringIO()
+        self.buflist = self.stream.buflist
+        self.stream.isatty = lambda: self.isatty
+
+        # Configure a logger to verify that log calls normally get included.
+        self.logger = logging.getLogger(__name__)
+        self.logger.setLevel(logging.DEBUG)
+        self.logger.propagate = False
+
+        # Provide a dummy, deterministic time source as the default behavior.
+        self.times = range(10)
+
+        self.meter = MeteredStream(self.stream, self.verbose, self.logger, self.time_fn, 8675)
+
+    def tearDown(self):
+        if self.meter:
+            self.meter.cleanup()
+            self.meter = None
+
+    def time_fn(self):
+        return self.times.pop(0)
+
+    def test_logging_not_included(self):
+        # This tests that if we don't hand a logger to the MeteredStream,
+        # nothing is logged.
+        logging_stream = StringIO.StringIO()
+        handler = logging.StreamHandler(logging_stream)
+        root_logger = logging.getLogger()
+        orig_level = root_logger.level
+        root_logger.addHandler(handler)
+        root_logger.setLevel(logging.DEBUG)
+        try:
+            self.meter = MeteredStream(self.stream, self.verbose, None, self.time_fn, 8675)
+            self.meter.write_throttled_update('foo')
+            self.meter.write_update('bar')
+            self.meter.write('baz')
+            self.assertEquals(logging_stream.buflist, [])
+        finally:
+            root_logger.removeHandler(handler)
+            root_logger.setLevel(orig_level)
+
+    def _basic(self, times):
+        self.times = times
+        self.meter.write_update('foo')
+        self.meter.write_update('bar')
+        self.meter.write_throttled_update('baz')
+        self.meter.write_throttled_update('baz 2')
+        self.meter.writeln('done')
+        self.assertEquals(self.times, [])
+        return self.buflist
+
+    def test_basic(self):
+        buflist = self._basic([0, 1, 2, 13, 14])
+        self.assertEquals(buflist, ['foo\n', 'bar\n', 'baz 2\n', 'done\n'])
+
+    def _log_after_update(self):
+        self.meter.write_update('foo')
+        self.logger.info('bar')
+        return self.buflist
+
+    def test_log_after_update(self):
+        buflist = self._log_after_update()
+        self.assertEquals(buflist, ['foo\n', 'bar\n'])
+
+    def test_log_args(self):
+        self.logger.info('foo %s %d', 'bar', 2)
+        self.assertEquals(self.buflist, ['foo bar 2\n'])
+
+
+class TtyTest(RegularTest):
+    verbose = False
+    isatty = True
+
+    def test_basic(self):
+        buflist = self._basic([0, 1, 1.05, 1.1, 2])
+        self.assertEquals(buflist, ['foo',
+                                     MeteredStream._erasure('foo'), 'bar',
+                                     MeteredStream._erasure('bar'), 'baz 2',
+                                     MeteredStream._erasure('baz 2'), 'done\n'])
+
+    def test_log_after_update(self):
+        buflist = self._log_after_update()
+        self.assertEquals(buflist, ['foo',
+                                     MeteredStream._erasure('foo'), 'bar\n'])
+
+
+class VerboseTest(RegularTest):
+    isatty = False
+    verbose = True
+
+    def test_basic(self):
+        buflist = self._basic([0, 1, 2.1, 13, 14.1234])
+        # We don't bother to match the hours and minutes of the timestamp since
+        # the local timezone can vary and we can't set that portably and easily.
+        self.assertTrue(re.match('\d\d:\d\d:00.000 8675 foo\n', buflist[0]))
+        self.assertTrue(re.match('\d\d:\d\d:01.000 8675 bar\n', buflist[1]))
+        self.assertTrue(re.match('\d\d:\d\d:13.000 8675 baz 2\n', buflist[2]))
+        self.assertTrue(re.match('\d\d:\d\d:14.123 8675 done\n', buflist[3]))
+        self.assertEquals(len(buflist), 4)
+
+    def test_log_after_update(self):
+        buflist = self._log_after_update()
+        self.assertTrue(re.match('\d\d:\d\d:00.000 8675 foo\n', buflist[0]))
+
+        # The second argument should have a real timestamp and pid, so we just check the format.
+        self.assertTrue(re.match('\d\d:\d\d:\d\d.\d\d\d \d+ bar\n', buflist[1]))
+
+        self.assertEquals(len(buflist), 2)
+
+    def test_log_args(self):
+        self.logger.info('foo %s %d', 'bar', 2)
+        self.assertEquals(len(self.buflist), 1)
+        self.assertTrue(self.buflist[0].endswith('foo bar 2\n'))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/views/printing.py b/Tools/Scripts/webkitpy/layout_tests/views/printing.py
new file mode 100644
index 0000000..b7a9195
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/views/printing.py
@@ -0,0 +1,504 @@
+#!/usr/bin/env python
+# Copyright (C) 2010, 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Package that handles non-debug, non-file output for run-webkit-tests."""
+
+import math
+import optparse
+
+from webkitpy.tool import grammar
+from webkitpy.common.net import resultsjsonparser
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models.test_expectations import TestExpectations, TestExpectationParser
+from webkitpy.layout_tests.views.metered_stream import MeteredStream
+
+
+NUM_SLOW_TESTS_TO_LOG = 10
+
+
+def print_options():
+    return [
+        optparse.make_option('-q', '--quiet', action='store_true', default=False,
+                             help='run quietly (errors, warnings, and progress only)'),
+        optparse.make_option('-v', '--verbose', action='store_true', default=False,
+                             help='print a summarized result for every test (one line per test)'),
+        optparse.make_option('--details', action='store_true', default=False,
+                             help='print detailed results for every test'),
+        optparse.make_option('--debug-rwt-logging', action='store_true', default=False,
+                             help='print timestamps and debug information for run-webkit-tests itself'),
+    ]
+
+
+class Printer(object):
+    """Class handling all non-debug-logging printing done by run-webkit-tests.
+
+    Printing from run-webkit-tests falls into two buckets: regular output
+    that is read only by humans and can be changed at any time, and output
+    that is parsed by buildbots (and humans) and hence must be changed more
+    carefully and in coordination with the buildbot parsing code (in
+    chromium.org's buildbot/master.chromium/scripts/master/
+    log_parser/webkit_test_command.py script).
+
+    By default the buildbot-parsed output gets logged to stdout, and regular
+    output gets logged to stderr."""
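+    #
+    # A rough usage sketch (the streams and arguments shown are illustrative
+    # assumptions, not the exact objects run-webkit-tests constructs):
+    #
+    #   printer = Printer(port, options, regular_output=sys.stderr,
+    #                     buildbot_output=sys.stdout)
+    #   printer.print_config(results_directory)
+    #   printer.print_started_test('fast/js/a.html')
+    #   ...
+    #   printer.cleanup()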
+    def __init__(self, port, options, regular_output, buildbot_output, logger=None):
+        self.num_completed = 0
+        self.num_tests = 0
+        self._port = port
+        self._options = options
+        self._buildbot_stream = buildbot_output
+        self._meter = MeteredStream(regular_output, options.debug_rwt_logging, logger=logger,
+                                    number_of_columns=self._port.host.platform.terminal_width())
+        self._running_tests = []
+        self._completed_tests = []
+
+    def cleanup(self):
+        self._meter.cleanup()
+
+    def __del__(self):
+        self.cleanup()
+
+    def print_config(self, results_directory):
+        self._print_default("Using port '%s'" % self._port.name())
+        self._print_default("Test configuration: %s" % self._port.test_configuration())
+        self._print_default("Placing test results in %s" % results_directory)
+
+        # FIXME: should these options be in printing_options?
+        if self._options.new_baseline:
+            self._print_default("Placing new baselines in %s" % self._port.baseline_path())
+
+        fs = self._port.host.filesystem
+        fallback_path = [fs.split(x)[1] for x in self._port.baseline_search_path()]
+        self._print_default("Baseline search path: %s -> generic" % " -> ".join(fallback_path))
+
+        self._print_default("Using %s build" % self._options.configuration)
+        if self._options.pixel_tests:
+            self._print_default("Pixel tests enabled")
+        else:
+            self._print_default("Pixel tests disabled")
+
+        self._print_default("Regular timeout: %s, slow test timeout: %s" %
+                  (self._options.time_out_ms, self._options.slow_time_out_ms))
+
+        self._print_default('Command line: ' + ' '.join(self._port.driver_cmd_line()))
+        self._print_default('')
+
+    def print_found(self, num_all_test_files, num_to_run, repeat_each, iterations):
+        num_unique_tests = num_to_run / (repeat_each * iterations)
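+        # For example, 60 scheduled runs with --repeat-each=2 --iterations=3
+        # correspond to 10 unique tests (the numbers here are illustrative).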
+        found_str = 'Found %s; running %d' % (grammar.pluralize('test', num_all_test_files), num_unique_tests)
+        if repeat_each * iterations > 1:
+            found_str += ' (%d times each: --repeat-each=%d --iterations=%d)' % (repeat_each * iterations, repeat_each, iterations)
+        found_str += ', skipping %d' % (num_all_test_files - num_unique_tests)
+        self._print_default(found_str + '.')
+
+    def print_expected(self, result_summary, tests_with_result_type_callback):
+        self._print_expected_results_of_type(result_summary, test_expectations.PASS, "passes", tests_with_result_type_callback)
+        self._print_expected_results_of_type(result_summary, test_expectations.FAIL, "failures", tests_with_result_type_callback)
+        self._print_expected_results_of_type(result_summary, test_expectations.FLAKY, "flaky", tests_with_result_type_callback)
+        self._print_debug('')
+
+    def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
+        driver_name = self._port.driver_name()
+        if num_workers == 1:
+            self._print_default("Running 1 %s over %s." % (driver_name, grammar.pluralize('shard', num_shards)))
+        else:
+            self._print_default("Running %d %ss in parallel over %d shards (%d locked)." %
+                (num_workers, driver_name, num_shards, num_locked_shards))
+        self._print_default('')
+
+    def _print_expected_results_of_type(self, result_summary, result_type, result_type_str, tests_with_result_type_callback):
+        tests = tests_with_result_type_callback(result_type)
+        now = result_summary.tests_by_timeline[test_expectations.NOW]
+        wontfix = result_summary.tests_by_timeline[test_expectations.WONTFIX]
+
+        # We use a fancy format string in order to print the data out in a
+        # nicely-aligned table.
+        fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)"
+                  % (self._num_digits(now), self._num_digits(wontfix)))
+        self._print_debug(fmtstr % (len(tests), result_type_str, len(tests & now), len(tests & wontfix)))
+
+    def _num_digits(self, num):
+        ndigits = 1
+        if len(num):
+            ndigits = int(math.log10(len(num))) + 1
+        return ndigits
+
+    def print_results(self, run_time, thread_timings, test_timings, individual_test_timings, result_summary, unexpected_results):
+        self._print_timing_statistics(run_time, thread_timings, test_timings, individual_test_timings, result_summary)
+        self._print_result_summary(result_summary)
+        self._print_one_line_summary(result_summary.total - result_summary.expected_skips,
+                                     result_summary.expected - result_summary.expected_skips,
+                                     result_summary.unexpected)
+        self._print_unexpected_results(unexpected_results)
+
+    def _print_timing_statistics(self, total_time, thread_timings,
+                                 directory_test_timings, individual_test_timings,
+                                 result_summary):
+        self._print_debug("Test timing:")
+        self._print_debug("  %6.2f total testing time" % total_time)
+        self._print_debug("")
+        self._print_debug("Thread timing:")
+        cuml_time = 0
+        for t in thread_timings:
+            self._print_debug("    %10s: %5d tests, %6.2f secs" % (t['name'], t['num_tests'], t['total_time']))
+            cuml_time += t['total_time']
+        self._print_debug("   %6.2f cumulative, %6.2f optimal" % (cuml_time, cuml_time / int(self._options.child_processes)))
+        self._print_debug("")
+
+        self._print_aggregate_test_statistics(individual_test_timings)
+        self._print_individual_test_times(individual_test_timings, result_summary)
+        self._print_directory_timings(directory_test_timings)
+
+    def _print_aggregate_test_statistics(self, individual_test_timings):
+        times_for_dump_render_tree = [test_stats.test_run_time for test_stats in individual_test_timings]
+        self._print_statistics_for_test_timings("PER TEST TIME IN TESTSHELL (seconds):", times_for_dump_render_tree)
+
+    def _print_individual_test_times(self, individual_test_timings, result_summary):
+        # Reverse-sort by the time spent in DumpRenderTree.
+        individual_test_timings.sort(lambda a, b: cmp(b.test_run_time, a.test_run_time))
+        num_printed = 0
+        slow_tests = []
+        timeout_or_crash_tests = []
+        unexpected_slow_tests = []
+        for test_tuple in individual_test_timings:
+            test_name = test_tuple.test_name
+            is_timeout_crash_or_slow = False
+            if test_name in result_summary.slow_tests:
+                is_timeout_crash_or_slow = True
+                slow_tests.append(test_tuple)
+
+            if test_name in result_summary.failures:
+                result = result_summary.results[test_name].type
+                if (result == test_expectations.TIMEOUT or
+                    result == test_expectations.CRASH):
+                    is_timeout_crash_or_slow = True
+                    timeout_or_crash_tests.append(test_tuple)
+
+            if (not is_timeout_crash_or_slow and num_printed < NUM_SLOW_TESTS_TO_LOG):
+                num_printed = num_printed + 1
+                unexpected_slow_tests.append(test_tuple)
+
+        self._print_debug("")
+        self._print_test_list_timing("%s slowest tests that are not marked as SLOW and did not timeout/crash:" %
+            NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests)
+        self._print_debug("")
+        self._print_test_list_timing("Tests marked as SLOW:", slow_tests)
+        self._print_debug("")
+        self._print_test_list_timing("Tests that timed out or crashed:", timeout_or_crash_tests)
+        self._print_debug("")
+
+    def _print_test_list_timing(self, title, test_list):
+        self._print_debug(title)
+        for test_tuple in test_list:
+            test_run_time = round(test_tuple.test_run_time, 1)
+            self._print_debug("  %s took %s seconds" % (test_tuple.test_name, test_run_time))
+
+    def _print_directory_timings(self, directory_test_timings):
+        timings = []
+        for directory in directory_test_timings:
+            num_tests, time_for_directory = directory_test_timings[directory]
+            timings.append((round(time_for_directory, 1), directory, num_tests))
+        timings.sort()
+
+        self._print_debug("Time to process slowest subdirectories:")
+        min_seconds_to_print = 10
+        for timing in timings:
+            if timing[0] > min_seconds_to_print:
+                self._print_debug("  %s took %s seconds to run %s tests." % (timing[1], timing[0], timing[2]))
+        self._print_debug("")
+
+    def _print_statistics_for_test_timings(self, title, timings):
+        self._print_debug(title)
+        timings.sort()
+
+        num_tests = len(timings)
+        if not num_tests:
+            return
+        percentile90 = timings[int(.9 * num_tests)]
+        percentile99 = timings[int(.99 * num_tests)]
+
+        if num_tests % 2 == 1:
+            median = timings[(num_tests - 1) / 2]
+        else:
+            lower = timings[num_tests / 2 - 1]
+            upper = timings[num_tests / 2]
+            median = (float(lower + upper)) / 2
+
+        mean = sum(timings) / num_tests
+
+        sum_of_deviations = 0
+        for timing in timings:
+            sum_of_deviations += math.pow(timing - mean, 2)
+
+        std_deviation = math.sqrt(sum_of_deviations / num_tests)
+        self._print_debug("  Median:          %6.3f" % median)
+        self._print_debug("  Mean:            %6.3f" % mean)
+        self._print_debug("  90th percentile: %6.3f" % percentile90)
+        self._print_debug("  99th percentile: %6.3f" % percentile99)
+        self._print_debug("  Standard dev:    %6.3f" % std_deviation)
+        self._print_debug("")
+
+    def _print_result_summary(self, result_summary):
+        if not self._options.debug_rwt_logging:
+            return
+
+        failed = result_summary.total_failures
+        total = result_summary.total - result_summary.expected_skips
+        passed = total - failed - result_summary.remaining
+        pct_passed = 0.0
+        if total > 0:
+            pct_passed = float(passed) * 100 / total
+
+        self._print_for_bot("=> Results: %d/%d tests passed (%.1f%%)" % (passed, total, pct_passed))
+        self._print_for_bot("")
+        self._print_result_summary_entry(result_summary, test_expectations.NOW, "Tests to be fixed")
+
+        self._print_for_bot("")
+        # FIXME: We should be skipping anything marked WONTFIX, so we shouldn't bother logging these stats.
+        self._print_result_summary_entry(result_summary, test_expectations.WONTFIX,
+            "Tests that will only be fixed if they crash (WONTFIX)")
+        self._print_for_bot("")
+
+    def _print_result_summary_entry(self, result_summary, timeline, heading):
+        total = len(result_summary.tests_by_timeline[timeline])
+        not_passing = (total -
+           len(result_summary.tests_by_expectation[test_expectations.PASS] &
+               result_summary.tests_by_timeline[timeline]))
+        self._print_for_bot("=> %s (%d):" % (heading, not_passing))
+
+        for result in TestExpectations.EXPECTATION_ORDER:
+            if result in (test_expectations.PASS, test_expectations.SKIP):
+                continue
+            results = (result_summary.tests_by_expectation[result] &
+                       result_summary.tests_by_timeline[timeline])
+            desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result]
+            if not_passing and len(results):
+                pct = len(results) * 100.0 / not_passing
+                self._print_for_bot("  %5d %-24s (%4.1f%%)" % (len(results), desc, pct))
+
+    def _print_one_line_summary(self, total, expected, unexpected):
+        incomplete = total - expected - unexpected
+        incomplete_str = ''
+        if incomplete:
+            self._print_default("")
+            incomplete_str = " (%d didn't run)" % incomplete
+
+        if self._options.verbose or self._options.debug_rwt_logging or unexpected:
+            self.writeln("")
+
+        summary = ''
+        if unexpected == 0:
+            if expected == total:
+                if expected > 1:
+                    summary = "All %d tests ran as expected." % expected
+                else:
+                    summary = "The test ran as expected."
+            else:
+                summary = "%s ran as expected%s." % (grammar.pluralize('test', expected), incomplete_str)
+        else:
+            summary = "%s ran as expected, %d didn't%s:" % (grammar.pluralize('test', expected), unexpected, incomplete_str)
+
+        self._print_quiet(summary)
+        self._print_quiet("")
+
+    def _test_status_line(self, test_name, suffix):
+        format_string = '[%d/%d] %s%s'
+        status_line = format_string % (self.num_completed, self.num_tests, test_name, suffix)
+        if len(status_line) > self._meter.number_of_columns():
+            overflow_columns = len(status_line) - self._meter.number_of_columns()
+            ellipsis = '...'
+            if len(test_name) < overflow_columns + len(ellipsis) + 2:
+                # We don't have enough space even if we elide, so just show the test filename.
+                fs = self._port.host.filesystem
+                test_name = fs.split(test_name)[1]
+            else:
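+                # Elide the middle of the test name so that both the leading
+                # directories and the file name stay visible.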
+                new_length = len(test_name) - overflow_columns - len(ellipsis)
+                prefix = int(new_length / 2)
+                test_name = test_name[:prefix] + ellipsis + test_name[-(new_length - prefix):]
+        return format_string % (self.num_completed, self.num_tests, test_name, suffix)
+
+    def print_started_test(self, test_name):
+        self._running_tests.append(test_name)
+        if len(self._running_tests) > 1:
+            suffix = ' (+%d)' % (len(self._running_tests) - 1)
+        else:
+            suffix = ''
+        if self._options.verbose:
+            write = self._meter.write_update
+        else:
+            write = self._meter.write_throttled_update
+        write(self._test_status_line(test_name, suffix))
+
+    def print_finished_test(self, result, expected, exp_str, got_str):
+        self.num_completed += 1
+        test_name = result.test_name
+
+        result_message = self._result_message(result.type, result.failures, expected, self._options.verbose)
+
+        if self._options.details:
+            self._print_test_trace(result, exp_str, got_str)
+        elif (self._options.verbose and not self._options.debug_rwt_logging) or not expected:
+            self.writeln(self._test_status_line(test_name, result_message))
+        elif self.num_completed == self.num_tests:
+            self._meter.write_update('')
+        else:
+            if test_name == self._running_tests[0]:
+                self._completed_tests.insert(0, [test_name, result_message])
+            else:
+                self._completed_tests.append([test_name, result_message])
+
+            for test_name, result_message in self._completed_tests:
+                self._meter.write_throttled_update(self._test_status_line(test_name, result_message))
+            self._completed_tests = []
+        self._running_tests.remove(test_name)
+
+    def _result_message(self, result_type, failures, expected, verbose):
+        exp_string = ' unexpectedly' if not expected else ''
+        if result_type == test_expectations.PASS:
+            return ' passed%s' % exp_string
+        else:
+            return ' failed%s (%s)' % (exp_string, ', '.join(failure.message() for failure in failures))
+
+
+    def _print_test_trace(self, result, exp_str, got_str):
+        test_name = result.test_name
+        self._print_default(self._test_status_line(test_name, ''))
+
+        base = self._port.lookup_virtual_test_base(test_name)
+        if base:
+            args = ' '.join(self._port.lookup_virtual_test_args(test_name))
+            self._print_default(' base: %s' % base)
+            self._print_default(' args: %s' % args)
+
+        for extension in ('.txt', '.png', '.wav', '.webarchive'):
+            self._print_baseline(test_name, extension)
+
+        self._print_default('  exp: %s' % exp_str)
+        self._print_default('  got: %s' % got_str)
+        self._print_default(' took: %-.3f' % result.test_run_time)
+        self._print_default('')
+
+    def _print_baseline(self, test_name, extension):
+        baseline = self._port.expected_filename(test_name, extension)
+        if self._port._filesystem.exists(baseline):
+            relpath = self._port.relative_test_filename(baseline)
+        else:
+            relpath = '<none>'
+        self._print_default('  %s: %s' % (extension[1:], relpath))
+
+    def _print_unexpected_results(self, unexpected_results):
+        # Prints to the buildbot stream
+        passes = {}
+        flaky = {}
+        regressions = {}
+
+        def add_to_dict_of_lists(dict, key, value):
+            dict.setdefault(key, []).append(value)
+
+        def add_result(test, results, passes=passes, flaky=flaky, regressions=regressions):
+            actual = results['actual'].split(" ")
+            expected = results['expected'].split(" ")
+            if actual == ['PASS']:
+                if 'CRASH' in expected:
+                    add_to_dict_of_lists(passes, 'Expected to crash, but passed', test)
+                elif 'TIMEOUT' in expected:
+                    add_to_dict_of_lists(passes, 'Expected to timeout, but passed', test)
+                else:
+                    add_to_dict_of_lists(passes, 'Expected to fail, but passed', test)
+            elif len(actual) > 1:
+                # We group flaky tests by the first actual result we got.
+                add_to_dict_of_lists(flaky, actual[0], test)
+            else:
+                add_to_dict_of_lists(regressions, results['actual'], test)
+
+        resultsjsonparser.for_each_test(unexpected_results['tests'], add_result)
+
+        if len(passes) or len(flaky) or len(regressions):
+            self._print_for_bot("")
+        if len(passes):
+            for key, tests in passes.iteritems():
+                self._print_for_bot("%s: (%d)" % (key, len(tests)))
+                tests.sort()
+                for test in tests:
+                    self._print_for_bot("  %s" % test)
+                self._print_for_bot("")
+            self._print_for_bot("")
+
+        if len(flaky):
+            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
+            for key, tests in flaky.iteritems():
+                result = TestExpectations.EXPECTATIONS[key.lower()]
+                self._print_for_bot("Unexpected flakiness: %s (%d)" % (descriptions[result], len(tests)))
+                tests.sort()
+
+                for test in tests:
+                    result = resultsjsonparser.result_for_test(unexpected_results['tests'], test)
+                    actual = result['actual'].split(" ")
+                    expected = result['expected'].split(" ")
+                    result = TestExpectations.EXPECTATIONS[key.lower()]
+                    # FIXME: clean this up once the old syntax is gone
+                    new_expectations_list = [TestExpectationParser._inverted_expectation_tokens[exp] for exp in list(set(actual) | set(expected))]
+                    self._print_for_bot("  %s [ %s ]" % (test, " ".join(new_expectations_list)))
+                self._print_for_bot("")
+            self._print_for_bot("")
+
+        if len(regressions):
+            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
+            for key, tests in regressions.iteritems():
+                result = TestExpectations.EXPECTATIONS[key.lower()]
+                self._print_for_bot("Regressions: Unexpected %s (%d)" % (descriptions[result], len(tests)))
+                tests.sort()
+                for test in tests:
+                    self._print_for_bot("  %s [ %s ]" % (test, TestExpectationParser._inverted_expectation_tokens[key]))
+                self._print_for_bot("")
+
+        if len(unexpected_results['tests']) and self._options.debug_rwt_logging:
+            self._print_for_bot("%s" % ("-" * 78))
+
+    def _print_quiet(self, msg):
+        self.writeln(msg)
+
+    def _print_default(self, msg):
+        if not self._options.quiet:
+            self.writeln(msg)
+
+    def _print_debug(self, msg):
+        if self._options.debug_rwt_logging:
+            self.writeln(msg)
+
+    def _print_for_bot(self, msg):
+        self._buildbot_stream.write(msg + "\n")
+
+    def write_update(self, msg):
+        self._meter.write_update(msg)
+
+    def writeln(self, msg):
+        self._meter.writeln(msg)
+
+    def flush(self):
+        self._meter.flush()
diff --git a/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py b/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py
new file mode 100644
index 0000000..bc30092
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py
@@ -0,0 +1,293 @@
+#!/usr/bin/python
+# Copyright (C) 2010, 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for printing.py."""
+
+import optparse
+import StringIO
+import sys
+import time
+import unittest
+
+from webkitpy.common.host_mock import MockHost
+
+from webkitpy.common.system import logtesting
+from webkitpy.layout_tests import port
+from webkitpy.layout_tests.controllers import manager
+from webkitpy.layout_tests.models import result_summary
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.layout_tests.models import test_results
+from webkitpy.layout_tests.views import printing
+
+
+def get_options(args):
+    print_options = printing.print_options()
+    option_parser = optparse.OptionParser(option_list=print_options)
+    return option_parser.parse_args(args)
+
+
+class TestUtilityFunctions(unittest.TestCase):
+    def test_print_options(self):
+        options, args = get_options([])
+        self.assertTrue(options is not None)
+
+
+class Testprinter(unittest.TestCase):
+    def assertEmpty(self, stream):
+        self.assertFalse(stream.getvalue())
+
+    def assertNotEmpty(self, stream):
+        self.assertTrue(stream.getvalue())
+
+    def assertWritten(self, stream, contents):
+        self.assertEquals(stream.buflist, contents)
+
+    def reset(self, stream):
+        stream.buflist = []
+        stream.buf = ''
+
+    def get_printer(self, args=None):
+        args = args or []
+        printing_options = printing.print_options()
+        option_parser = optparse.OptionParser(option_list=printing_options)
+        options, args = option_parser.parse_args(args)
+        host = MockHost()
+        self._port = host.port_factory.get('test', options)
+        nproc = 2
+
+        regular_output = StringIO.StringIO()
+        buildbot_output = StringIO.StringIO()
+        printer = printing.Printer(self._port, options, regular_output, buildbot_output)
+        return printer, regular_output, buildbot_output
+
+    def get_result(self, test_name, result_type=test_expectations.PASS, run_time=0):
+        failures = []
+        if result_type == test_expectations.TIMEOUT:
+            failures = [test_failures.FailureTimeout()]
+        elif result_type == test_expectations.CRASH:
+            failures = [test_failures.FailureCrash()]
+        return test_results.TestResult(test_name, failures=failures, test_run_time=run_time)
+
+    def get_result_summary(self, test_names, expectations_str):
+        port.test_expectations = lambda: expectations_str
+        port.test_expectations_overrides = lambda: None
+        expectations = test_expectations.TestExpectations(self._port, test_names)
+
+        rs = result_summary.ResultSummary(expectations, test_names, 1, set())
+        return test_names, rs, expectations
+
+    def test_configure_and_cleanup(self):
+        # This test verifies that calling cleanup repeatedly and deleting
+        # the object is safe.
+        printer, err, out = self.get_printer()
+        printer.cleanup()
+        printer.cleanup()
+        printer = None
+
+    def test_print_config(self):
+        printer, err, out = self.get_printer()
+        # FIXME: it's lame that I have to set these options directly.
+        printer._options.pixel_tests = True
+        printer._options.new_baseline = True
+        printer._options.time_out_ms = 6000
+        printer._options.slow_time_out_ms = 12000
+        printer.print_config('/tmp')
+        self.assertTrue("Using port 'test-mac-leopard'" in err.getvalue())
+        self.assertTrue('Test configuration: <leopard, x86, release>' in err.getvalue())
+        self.assertTrue('Placing test results in /tmp' in err.getvalue())
+        self.assertTrue('Baseline search path: test-mac-leopard -> test-mac-snowleopard -> generic' in err.getvalue())
+        self.assertTrue('Using Release build' in err.getvalue())
+        self.assertTrue('Pixel tests enabled' in err.getvalue())
+        self.assertTrue('Command line:' in err.getvalue())
+        self.assertTrue('Regular timeout: ' in err.getvalue())
+
+        self.reset(err)
+        printer._options.quiet = True
+        printer.print_config('/tmp')
+        self.assertFalse('Baseline search path: test-mac-leopard -> test-mac-snowleopard -> generic' in err.getvalue())
+
+    def test_print_one_line_summary(self):
+        printer, err, out = self.get_printer()
+        printer._print_one_line_summary(1, 1, 0)
+        self.assertWritten(err, ["The test ran as expected.\n", "\n"])
+
+        printer, err, out = self.get_printer()
+        printer._print_one_line_summary(2, 1, 1)
+        self.assertWritten(err, ["\n", "1 test ran as expected, 1 didn't:\n", "\n"])
+
+        printer, err, out = self.get_printer()
+        printer._print_one_line_summary(3, 2, 1)
+        self.assertWritten(err, ["\n", "2 tests ran as expected, 1 didn't:\n", "\n"])
+
+        printer, err, out = self.get_printer()
+        printer._print_one_line_summary(3, 2, 0)
+        self.assertWritten(err, ['\n', "2 tests ran as expected (1 didn't run).\n", '\n'])
+
+    def test_print_unexpected_results(self):
+        # This routine is the only one that prints stuff that the bots
+        # care about.
+        #
+        # FIXME: there's some weird layering going on here. It seems
+        # like we shouldn't be both using an expectations string and
+        # having to specify whether or not the result was expected.
+        # This whole set of tests should probably be rewritten.
+        #
+        # FIXME: Plus, the fact that we're having to call into
+        # run_webkit_tests is clearly a layering inversion.
+        def get_unexpected_results(expected, passing, flaky):
+            """Return an unexpected results summary matching the input description.
+
+            There are a lot of different combinations of test results that
+            can be tested; this routine produces various combinations based
+            on the values of the input flags.
+
+            Args:
+                expected: whether the tests ran as expected
+                passing: whether the tests should all pass
+                flaky: whether the tests should be flaky (if False, they
+                    produce the same results on both runs; if True, they
+                    all pass on the second run).
+
+            """
+            test_is_slow = False
+            paths, rs, exp = self.get_result_summary(tests, expectations)
+            if expected:
+                rs.add(self.get_result('passes/text.html', test_expectations.PASS), expected, test_is_slow)
+                rs.add(self.get_result('failures/expected/timeout.html', test_expectations.TIMEOUT), expected, test_is_slow)
+                rs.add(self.get_result('failures/expected/crash.html', test_expectations.CRASH), expected, test_is_slow)
+            elif passing:
+                rs.add(self.get_result('passes/text.html'), expected, test_is_slow)
+                rs.add(self.get_result('failures/expected/timeout.html'), expected, test_is_slow)
+                rs.add(self.get_result('failures/expected/crash.html'), expected, test_is_slow)
+            else:
+                rs.add(self.get_result('passes/text.html', test_expectations.TIMEOUT), expected, test_is_slow)
+                rs.add(self.get_result('failures/expected/timeout.html', test_expectations.CRASH), expected, test_is_slow)
+                rs.add(self.get_result('failures/expected/crash.html', test_expectations.TIMEOUT), expected, test_is_slow)
+            retry = rs
+            if flaky:
+                paths, retry, exp = self.get_result_summary(tests, expectations)
+                retry.add(self.get_result('passes/text.html'), True, test_is_slow)
+                retry.add(self.get_result('failures/expected/timeout.html'), True, test_is_slow)
+                retry.add(self.get_result('failures/expected/crash.html'), True, test_is_slow)
+            unexpected_results = manager.summarize_results(self._port, exp, rs, retry, test_timings={}, only_unexpected=True, interrupted=False)
+            return unexpected_results
+
+        tests = ['passes/text.html', 'failures/expected/timeout.html', 'failures/expected/crash.html']
+        expectations = ''
+
+        printer, err, out = self.get_printer()
+
+        # test everything running as expected
+        ur = get_unexpected_results(expected=True, passing=False, flaky=False)
+        printer._print_unexpected_results(ur)
+        self.assertEmpty(err)
+        self.assertEmpty(out)
+
+        # test failures
+        printer, err, out = self.get_printer()
+        ur = get_unexpected_results(expected=False, passing=False, flaky=False)
+        printer._print_unexpected_results(ur)
+        self.assertEmpty(err)
+        self.assertNotEmpty(out)
+
+        # test unexpected flaky
+        printer, err, out = self.get_printer()
+        ur = get_unexpected_results(expected=False, passing=False, flaky=True)
+        printer._print_unexpected_results(ur)
+        self.assertEmpty(err)
+        self.assertNotEmpty(out)
+
+        printer, err, out = self.get_printer()
+        ur = get_unexpected_results(expected=False, passing=False, flaky=False)
+        printer._print_unexpected_results(ur)
+        self.assertEmpty(err)
+        self.assertNotEmpty(out)
+
+        expectations = """
+BUGX : failures/expected/crash.html = CRASH
+BUGX : failures/expected/timeout.html = TIMEOUT
+"""
+        printer, err, out = self.get_printer()
+        ur = get_unexpected_results(expected=False, passing=False, flaky=False)
+        printer._print_unexpected_results(ur)
+        self.assertEmpty(err)
+        self.assertNotEmpty(out)
+
+        printer, err, out = self.get_printer()
+        ur = get_unexpected_results(expected=False, passing=True, flaky=False)
+        printer._print_unexpected_results(ur)
+        self.assertEmpty(err)
+        self.assertNotEmpty(out)
+
+    def test_print_unexpected_results_buildbot(self):
+        # FIXME: Test that print_unexpected_results() produces the output the
+        # buildbot is expecting.
+        pass
+
+    def test_test_status_line(self):
+        printer, _, _ = self.get_printer()
+        printer._meter.number_of_columns = lambda: 80
+        actual = printer._test_status_line('fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
+        self.assertEquals(80, len(actual))
+        self.assertEquals(actual, '[0/0] fast/dom/HTMLFormElement/associa...after-index-assertion-fail1.html passed')
+
+        printer._meter.number_of_columns = lambda: 89
+        actual = printer._test_status_line('fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
+        self.assertEquals(89, len(actual))
+        self.assertEquals(actual, '[0/0] fast/dom/HTMLFormElement/associated-...ents-after-index-assertion-fail1.html passed')
+
+        printer._meter.number_of_columns = lambda: sys.maxint
+        actual = printer._test_status_line('fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
+        self.assertEquals(90, len(actual))
+        self.assertEquals(actual, '[0/0] fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html passed')
+
+        printer._meter.number_of_columns = lambda: 18
+        actual = printer._test_status_line('fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
+        self.assertEquals(18, len(actual))
+        self.assertEquals(actual, '[0/0] f...l passed')
+
+        printer._meter.number_of_columns = lambda: 10
+        actual = printer._test_status_line('fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
+        self.assertEquals(actual, '[0/0] associated-elements-after-index-assertion-fail1.html passed')
+
+    def test_details(self):
+        printer, err, _ = self.get_printer(['--details'])
+        result = self.get_result('passes/image.html')
+        printer.print_started_test('passes/image.html')
+        printer.print_finished_test(result, expected=False, exp_str='', got_str='')
+        self.assertNotEmpty(err)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/performance_tests/__init__.py b/Tools/Scripts/webkitpy/performance_tests/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/Tools/Scripts/webkitpy/performance_tests/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftest.py b/Tools/Scripts/webkitpy/performance_tests/perftest.py
new file mode 100644
index 0000000..9e2f87d
--- /dev/null
+++ b/Tools/Scripts/webkitpy/performance_tests/perftest.py
@@ -0,0 +1,392 @@
+#!/usr/bin/env python
+# Copyright (C) 2012 Google Inc. All rights reserved.
+# Copyright (C) 2012 Zoltan Horvath, Adobe Systems Incorporated. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+import errno
+import logging
+import math
+import re
+import os
+import signal
+import socket
+import subprocess
+import sys
+import time
+
+# Import for auto-install
+if sys.platform not in ('cygwin', 'win32'):
+    # FIXME: webpagereplay doesn't work on win32. See https://bugs.webkit.org/show_bug.cgi?id=88279.
+    import webkitpy.thirdparty.autoinstalled.webpagereplay.replay
+
+from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
+from webkitpy.layout_tests.port.driver import DriverInput
+from webkitpy.layout_tests.port.driver import DriverOutput
+
+
+_log = logging.getLogger(__name__)
+
+
+class PerfTest(object):
+    def __init__(self, port, test_name, path_or_url):
+        self._port = port
+        self._test_name = test_name
+        self._path_or_url = path_or_url
+
+    def test_name(self):
+        return self._test_name
+
+    def path_or_url(self):
+        return self._path_or_url
+
+    def prepare(self, time_out_ms):
+        return True
+
+    def run(self, driver, time_out_ms):
+        output = self.run_single(driver, self.path_or_url(), time_out_ms)
+        if self.run_failed(output):
+            return None
+        return self.parse_output(output)
+
+    def run_single(self, driver, path_or_url, time_out_ms, should_run_pixel_test=False):
+        return driver.run_test(DriverInput(path_or_url, time_out_ms, image_hash=None, should_run_pixel_test=should_run_pixel_test), stop_when_done=False)
+
+    def run_failed(self, output):
+        if output.text is None or output.error:
+            pass
+        elif output.timeout:
+            _log.error('timeout: %s' % self.test_name())
+        elif output.crash:
+            _log.error('crash: %s' % self.test_name())
+        else:
+            return False
+
+        if output.error:
+            _log.error('error: %s\n%s' % (self.test_name(), output.error))
+
+        return True
+
+    _lines_to_ignore_in_parser_result = [
+        re.compile(r'^Running \d+ times$'),
+        re.compile(r'^Ignoring warm-up '),
+        re.compile(r'^Info:'),
+        re.compile(r'^\d+(\.\d+)?(\s*(runs\/s|ms|fps))?$'),
+        # The following are for handling existing tests like Dromaeo
+        re.compile(re.escape("""main frame - has 1 onunload handler(s)""")),
+        re.compile(re.escape("""frame "<!--framePath //<!--frame0-->-->" - has 1 onunload handler(s)""")),
+        re.compile(re.escape("""frame "<!--framePath //<!--frame0-->/<!--frame0-->-->" - has 1 onunload handler(s)""")),
+        # The following is for html5.html
+        re.compile(re.escape("""Blocked access to external URL http://www.whatwg.org/specs/web-apps/current-work/"""))]
+
+    def _should_ignore_line_in_parser_test_result(self, line):
+        if not line:
+            return True
+        for regex in self._lines_to_ignore_in_parser_result:
+            if regex.search(line):
+                return True
+        return False
+
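+    # These regexes parse the statistics block printed by performance tests; an
+    # illustrative example of the expected format:
+    #   Description: <free-form description>
+    #   Time:
+    #   values 1080, 1090, 1100 ms
+    #   avg 1090 ms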
+    _description_regex = re.compile(r'^Description: (?P<description>.*)$', re.IGNORECASE)
+    _result_classes = ['Time', 'JS Heap', 'Malloc']
+    _result_class_regex = re.compile(r'^(?P<resultclass>' + r'|'.join(_result_classes) + '):')
+    _statistics_keys = ['avg', 'median', 'stdev', 'min', 'max', 'unit', 'values']
+    _score_regex = re.compile(r'^(?P<key>' + r'|'.join(_statistics_keys) + r')\s+(?P<value>([0-9\.]+(,\s+)?)+)\s*(?P<unit>.*)')
+
+    def parse_output(self, output):
+        test_failed = False
+        results = {}
+        ordered_results_keys = []
+        test_name = re.sub(r'\.\w+$', '', self._test_name)
+        description_string = ""
+        result_class = ""
+        for line in re.split('\n', output.text):
+            description = self._description_regex.match(line)
+            if description:
+                description_string = description.group('description')
+                continue
+
+            result_class_match = self._result_class_regex.match(line)
+            if result_class_match:
+                result_class = result_class_match.group('resultclass')
+                continue
+
+            score = self._score_regex.match(line)
+            if score:
+                key = score.group('key')
+                if key == 'values':
+                    value = [float(number) for number in score.group('value').split(', ')]
+                else:
+                    value = float(score.group('value'))
+                unit = score.group('unit')
+                name = test_name
+                if result_class != 'Time':
+                    name += ':' + result_class.replace(' ', '')
+                if name not in ordered_results_keys:
+                    ordered_results_keys.append(name)
+                results.setdefault(name, {})
+                results[name]['unit'] = unit
+                results[name][key] = value
+                continue
+
+            if not self._should_ignore_line_in_parser_test_result(line):
+                test_failed = True
+                _log.error(line)
+
+        if test_failed:
+            return None
+
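+        # Require every statistic in _statistics_keys to have been reported, except that 'values' may be missing.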
+        if set(self._statistics_keys) != set(results[test_name].keys() + ['values']):
+            # values is not provided by Dromaeo tests.
+            _log.error("The test didn't report all statistics.")
+            return None
+
+        for result_name in ordered_results_keys:
+            if result_name == test_name:
+                self.output_statistics(result_name, results[result_name], description_string)
+            else:
+                self.output_statistics(result_name, results[result_name])
+        return results
+
+    def output_statistics(self, test_name, results, description_string=None):
+        unit = results['unit']
+        if description_string:
+            _log.info('DESCRIPTION: %s' % description_string)
+        _log.info('RESULT %s= %s %s' % (test_name.replace(':', ': ').replace('/', ': '), results['avg'], unit))
+        _log.info(', '.join(['%s= %s %s' % (key, results[key], unit) for key in self._statistics_keys[1:5]]))
+
+
+class ChromiumStylePerfTest(PerfTest):
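+    # Matches Chromium-style result lines, e.g. "RESULT some-test: some-metric= 1234 ms".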
+    _chromium_style_result_regex = re.compile(r'^RESULT\s+(?P<name>[^=]+)\s*=\s+(?P<value>\d+(\.\d+)?)\s*(?P<unit>\w+)$')
+
+    def __init__(self, port, test_name, path_or_url):
+        super(ChromiumStylePerfTest, self).__init__(port, test_name, path_or_url)
+
+    def parse_output(self, output):
+        test_failed = False
+        results = {}
+        for line in re.split('\n', output.text):
+            resultLine = ChromiumStylePerfTest._chromium_style_result_regex.match(line)
+            if resultLine:
+                # FIXME: Store the unit
+                results[self.test_name() + ':' + resultLine.group('name').replace(' ', '')] = float(resultLine.group('value'))
+                _log.info(line)
+            elif line:
+                test_failed = True
+                _log.error(line)
+        return results if results and not test_failed else None
+
+
+class PageLoadingPerfTest(PerfTest):
+    _FORCE_GC_FILE = 'resources/force-gc.html'
+
+    def __init__(self, port, test_name, path_or_url):
+        super(PageLoadingPerfTest, self).__init__(port, test_name, path_or_url)
+        self.force_gc_test = self._port.host.filesystem.join(self._port.perf_tests_dir(), self._FORCE_GC_FILE)
+
+    def run_single(self, driver, path_or_url, time_out_ms, should_run_pixel_test=False):
+        # Force GC to prevent pageload noise. See https://bugs.webkit.org/show_bug.cgi?id=98203
+        super(PageLoadingPerfTest, self).run_single(driver, self.force_gc_test, time_out_ms, False)
+        return super(PageLoadingPerfTest, self).run_single(driver, path_or_url, time_out_ms, should_run_pixel_test)
+
+    def calculate_statistics(self, values):
+        sorted_values = sorted(values)
+
+        # Compute the mean and variance using Knuth's online algorithm (has good numerical stability).
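+        # At step k: delta = x_k - mean_{k-1}; mean_k = mean_{k-1} + delta / k;
+        # squareSum_k = squareSum_{k-1} + delta * (x_k - mean_k). The sample
+        # standard deviation below is sqrt(squareSum_n / (n - 1)).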
+        squareSum = 0
+        mean = 0
+        for i, time in enumerate(sorted_values):
+            delta = time - mean
+            sweep = i + 1.0
+            mean += delta / sweep
+            squareSum += delta * (time - mean)
+
+        middle = int(len(sorted_values) / 2)
+        result = {'avg': mean,
+            'min': sorted_values[0],
+            'max': sorted_values[-1],
+            'median': sorted_values[middle] if len(sorted_values) % 2 else (sorted_values[middle - 1] + sorted_values[middle]) / 2,
+            'stdev': math.sqrt(squareSum / (len(sorted_values) - 1))}
+        return result
+
+    def run(self, driver, time_out_ms):
+        results = {}
+        results.setdefault(self.test_name(), {'unit': 'ms', 'values': []})
+
+        for i in range(0, 20):
+            output = self.run_single(driver, self.path_or_url(), time_out_ms)
+            if not output or self.run_failed(output):
+                return None
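+            # The first run is discarded as a warm-up; only the remaining 19 timings are recorded.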
+            if i == 0:
+                continue
+
+            results[self.test_name()]['values'].append(output.test_time * 1000)
+
+            if not output.measurements:
+                continue
+
+            for result_class, result in output.measurements.items():
+                name = self.test_name() + ':' + result_class
+                if name not in results:
+                    results.setdefault(name, {'values': []})
+                results[name]['values'].append(result)
+                if result_class == 'Malloc' or result_class == 'JSHeap':
+                    results[name]['unit'] = 'bytes'
+
+        for result_class in results.keys():
+            results[result_class].update(self.calculate_statistics(results[result_class]['values']))
+            self.output_statistics(result_class, results[result_class], '')
+
+        return results
+
+
+class ReplayServer(object):
+    def __init__(self, archive, record):
+        self._process = None
+
+        # FIXME: Should error if local proxy isn't set to forward requests to localhost:8080 and localhost:8443
+
+        replay_path = webkitpy.thirdparty.autoinstalled.webpagereplay.replay.__file__
+        args = ['python', replay_path, '--no-dns_forwarding', '--port', '8080', '--ssl_port', '8443', '--use_closest_match', '--log_level', 'warning']
+        if record:
+            args.append('--record')
+        args.append(archive)
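+        # Resulting invocation: python <replay.py> --no-dns_forwarding --port 8080 --ssl_port 8443 --use_closest_match --log_level warning [--record] <archive>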
+
+        self._process = subprocess.Popen(args)
+
+    def wait_until_ready(self):
+        for i in range(0, 3):
+            try:
+                connection = socket.create_connection(('localhost', '8080'), timeout=1)
+                connection.close()
+                return True
+            except socket.error:
+                time.sleep(1)
+                continue
+        return False
+
+    def stop(self):
+        if self._process:
+            self._process.send_signal(signal.SIGINT)
+            self._process.wait()
+        self._process = None
+
+    def __del__(self):
+        self.stop()
+
+
+class ReplayPerfTest(PageLoadingPerfTest):
+    def __init__(self, port, test_name, path_or_url):
+        super(ReplayPerfTest, self).__init__(port, test_name, path_or_url)
+
+    def _start_replay_server(self, archive, record):
+        try:
+            return ReplayServer(archive, record)
+        except OSError as error:
+            if error.errno == errno.ENOENT:
+                _log.error("Replay tests require web-page-replay.")
+            else:
+                raise error
+
+    def prepare(self, time_out_ms):
+        filesystem = self._port.host.filesystem
+        path_without_ext = filesystem.splitext(self.path_or_url())[0]
+
+        self._archive_path = filesystem.join(path_without_ext + '.wpr')
+        self._expected_image_path = filesystem.join(path_without_ext + '-expected.png')
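+        # The first line of a .replay test file is the URL to load during replay.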
+        self._url = filesystem.read_text_file(self.path_or_url()).split('\n')[0]
+
+        if filesystem.isfile(self._archive_path) and filesystem.isfile(self._expected_image_path):
+            _log.info("Replay ready for %s" % self._archive_path)
+            return True
+
+        _log.info("Preparing replay for %s" % self.test_name())
+
+        driver = self._port.create_driver(worker_number=1, no_timeout=True)
+        try:
+            output = self.run_single(driver, self._archive_path, time_out_ms, record=True)
+        finally:
+            driver.stop()
+
+        if not output or not filesystem.isfile(self._archive_path):
+            _log.error("Failed to prepare a replay for %s" % self.test_name())
+            return False
+
+        _log.info("Prepared replay for %s" % self.test_name())
+
+        return True
+
+    def run_single(self, driver, url, time_out_ms, record=False):
+        server = self._start_replay_server(self._archive_path, record)
+        if not server:
+            _log.error("Web page replay didn't start.")
+            return None
+
+        try:
+            _log.debug("Waiting for Web page replay to start.")
+            if not server.wait_until_ready():
+                _log.error("Web page replay didn't start.")
+                return None
+
+            _log.debug("Web page replay started. Loading the page.")
+            output = super(ReplayPerfTest, self).run_single(driver, self._url, time_out_ms, should_run_pixel_test=True)
+            if self.run_failed(output):
+                return None
+
+            if not output.image:
+                _log.error("Loading the page did not generate image results")
+                _log.error(output.text)
+                return None
+
+            filesystem = self._port.host.filesystem
+            dirname = filesystem.dirname(self._archive_path)
+            filename = filesystem.split(self._archive_path)[1]
+            writer = TestResultWriter(filesystem, self._port, dirname, filename)
+            if record:
+                writer.write_image_files(actual_image=None, expected_image=output.image)
+            else:
+                writer.write_image_files(actual_image=output.image, expected_image=None)
+
+            return output
+        finally:
+            server.stop()
+
+
+class PerfTestFactory(object):
+
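+    # Test names matching these patterns get a specialized test class; anything else falls back to the plain PerfTest (see create_perf_test).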
+    _pattern_map = [
+        (re.compile(r'^inspector/'), ChromiumStylePerfTest),
+        (re.compile(r'(.+)\.replay$'), ReplayPerfTest),
+    ]
+
+    @classmethod
+    def create_perf_test(cls, port, test_name, path):
+        for (pattern, test_class) in cls._pattern_map:
+            if pattern.match(test_name):
+                return test_class(port, test_name, path)
+        return PerfTest(port, test_name, path)
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py b/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
new file mode 100755
index 0000000..259fc78
--- /dev/null
+++ b/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
@@ -0,0 +1,367 @@
+#!/usr/bin/python
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import StringIO
+import math
+import unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.layout_tests.port.driver import DriverOutput
+from webkitpy.layout_tests.port.test import TestDriver
+from webkitpy.layout_tests.port.test import TestPort
+from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
+from webkitpy.performance_tests.perftest import PageLoadingPerfTest
+from webkitpy.performance_tests.perftest import PerfTest
+from webkitpy.performance_tests.perftest import PerfTestFactory
+from webkitpy.performance_tests.perftest import ReplayPerfTest
+
+
+class MockPort(TestPort):
+    def __init__(self, custom_run_test=None):
+        super(MockPort, self).__init__(host=MockHost(), custom_run_test=custom_run_test)
+
+class MainTest(unittest.TestCase):
+    def test_parse_output(self):
+        output = DriverOutput('\n'.join([
+            'Running 20 times',
+            'Ignoring warm-up run (1115)',
+            '',
+            'Time:',
+            'values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms',
+            'avg 1100 ms',
+            'median 1101 ms',
+            'stdev 11 ms',
+            'min 1080 ms',
+            'max 1120 ms']), image=None, image_hash=None, audio=None)
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+        try:
+            test = PerfTest(None, 'some-test', '/path/some-dir/some-test')
+            self.assertEqual(test.parse_output(output),
+                {'some-test': {'avg': 1100.0, 'median': 1101.0, 'min': 1080.0, 'max': 1120.0, 'stdev': 11.0, 'unit': 'ms',
+                    'values': [i for i in range(1, 20)]}})
+        finally:
+            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+        self.assertEqual(actual_stdout, '')
+        self.assertEqual(actual_stderr, '')
+        self.assertEqual(actual_logs, 'RESULT some-test= 1100.0 ms\nmedian= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms\n')
+
+    def test_parse_output_with_failing_line(self):
+        output = DriverOutput('\n'.join([
+            'Running 20 times',
+            'Ignoring warm-up run (1115)',
+            '',
+            'some-unrecognizable-line',
+            '',
+            'Time:',
+            'values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms',
+            'avg 1100 ms',
+            'median 1101 ms',
+            'stdev 11 ms',
+            'min 1080 ms',
+            'max 1120 ms']), image=None, image_hash=None, audio=None)
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+        try:
+            test = PerfTest(None, 'some-test', '/path/some-dir/some-test')
+            self.assertEqual(test.parse_output(output), None)
+        finally:
+            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+        self.assertEqual(actual_stdout, '')
+        self.assertEqual(actual_stderr, '')
+        self.assertEqual(actual_logs, 'some-unrecognizable-line\n')
+
+
+class TestPageLoadingPerfTest(unittest.TestCase):
+    class MockDriver(object):
+        def __init__(self, values, test, measurements=None):
+            self._values = values
+            self._index = 0
+            self._test = test
+            self._measurements = measurements
+
+        def run_test(self, input, stop_when_done):
+            if input.test_name == self._test.force_gc_test:
+                return
+            value = self._values[self._index]
+            self._index += 1
+            if isinstance(value, str):
+                return DriverOutput('some output', image=None, image_hash=None, audio=None, error=value)
+            else:
+                return DriverOutput('some output', image=None, image_hash=None, audio=None, test_time=self._values[self._index - 1], measurements=self._measurements)
+
+    def test_run(self):
+        port = MockPort()
+        test = PageLoadingPerfTest(port, 'some-test', '/path/some-dir/some-test')
+        driver = TestPageLoadingPerfTest.MockDriver(range(1, 21), test)
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+        try:
+            self.assertEqual(test.run(driver, None),
+                {'some-test': {'max': 20000, 'avg': 11000.0, 'median': 11000, 'stdev': 5627.314338711378, 'min': 2000, 'unit': 'ms',
+                    'values': [i * 1000 for i in range(2, 21)]}})
+        finally:
+            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+        self.assertEqual(actual_stdout, '')
+        self.assertEqual(actual_stderr, '')
+        self.assertEqual(actual_logs, 'RESULT some-test= 11000.0 ms\nmedian= 11000 ms, stdev= 5627.31433871 ms, min= 2000 ms, max= 20000 ms\n')
+
+    def test_run_with_memory_output(self):
+        port = MockPort()
+        test = PageLoadingPerfTest(port, 'some-test', '/path/some-dir/some-test')
+        memory_results = {'Malloc': 10, 'JSHeap': 5}
+        self.maxDiff = None
+        driver = TestPageLoadingPerfTest.MockDriver(range(1, 21), test, memory_results)
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+        try:
+            self.assertEqual(test.run(driver, None),
+                {'some-test': {'max': 20000, 'avg': 11000.0, 'median': 11000, 'stdev': 5627.314338711378, 'min': 2000, 'unit': 'ms',
+                    'values': [i * 1000 for i in range(2, 21)]},
+                 'some-test:Malloc': {'max': 10, 'avg': 10.0, 'median': 10, 'min': 10, 'stdev': 0.0, 'unit': 'bytes',
+                    'values': [10] * 19},
+                 'some-test:JSHeap': {'max': 5, 'avg': 5.0, 'median': 5, 'min': 5, 'stdev': 0.0, 'unit': 'bytes',
+                    'values': [5] * 19}})
+        finally:
+            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+        self.assertEqual(actual_stdout, '')
+        self.assertEqual(actual_stderr, '')
+        self.assertEqual(actual_logs, 'RESULT some-test= 11000.0 ms\nmedian= 11000 ms, stdev= 5627.31433871 ms, min= 2000 ms, max= 20000 ms\n'
+            + 'RESULT some-test: Malloc= 10.0 bytes\nmedian= 10 bytes, stdev= 0.0 bytes, min= 10 bytes, max= 10 bytes\n'
+            + 'RESULT some-test: JSHeap= 5.0 bytes\nmedian= 5 bytes, stdev= 0.0 bytes, min= 5 bytes, max= 5 bytes\n')
+
+    def test_run_with_bad_output(self):
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+        try:
+            port = MockPort()
+            test = PageLoadingPerfTest(port, 'some-test', '/path/some-dir/some-test')
+            driver = TestPageLoadingPerfTest.MockDriver([1, 2, 3, 4, 5, 6, 7, 'some error', 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], test)
+            self.assertEqual(test.run(driver, None), None)
+        finally:
+            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+        self.assertEqual(actual_stdout, '')
+        self.assertEqual(actual_stderr, '')
+        self.assertEqual(actual_logs, 'error: some-test\nsome error\n')
+
+
+class TestReplayPerfTest(unittest.TestCase):
+
+    class ReplayTestPort(MockPort):
+        def __init__(self, custom_run_test=None):
+
+            class ReplayTestDriver(TestDriver):
+                def run_test(self, text_input, stop_when_done):
+                    return custom_run_test(text_input, stop_when_done) if custom_run_test else None
+
+            self._custom_driver_class = ReplayTestDriver
+            super(self.__class__, self).__init__()
+
+        def _driver_class(self):
+            return self._custom_driver_class
+
+    class MockReplayServer(object):
+        def __init__(self, wait_until_ready=True):
+            self.wait_until_ready = lambda: wait_until_ready
+
+        def stop(self):
+            pass
+
+    def _add_file(self, port, dirname, filename, content=True):
+        port.host.filesystem.maybe_make_directory(dirname)
+        port.host.filesystem.write_binary_file(port.host.filesystem.join(dirname, filename), content)
+
+    def _setup_test(self, run_test=None):
+        test_port = self.ReplayTestPort(run_test)
+        self._add_file(test_port, '/path/some-dir', 'some-test.replay', 'http://some-test/')
+        test = ReplayPerfTest(test_port, 'some-test.replay', '/path/some-dir/some-test.replay')
+        test._start_replay_server = lambda archive, record: self.__class__.MockReplayServer()
+        return test, test_port
+
+    def test_run_single(self):
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+
+        loaded_pages = []
+
+        def run_test(test_input, stop_when_done):
+            if test_input.test_name == test.force_gc_test:
+                loaded_pages.append(test_input)
+                return
+            if test_input.test_name != "about:blank":
+                self.assertEqual(test_input.test_name, 'http://some-test/')
+            loaded_pages.append(test_input)
+            self._add_file(port, '/path/some-dir', 'some-test.wpr', 'wpr content')
+            return DriverOutput('actual text', 'actual image', 'actual checksum',
+                audio=None, crash=False, timeout=False, error=False)
+
+        test, port = self._setup_test(run_test)
+        test._archive_path = '/path/some-dir/some-test.wpr'
+        test._url = 'http://some-test/'
+
+        try:
+            driver = port.create_driver(worker_number=1, no_timeout=True)
+            self.assertTrue(test.run_single(driver, '/path/some-dir/some-test.replay', time_out_ms=100))
+        finally:
+            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+
+        self.assertEqual(len(loaded_pages), 2)
+        self.assertEqual(loaded_pages[0].test_name, test.force_gc_test)
+        self.assertEqual(loaded_pages[1].test_name, 'http://some-test/')
+        self.assertEqual(actual_stdout, '')
+        self.assertEqual(actual_stderr, '')
+        self.assertEqual(actual_logs, '')
+        self.assertEqual(port.host.filesystem.read_binary_file('/path/some-dir/some-test-actual.png'), 'actual image')
+
+    def test_run_single_fails_without_webpagereplay(self):
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+
+        test, port = self._setup_test()
+        test._start_replay_server = lambda archive, record: None
+        test._archive_path = '/path/some-dir.wpr'
+        test._url = 'http://some-test/'
+
+        try:
+            driver = port.create_driver(worker_number=1, no_timeout=True)
+            self.assertEqual(test.run_single(driver, '/path/some-dir/some-test.replay', time_out_ms=100), None)
+        finally:
+            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+        self.assertEqual(actual_stdout, '')
+        self.assertEqual(actual_stderr, '')
+        self.assertEqual(actual_logs, "Web page replay didn't start.\n")
+
+    def test_prepare_fails_when_wait_until_ready_fails(self):
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+
+        test, port = self._setup_test()
+        test._start_replay_server = lambda archive, record: self.__class__.MockReplayServer(wait_until_ready=False)
+        test._archive_path = '/path/some-dir.wpr'
+        test._url = 'http://some-test/'
+
+        try:
+            driver = port.create_driver(worker_number=1, no_timeout=True)
+            self.assertEqual(test.run_single(driver, '/path/some-dir/some-test.replay', time_out_ms=100), None)
+        finally:
+            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+
+        self.assertEqual(actual_stdout, '')
+        self.assertEqual(actual_stderr, '')
+        self.assertEqual(actual_logs, "Web page replay didn't start.\n")
+
+    def test_run_single_fails_when_output_has_error(self):
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+
+        loaded_pages = []
+
+        def run_test(test_input, stop_when_done):
+            loaded_pages.append(test_input)
+            self._add_file(port, '/path/some-dir', 'some-test.wpr', 'wpr content')
+            return DriverOutput('actual text', 'actual image', 'actual checksum',
+                audio=None, crash=False, timeout=False, error='some error')
+
+        test, port = self._setup_test(run_test)
+        test._archive_path = '/path/some-dir.wpr'
+        test._url = 'http://some-test/'
+
+        try:
+            driver = port.create_driver(worker_number=1, no_timeout=True)
+            self.assertEqual(test.run_single(driver, '/path/some-dir/some-test.replay', time_out_ms=100), None)
+        finally:
+            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+
+        self.assertEqual(len(loaded_pages), 2)
+        self.assertEqual(loaded_pages[0].test_name, test.force_gc_test)
+        self.assertEqual(loaded_pages[1].test_name, 'http://some-test/')
+        self.assertEqual(actual_stdout, '')
+        self.assertEqual(actual_stderr, '')
+        self.assertEqual(actual_logs, 'error: some-test.replay\nsome error\n')
+
+    def test_prepare(self):
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+
+        def run_test(test_input, stop_when_done):
+            self._add_file(port, '/path/some-dir', 'some-test.wpr', 'wpr content')
+            return DriverOutput('actual text', 'actual image', 'actual checksum',
+                audio=None, crash=False, timeout=False, error=False)
+
+        test, port = self._setup_test(run_test)
+
+        try:
+            self.assertEqual(test.prepare(time_out_ms=100), True)
+        finally:
+            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+
+        self.assertEqual(actual_stdout, '')
+        self.assertEqual(actual_stderr, '')
+        self.assertEqual(actual_logs, 'Preparing replay for some-test.replay\nPrepared replay for some-test.replay\n')
+        self.assertEqual(port.host.filesystem.read_binary_file('/path/some-dir/some-test-expected.png'), 'actual image')
+
+    def test_prepare_calls_run_single(self):
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+        called = [False]
+
+        def run_single(driver, url, time_out_ms, record):
+            self.assertTrue(record)
+            self.assertEqual(url, '/path/some-dir/some-test.wpr')
+            called[0] = True
+            return False
+
+        test, port = self._setup_test()
+        test.run_single = run_single
+
+        try:
+            self.assertEqual(test.prepare(time_out_ms=100), False)
+        finally:
+            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+        self.assertTrue(called[0])
+        self.assertEqual(test._archive_path, '/path/some-dir/some-test.wpr')
+        self.assertEqual(test._url, 'http://some-test/')
+        self.assertEqual(actual_stdout, '')
+        self.assertEqual(actual_stderr, '')
+        self.assertEqual(actual_logs, "Preparing replay for some-test.replay\nFailed to prepare a replay for some-test.replay\n")
+
+class TestPerfTestFactory(unittest.TestCase):
+    def test_regular_test(self):
+        test = PerfTestFactory.create_perf_test(MockPort(), 'some-dir/some-test', '/path/some-dir/some-test')
+        self.assertEqual(test.__class__, PerfTest)
+
+    def test_inspector_test(self):
+        test = PerfTestFactory.create_perf_test(MockPort(), 'inspector/some-test', '/path/inspector/some-test')
+        self.assertEqual(test.__class__, ChromiumStylePerfTest)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
new file mode 100755
index 0000000..42e0d96
--- /dev/null
+++ b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
@@ -0,0 +1,324 @@
+#!/usr/bin/env python
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Run Inspector's perf tests in perf mode."""
+
+import os
+import json
+import logging
+import optparse
+import time
+
+from webkitpy.common import find_files
+from webkitpy.common.checkout.scm.detection import SCMDetector
+from webkitpy.common.host import Host
+from webkitpy.common.net.file_uploader import FileUploader
+from webkitpy.performance_tests.perftest import PerfTestFactory
+
+
+_log = logging.getLogger(__name__)
+
+
+class PerfTestsRunner(object):
+    _default_branch = 'webkit-trunk'
+    EXIT_CODE_BAD_BUILD = -1
+    EXIT_CODE_BAD_SOURCE_JSON = -2
+    EXIT_CODE_BAD_MERGE = -3
+    EXIT_CODE_FAILED_UPLOADING = -4
+    EXIT_CODE_BAD_PREPARATION = -5
+
+    _DEFAULT_JSON_FILENAME = 'PerformanceTestsResults.json'
+
+    def __init__(self, args=None, port=None):
+        self._options, self._args = PerfTestsRunner._parse_args(args)
+        if port:
+            self._port = port
+            self._host = self._port.host
+        else:
+            self._host = Host()
+            self._port = self._host.port_factory.get(self._options.platform, self._options)
+        self._host.initialize_scm()
+        self._webkit_base_dir_len = len(self._port.webkit_base())
+        self._base_path = self._port.perf_tests_dir()
+        self._results = {}
+        self._timestamp = time.time()
+
+    @staticmethod
+    def _parse_args(args=None):
+        def _expand_path(option, opt_str, value, parser):
+            path = os.path.expandvars(os.path.expanduser(value))
+            setattr(parser.values, option.dest, path)
+        perf_option_list = [
+            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
+                help='Set the configuration to Debug'),
+            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
+                help='Set the configuration to Release'),
+            optparse.make_option("--platform",
+                help="Specify port/platform being tested (i.e. chromium-mac)"),
+            optparse.make_option("--chromium",
+                action="store_const", const='chromium', dest='platform', help='Alias for --platform=chromium'),
+            optparse.make_option("--builder-name",
+                help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
+            optparse.make_option("--build-number",
+                help=("The build number of the builder running this script.")),
+            optparse.make_option("--build", dest="build", action="store_true", default=True,
+                help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
+            optparse.make_option("--no-build", dest="build", action="store_false",
+                help="Don't check to see if the DumpRenderTree build is up-to-date."),
+            optparse.make_option("--build-directory",
+                help="Path to the directory under which build files are kept (should not include configuration)"),
+            optparse.make_option("--time-out-ms", default=600 * 1000,
+                help="Set the timeout for each test"),
+            optparse.make_option("--pause-before-testing", dest="pause_before_testing", action="store_true", default=False,
+                help="Pause before running the tests to let user attach a performance monitor."),
+            optparse.make_option("--no-results", action="store_false", dest="generate_results", default=True,
+                help="Do no generate results JSON and results page."),
+            optparse.make_option("--output-json-path", action='callback', callback=_expand_path, type="str",
+                help="Path to generate a JSON file at; may contain previous results if it already exists."),
+            optparse.make_option("--reset-results", action="store_true",
+                help="Clears the content in the generated JSON file before adding the results."),
+            optparse.make_option("--slave-config-json-path", action='callback', callback=_expand_path, type="str",
+                help="Only used on bots. Path to a slave configuration file."),
+            optparse.make_option("--description",
+                help="Add a description to the output JSON file if one is generated"),
+            optparse.make_option("--no-show-results", action="store_false", default=True, dest="show_results",
+                help="Don't launch a browser with results after the tests are done"),
+            optparse.make_option("--test-results-server",
+                help="Upload the generated JSON file to the specified server when --output-json-path is present."),
+            optparse.make_option("--webkit-test-runner", "-2", action="store_true",
+                help="Use WebKitTestRunner rather than DumpRenderTree."),
+            optparse.make_option("--replay", dest="replay", action="store_true", default=False,
+                help="Run replay tests."),
+            optparse.make_option("--force", dest="skipped", action="store_true", default=False,
+                help="Run all tests, including the ones in the Skipped list."),
+            ]
+        return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)
+
+    def _collect_tests(self):
+        """Return the list of tests found."""
+
+        test_extensions = ['.html', '.svg']
+        if self._options.replay:
+            test_extensions.append('.replay')
+
+        def _is_test_file(filesystem, dirname, filename):
+            return filesystem.splitext(filename)[1] in test_extensions
+
+        filesystem = self._host.filesystem
+
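+        # Accept each command-line path both as given and as a path relative to the perf tests directory.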
+        paths = []
+        for arg in self._args:
+            paths.append(arg)
+            relpath = filesystem.relpath(arg, self._base_path)
+            if relpath:
+                paths.append(relpath)
+
+        skipped_directories = set(['.svn', 'resources'])
+        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
+        tests = []
+        for path in test_files:
+            relative_path = self._port.relative_perf_test_filename(path).replace('\\', '/')
+            if self._port.skips_perf_test(relative_path) and not self._options.skipped:
+                continue
+            test = PerfTestFactory.create_perf_test(self._port, relative_path, path)
+            tests.append(test)
+
+        return tests
+
+    def run(self):
+        if not self._port.check_build(needs_http=False):
+            _log.error("Build not up to date for %s" % self._port._path_to_driver())
+            return self.EXIT_CODE_BAD_BUILD
+
+        tests = self._collect_tests()
+        _log.info("Running %d tests" % len(tests))
+
+        for test in tests:
+            if not test.prepare(self._options.time_out_ms):
+                return self.EXIT_CODE_BAD_PREPARATION
+
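+        # Tests run in alphabetical order by test name; the return value is
+        # the number of tests that did not produce results.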
+        unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)
+        if self._options.generate_results:
+            exit_code = self._generate_and_show_results()
+            if exit_code:
+                return exit_code
+
+        return unexpected
+
+    def _output_json_path(self):
+        output_json_path = self._options.output_json_path
+        if output_json_path:
+            return output_json_path
+        return self._host.filesystem.join(self._port.perf_results_directory(), self._DEFAULT_JSON_FILENAME)
+
+    def _generate_and_show_results(self):
+        options = self._options
+        output_json_path = self._output_json_path()
+        output = self._generate_results_dict(self._timestamp, options.description, options.platform, options.builder_name, options.build_number)
+
+        if options.slave_config_json_path:
+            output = self._merge_slave_config_json(options.slave_config_json_path, output)
+            if not output:
+                return self.EXIT_CODE_BAD_SOURCE_JSON
+
+        output = self._merge_outputs_if_needed(output_json_path, output)
+        if not output:
+            return self.EXIT_CODE_BAD_MERGE
+
+        results_page_path = self._host.filesystem.splitext(output_json_path)[0] + '.html'
+        self._generate_output_files(output_json_path, results_page_path, output)
+
+        if options.test_results_server:
+            if not self._upload_json(options.test_results_server, output_json_path):
+                return self.EXIT_CODE_FAILED_UPLOADING
+
+        if options.show_results:
+            self._port.show_results_html_file(results_page_path)
+
+    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
+        contents = {'results': self._results}
+        if description:
+            contents['description'] = description
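+        # Record the current revision of each repository the port tracks,
+        # e.g. a 'webkit-revision' entry for the WebKit checkout.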
+        for (name, path) in self._port.repository_paths():
+            scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
+            contents[name + '-revision'] = scm.svn_revision(path)
+
+        # FIXME: Add --branch or auto-detect the branch we're in
+        for key, value in {'timestamp': int(timestamp), 'branch': self._default_branch, 'platform': platform,
+            'builder-name': builder_name, 'build-number': int(build_number) if build_number else None}.items():
+            if value:
+                contents[key] = value
+
+        return contents
+
+    def _merge_slave_config_json(self, slave_config_json_path, output):
+        if not self._host.filesystem.isfile(slave_config_json_path):
+            _log.error("Missing slave configuration JSON file: %s" % slave_config_json_path)
+            return None
+
+        try:
+            slave_config_json = self._host.filesystem.open_text_file_for_reading(slave_config_json_path)
+            slave_config = json.load(slave_config_json)
+            return dict(slave_config.items() + output.items())
+        except Exception, error:
+            _log.error("Failed to merge slave configuration JSON file %s: %s" % (slave_config_json_path, error))
+        return None
+
+    def _merge_outputs_if_needed(self, output_json_path, output):
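+        # The output JSON is a list of per-run dictionaries; --reset-results,
+        # or a missing file, starts a fresh list containing only this run.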
+        if self._options.reset_results or not self._host.filesystem.isfile(output_json_path):
+            return [output]
+        try:
+            existing_outputs = json.loads(self._host.filesystem.read_text_file(output_json_path))
+            return existing_outputs + [output]
+        except Exception, error:
+            _log.error("Failed to merge output JSON file %s: %s" % (output_json_path, error))
+        return None
+
+    def _generate_output_files(self, output_json_path, results_page_path, output):
+        filesystem = self._host.filesystem
+
+        json_output = json.dumps(output)
+        filesystem.write_text_file(output_json_path, json_output)
+
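+        # The results page is built by substituting the absolute path to the
+        # checkout and the JSON payload into the HTML template placeholders.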
+        if results_page_path:
+            template_path = filesystem.join(self._port.perf_tests_dir(), 'resources/results-template.html')
+            template = filesystem.read_text_file(template_path)
+
+            absolute_path_to_trunk = filesystem.dirname(self._port.perf_tests_dir())
+            results_page = template.replace('%AbsolutePathToWebKitTrunk%', absolute_path_to_trunk)
+            results_page = results_page.replace('%PeformanceTestsResultsJSON%', json_output)
+
+            filesystem.write_text_file(results_page_path, results_page)
+
+    def _upload_json(self, test_results_server, json_path, file_uploader=FileUploader):
+        uploader = file_uploader("https://%s/api/test/report" % test_results_server, 120)
+        try:
+            response = uploader.upload_single_text_file(self._host.filesystem, 'application/json', json_path)
+        except Exception, error:
+            _log.error("Failed to upload JSON file in 120s: %s" % error)
+            return False
+
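+        # The server is expected to reply with a single "OK" line on success;
+        # any other response body is treated as a failed upload.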
+        response_body = [line.strip('\n') for line in response]
+        if response_body != ['OK']:
+            _log.error("Uploaded JSON but got a bad response:")
+            for line in response_body:
+                _log.error(line)
+            return False
+
+        _log.info("JSON file uploaded.")
+        return True
+
+    def _print_status(self, tests, expected, unexpected):
+        if len(tests) == expected + unexpected:
+            status = "Ran %d tests" % len(tests)
+        else:
+            status = "Running %d of %d tests" % (expected + unexpected + 1, len(tests))
+        if unexpected:
+            status += " (%d didn't run)" % unexpected
+        _log.info(status)
+
+    def _run_tests_set(self, tests, port):
+        result_count = len(tests)
+        expected = 0
+        unexpected = 0
+        driver = None
+
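+        # A fresh driver is created and stopped for every test so that one
+        # test's state does not carry over to the next.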
+        for test in tests:
+            driver = port.create_driver(worker_number=1, no_timeout=True)
+
+            if self._options.pause_before_testing:
+                driver.start()
+                if not self._host.user.confirm("Ready to run test?"):
+                    driver.stop()
+                    return unexpected
+
+            _log.info('Running %s (%d of %d)' % (test.test_name(), expected + unexpected + 1, len(tests)))
+            if self._run_single_test(test, driver):
+                expected = expected + 1
+            else:
+                unexpected = unexpected + 1
+
+            _log.info('')
+
+            driver.stop()
+
+        return unexpected
+
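+    # Returns True when the test produced results; the elapsed wall-clock
+    # time is logged either way.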
+    def _run_single_test(self, test, driver):
+        start_time = time.time()
+
+        new_results = test.run(driver, self._options.time_out_ms)
+        if new_results:
+            self._results.update(new_results)
+        else:
+            _log.error('FAILED')
+
+        _log.info("Finished: %f s" % (time.time() - start_time))
+
+        return new_results is not None
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
new file mode 100755
index 0000000..9c9295f
--- /dev/null
+++ b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
@@ -0,0 +1,642 @@
+#!/usr/bin/python
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for run_perf_tests."""
+
+import StringIO
+import json
+import re
+import unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.layout_tests.port.driver import DriverInput, DriverOutput
+from webkitpy.layout_tests.port.test import TestPort
+from webkitpy.layout_tests.views import printing
+from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
+from webkitpy.performance_tests.perftest import PerfTest
+from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner
+
+
+class MainTest(unittest.TestCase):
+    def assertWritten(self, stream, contents):
+        self.assertEquals(stream.buflist, contents)
+
+    def normalizeFinishedTime(self, log):
+        return re.sub(r'Finished: [0-9\.]+ s', 'Finished: 0.1 s', log)
+
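+    # A fake driver that returns canned DriverOutput objects based solely on
+    # the test file name, so no real DumpRenderTree process is needed.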
+    class TestDriver:
+        def run_test(self, driver_input, stop_when_done):
+            text = ''
+            timeout = False
+            crash = False
+            if driver_input.test_name.endswith('pass.html'):
+                text = 'RESULT group_name: test_name= 42 ms'
+            elif driver_input.test_name.endswith('timeout.html'):
+                timeout = True
+            elif driver_input.test_name.endswith('failed.html'):
+                text = None
+            elif driver_input.test_name.endswith('tonguey.html'):
+                text = 'we are not expecting an output from perf tests but RESULT blablabla'
+            elif driver_input.test_name.endswith('crash.html'):
+                crash = True
+            elif driver_input.test_name.endswith('event-target-wrapper.html'):
+                text = """Running 20 times
+Ignoring warm-up run (1502)
+1504
+1505
+1510
+1504
+1507
+1509
+1510
+1487
+1488
+1472
+1472
+1488
+1473
+1472
+1475
+1487
+1486
+1486
+1475
+1471
+
+Time:
+values 1504, 1505, 1510, 1504, 1507, 1509, 1510, 1487, 1488, 1472, 1472, 1488, 1473, 1472, 1475, 1487, 1486, 1486, 1475, 1471 ms
+avg 1489.05 ms
+median 1487 ms
+stdev 14.46 ms
+min 1471 ms
+max 1510 ms
+"""
+            elif driver_input.test_name.endswith('some-parser.html'):
+                text = """Running 20 times
+Ignoring warm-up run (1115)
+
+Time:
+values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms
+avg 1100 ms
+median 1101 ms
+stdev 11 ms
+min 1080 ms
+max 1120 ms
+"""
+            elif driver_input.test_name.endswith('memory-test.html'):
+                text = """Running 20 times
+Ignoring warm-up run (1115)
+
+Time:
+values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms
+avg 1100 ms
+median 1101 ms
+stdev 11 ms
+min 1080 ms
+max 1120 ms
+
+JS Heap:
+values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 bytes
+avg 832000 bytes
+median 829000 bytes
+stdev 15000 bytes
+min 811000 bytes
+max 848000 bytes
+
+Malloc:
+values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 bytes
+avg 532000 bytes
+median 529000 bytes
+stdev 13000 bytes
+min 511000 bytes
+max 548000 bytes
+"""
+            return DriverOutput(text, '', '', '', crash=crash, timeout=timeout)
+
+        def start(self):
+            """do nothing"""
+
+        def stop(self):
+            """do nothing"""
+
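+    # Builds a PerfTestsRunner wired to a TestPort with a MockHost and the
+    # given fake driver class, so tests never launch a real browser process.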
+    def create_runner(self, args=[], driver_class=TestDriver):
+        options, parsed_args = PerfTestsRunner._parse_args(args)
+        test_port = TestPort(host=MockHost(), options=options)
+        test_port.create_driver = lambda worker_number=None, no_timeout=False: driver_class()
+
+        runner = PerfTestsRunner(args=args, port=test_port)
+        runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector')
+        runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings')
+        runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser')
+
+        filesystem = runner._host.filesystem
+        runner.load_output_json = lambda: json.loads(filesystem.read_text_file(runner._output_json_path()))
+        return runner, test_port
+
+    def run_test(self, test_name):
+        runner, port = self.create_runner()
+        driver = MainTest.TestDriver()
+        return runner._run_single_test(ChromiumStylePerfTest(port, test_name, runner._host.filesystem.join('some-dir', test_name)), driver)
+
+    def test_run_passing_test(self):
+        self.assertTrue(self.run_test('pass.html'))
+
+    def test_run_silent_test(self):
+        self.assertFalse(self.run_test('silent.html'))
+
+    def test_run_failed_test(self):
+        self.assertFalse(self.run_test('failed.html'))
+
+    def test_run_tonguey_test(self):
+        self.assertFalse(self.run_test('tonguey.html'))
+
+    def test_run_timeout_test(self):
+        self.assertFalse(self.run_test('timeout.html'))
+
+    def test_run_crash_test(self):
+        self.assertFalse(self.run_test('crash.html'))
+
+    def _tests_for_runner(self, runner, test_names):
+        filesystem = runner._host.filesystem
+        tests = []
+        for test in test_names:
+            path = filesystem.join(runner._base_path, test)
+            dirname = filesystem.dirname(path)
+            if test.startswith('inspector/'):
+                tests.append(ChromiumStylePerfTest(runner._port, test, path))
+            else:
+                tests.append(PerfTest(runner._port, test, path))
+        return tests
+
+    def test_run_test_set(self):
+        runner, port = self.create_runner()
+        tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
+            'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
+        output = OutputCapture()
+        output.capture_output()
+        try:
+            unexpected_result_count = runner._run_tests_set(tests, port)
+        finally:
+            stdout, stderr, log = output.restore_output()
+        self.assertEqual(unexpected_result_count, len(tests) - 1)
+        self.assertTrue('\nRESULT group_name: test_name= 42 ms\n' in log)
+
+    def test_run_test_set_kills_drt_per_run(self):
+
+        class TestDriverWithStopCount(MainTest.TestDriver):
+            stop_count = 0
+
+            def stop(self):
+                TestDriverWithStopCount.stop_count += 1
+
+        runner, port = self.create_runner(driver_class=TestDriverWithStopCount)
+
+        tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
+            'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
+        unexpected_result_count = runner._run_tests_set(tests, port)
+
+        self.assertEqual(TestDriverWithStopCount.stop_count, 6)
+
+    def test_run_test_pause_before_testing(self):
+        class TestDriverWithStartCount(MainTest.TestDriver):
+            start_count = 0
+
+            def start(self):
+                TestDriverWithStartCount.start_count += 1
+
+        runner, port = self.create_runner(args=["--pause-before-testing"], driver_class=TestDriverWithStartCount)
+        tests = self._tests_for_runner(runner, ['inspector/pass.html'])
+
+        output = OutputCapture()
+        output.capture_output()
+        try:
+            unexpected_result_count = runner._run_tests_set(tests, port)
+            self.assertEqual(TestDriverWithStartCount.start_count, 1)
+        finally:
+            stdout, stderr, log = output.restore_output()
+        self.assertEqual(stderr, "Ready to run test?\n")
+        self.assertEqual(self.normalizeFinishedTime(log),
+            "Running inspector/pass.html (1 of 1)\nRESULT group_name: test_name= 42 ms\nFinished: 0.1 s\n\n")
+
+    def test_run_test_set_for_parser_tests(self):
+        runner, port = self.create_runner()
+        tests = self._tests_for_runner(runner, ['Bindings/event-target-wrapper.html', 'Parser/some-parser.html'])
+        output = OutputCapture()
+        output.capture_output()
+        try:
+            unexpected_result_count = runner._run_tests_set(tests, port)
+        finally:
+            stdout, stderr, log = output.restore_output()
+        self.assertEqual(unexpected_result_count, 0)
+        self.assertEqual(self.normalizeFinishedTime(log), '\n'.join(['Running Bindings/event-target-wrapper.html (1 of 2)',
+        'RESULT Bindings: event-target-wrapper= 1489.05 ms',
+        'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
+        'Finished: 0.1 s',
+        '',
+        'Running Parser/some-parser.html (2 of 2)',
+        'RESULT Parser: some-parser= 1100.0 ms',
+        'median= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms',
+        'Finished: 0.1 s',
+        '', '']))
+
+    def test_run_memory_test(self):
+        runner, port = self.create_runner_and_setup_results_template()
+        runner._timestamp = 123456789
+        port.host.filesystem.write_text_file(runner._base_path + '/Parser/memory-test.html', 'some content')
+
+        output = OutputCapture()
+        output.capture_output()
+        try:
+            unexpected_result_count = runner.run()
+        finally:
+            stdout, stderr, log = output.restore_output()
+        self.assertEqual(unexpected_result_count, 0)
+        self.assertEqual(self.normalizeFinishedTime(log), '\n'.join([
+            'Running 1 tests',
+            'Running Parser/memory-test.html (1 of 1)',
+            'RESULT Parser: memory-test= 1100.0 ms',
+            'median= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms',
+            'RESULT Parser: memory-test: JSHeap= 832000.0 bytes',
+            'median= 829000.0 bytes, stdev= 15000.0 bytes, min= 811000.0 bytes, max= 848000.0 bytes',
+            'RESULT Parser: memory-test: Malloc= 532000.0 bytes',
+            'median= 529000.0 bytes, stdev= 13000.0 bytes, min= 511000.0 bytes, max= 548000.0 bytes',
+            'Finished: 0.1 s',
+            '', '']))
+        results = runner.load_output_json()[0]['results']
+        values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
+        self.assertEqual(results['Parser/memory-test'], {'min': 1080.0, 'max': 1120.0, 'median': 1101.0, 'stdev': 11.0, 'avg': 1100.0, 'unit': 'ms', 'values': values})
+        self.assertEqual(results['Parser/memory-test:JSHeap'], {'min': 811000.0, 'max': 848000.0, 'median': 829000.0, 'stdev': 15000.0, 'avg': 832000.0, 'unit': 'bytes', 'values': values})
+        self.assertEqual(results['Parser/memory-test:Malloc'], {'min': 511000.0, 'max': 548000.0, 'median': 529000.0, 'stdev': 13000.0, 'avg': 532000.0, 'unit': 'bytes', 'values': values})
+
+    def _test_run_with_json_output(self, runner, filesystem, upload_suceeds=False, expected_exit_code=0):
+        filesystem.write_text_file(runner._base_path + '/inspector/pass.html', 'some content')
+        filesystem.write_text_file(runner._base_path + '/Bindings/event-target-wrapper.html', 'some content')
+
+        uploaded = [False]
+
+        def mock_upload_json(hostname, json_path):
+            self.assertEqual(hostname, 'some.host')
+            self.assertEqual(json_path, '/mock-checkout/output.json')
+            uploaded[0] = upload_suceeds
+            return upload_suceeds
+
+        runner._upload_json = mock_upload_json
+        runner._timestamp = 123456789
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+        try:
+            self.assertEqual(runner.run(), expected_exit_code)
+        finally:
+            stdout, stderr, logs = output_capture.restore_output()
+
+        if not expected_exit_code:
+            self.assertEqual(self.normalizeFinishedTime(logs),
+                '\n'.join(['Running 2 tests',
+                'Running Bindings/event-target-wrapper.html (1 of 2)',
+                'RESULT Bindings: event-target-wrapper= 1489.05 ms',
+                'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
+                'Finished: 0.1 s',
+                '',
+                'Running inspector/pass.html (2 of 2)',
+                'RESULT group_name: test_name= 42 ms',
+                'Finished: 0.1 s',
+                '',
+                '']))
+
+        self.assertEqual(uploaded[0], upload_suceeds)
+
+        return logs
+
+    _event_target_wrapper_and_inspector_results = {
+        "Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms",
+           "values": [1504, 1505, 1510, 1504, 1507, 1509, 1510, 1487, 1488, 1472, 1472, 1488, 1473, 1472, 1475, 1487, 1486, 1486, 1475, 1471]},
+        "inspector/pass.html:group_name:test_name": 42}
+
+    def test_run_with_json_output(self):
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+            '--test-results-server=some.host'])
+        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
+        self.assertEqual(runner.load_output_json(), [{
+            "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
+            "webkit-revision": "5678", "branch": "webkit-trunk"}])
+
+        filesystem = port.host.filesystem
+        self.assertTrue(filesystem.isfile(runner._output_json_path()))
+        self.assertTrue(filesystem.isfile(filesystem.splitext(runner._output_json_path())[0] + '.html'))
+
+    def test_run_with_description(self):
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+            '--test-results-server=some.host', '--description', 'some description'])
+        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
+        self.assertEqual(runner.load_output_json(), [{
+            "timestamp": 123456789, "description": "some description",
+            "results": self._event_target_wrapper_and_inspector_results,
+            "webkit-revision": "5678", "branch": "webkit-trunk"}])
+
+    def create_runner_and_setup_results_template(self, args=[]):
+        runner, port = self.create_runner(args)
+        filesystem = port.host.filesystem
+        filesystem.write_text_file(runner._base_path + '/resources/results-template.html',
+            'BEGIN<script src="%AbsolutePathToWebKitTrunk%/some.js"></script>'
+            '<script src="%AbsolutePathToWebKitTrunk%/other.js"></script><script>%PeformanceTestsResultsJSON%</script>END')
+        filesystem.write_text_file(runner._base_path + '/Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js', 'jquery content')
+        return runner, port
+
+    def test_run_respects_no_results(self):
+        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
+            '--test-results-server=some.host', '--no-results'])
+        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=False)
+        self.assertFalse(port.host.filesystem.isfile('/mock-checkout/output.json'))
+
+    def test_run_generates_json_by_default(self):
+        runner, port = self.create_runner_and_setup_results_template()
+        filesystem = port.host.filesystem
+        output_json_path = runner._output_json_path()
+        results_page_path = filesystem.splitext(output_json_path)[0] + '.html'
+
+        self.assertFalse(filesystem.isfile(output_json_path))
+        self.assertFalse(filesystem.isfile(results_page_path))
+
+        self._test_run_with_json_output(runner, port.host.filesystem)
+
+        self.assertEqual(json.loads(port.host.filesystem.read_text_file(output_json_path)), [{
+            "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
+            "webkit-revision": "5678", "branch": "webkit-trunk"}])
+
+        self.assertTrue(filesystem.isfile(output_json_path))
+        self.assertTrue(filesystem.isfile(results_page_path))
+
+    def test_run_merges_output_by_default(self):
+        runner, port = self.create_runner_and_setup_results_template()
+        filesystem = port.host.filesystem
+        output_json_path = runner._output_json_path()
+
+        filesystem.write_text_file(output_json_path, '[{"previous": "results"}]')
+
+        self._test_run_with_json_output(runner, port.host.filesystem)
+
+        self.assertEqual(json.loads(port.host.filesystem.read_text_file(output_json_path)), [{"previous": "results"}, {
+            "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
+            "webkit-revision": "5678", "branch": "webkit-trunk"}])
+        self.assertTrue(filesystem.isfile(filesystem.splitext(output_json_path)[0] + '.html'))
+
+    def test_run_respects_reset_results(self):
+        runner, port = self.create_runner_and_setup_results_template(args=["--reset-results"])
+        filesystem = port.host.filesystem
+        output_json_path = runner._output_json_path()
+
+        filesystem.write_text_file(output_json_path, '[{"previous": "results"}]')
+
+        self._test_run_with_json_output(runner, port.host.filesystem)
+
+        self.assertEqual(json.loads(port.host.filesystem.read_text_file(output_json_path)), [{
+            "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
+            "webkit-revision": "5678", "branch": "webkit-trunk"}])
+        self.assertTrue(filesystem.isfile(filesystem.splitext(output_json_path)[0] + '.html'))
+
+    def test_run_generates_and_show_results_page(self):
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
+        page_shown = []
+        port.show_results_html_file = lambda path: page_shown.append(path)
+        filesystem = port.host.filesystem
+        self._test_run_with_json_output(runner, filesystem)
+
+        expected_entry = {"timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
+            "webkit-revision": "5678", "branch": "webkit-trunk"}
+
+        self.maxDiff = None
+        json_output = port.host.filesystem.read_text_file('/mock-checkout/output.json')
+        self.assertEqual(json.loads(json_output), [expected_entry])
+        self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
+            'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
+            '<script>%s</script>END' % json_output)
+        self.assertEqual(page_shown[0], '/mock-checkout/output.html')
+
+        self._test_run_with_json_output(runner, filesystem)
+        json_output = port.host.filesystem.read_text_file('/mock-checkout/output.json')
+        self.assertEqual(json.loads(json_output), [expected_entry, expected_entry])
+        self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
+            'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
+            '<script>%s</script>END' % json_output)
+
+    def test_run_respects_no_show_results(self):
+        show_results_html_file = lambda path: page_shown.append(path)
+
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
+        page_shown = []
+        port.show_results_html_file = show_results_html_file
+        self._test_run_with_json_output(runner, port.host.filesystem)
+        self.assertEqual(page_shown[0], '/mock-checkout/output.html')
+
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+            '--no-show-results'])
+        page_shown = []
+        port.show_results_html_file = show_results_html_file
+        self._test_run_with_json_output(runner, port.host.filesystem)
+        self.assertEqual(page_shown, [])
+
+    def test_run_with_bad_output_json(self):
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
+        port.host.filesystem.write_text_file('/mock-checkout/output.json', 'bad json')
+        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
+        port.host.filesystem.write_text_file('/mock-checkout/output.json', '{"another bad json": "1"}')
+        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
+
+    def test_run_with_slave_config_json(self):
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+            '--slave-config-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
+        port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value"}')
+        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
+        self.assertEqual(runner.load_output_json(), [{
+            "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
+            "webkit-revision": "5678", "branch": "webkit-trunk", "key": "value"}])
+
+    def test_run_with_bad_slave_config_json(self):
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+            '--slave-config-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
+        logs = self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
+        self.assertTrue('Missing slave configuration JSON file: /mock-checkout/slave-config.json' in logs)
+        port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', 'bad json')
+        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
+        port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '["another bad json"]')
+        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
+
+    def test_run_with_multiple_repositories(self):
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+            '--test-results-server=some.host'])
+        port.repository_paths = lambda: [('webkit', '/mock-checkout'), ('some', '/mock-checkout/some')]
+        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
+        self.assertEqual(runner.load_output_json(), [{
+            "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
+            "webkit-revision": "5678", "some-revision": "5678", "branch": "webkit-trunk"}])
+
+    def test_run_with_upload_json(self):
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+            '--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123'])
+
+        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
+        generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
+        self.assertEqual(generated_json[0]['platform'], 'platform1')
+        self.assertEqual(generated_json[0]['builder-name'], 'builder1')
+        self.assertEqual(generated_json[0]['build-number'], 123)
+
+        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=False, expected_exit_code=PerfTestsRunner.EXIT_CODE_FAILED_UPLOADING)
+
+    def test_upload_json(self):
+        runner, port = self.create_runner()
+        port.host.filesystem.files['/mock-checkout/some.json'] = 'some content'
+
+        called = []
+        upload_single_text_file_throws = False
+        upload_single_text_file_return_value = StringIO.StringIO('OK')
+
+        class MockFileUploader:
+            def __init__(mock, url, timeout):
+                self.assertEqual(url, 'https://some.host/api/test/report')
+                self.assertTrue(isinstance(timeout, int) and timeout)
+                called.append('FileUploader')
+
+            def upload_single_text_file(mock, filesystem, content_type, filename):
+                self.assertEqual(filesystem, port.host.filesystem)
+                self.assertEqual(content_type, 'application/json')
+                self.assertEqual(filename, 'some.json')
+                called.append('upload_single_text_file')
+                if upload_single_text_file_throws:
+                    raise "Some exception"
+                return upload_single_text_file_return_value
+
+        runner._upload_json('some.host', 'some.json', MockFileUploader)
+        self.assertEqual(called, ['FileUploader', 'upload_single_text_file'])
+
+        output = OutputCapture()
+        output.capture_output()
+        upload_single_text_file_return_value = StringIO.StringIO('Some error')
+        runner._upload_json('some.host', 'some.json', MockFileUploader)
+        _, _, logs = output.restore_output()
+        self.assertEqual(logs, 'Uploaded JSON but got a bad response:\nSome error\n')
+
+        # Throwing an exception in upload_single_text_file shouldn't blow up _upload_json.
+        called = []
+        upload_single_text_file_throws = True
+        runner._upload_json('some.host', 'some.json', MockFileUploader)
+        self.assertEqual(called, ['FileUploader', 'upload_single_text_file'])
+
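+    # Writes a fake test file directly into the mock filesystem under
+    # runner._base_path.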
+    def _add_file(self, runner, dirname, filename, content=True):
+        dirname = runner._host.filesystem.join(runner._base_path, dirname) if dirname else runner._base_path
+        runner._host.filesystem.maybe_make_directory(dirname)
+        runner._host.filesystem.files[runner._host.filesystem.join(dirname, filename)] = content
+
+    def test_collect_tests(self):
+        runner, port = self.create_runner()
+        self._add_file(runner, 'inspector', 'a_file.html', 'a content')
+        tests = runner._collect_tests()
+        self.assertEqual(len(tests), 1)
+
+    def _collect_tests_and_sort_test_name(self, runner):
+        return sorted([test.test_name() for test in runner._collect_tests()])
+
+    def test_collect_tests_with_multiple_files(self):
+        runner, port = self.create_runner(args=['PerformanceTests/test1.html', 'test2.html'])
+
+        def add_file(filename):
+            port.host.filesystem.files[runner._host.filesystem.join(runner._base_path, filename)] = 'some content'
+
+        add_file('test1.html')
+        add_file('test2.html')
+        add_file('test3.html')
+        port.host.filesystem.chdir(runner._port.perf_tests_dir()[:runner._port.perf_tests_dir().rfind(runner._host.filesystem.sep)])
+        self.assertEqual(self._collect_tests_and_sort_test_name(runner), ['test1.html', 'test2.html'])
+
+    def test_collect_tests_with_skipped_list(self):
+        runner, port = self.create_runner()
+
+        self._add_file(runner, 'inspector', 'test1.html')
+        self._add_file(runner, 'inspector', 'unsupported_test1.html')
+        self._add_file(runner, 'inspector', 'test2.html')
+        self._add_file(runner, 'inspector/resources', 'resource_file.html')
+        self._add_file(runner, 'unsupported', 'unsupported_test2.html')
+        port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
+        self.assertEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html'])
+
+    def test_collect_tests_with_skipped_list_and_force(self):
+        runner, port = self.create_runner(args=['--force'])
+
+        self._add_file(runner, 'inspector', 'test1.html')
+        self._add_file(runner, 'inspector', 'unsupported_test1.html')
+        self._add_file(runner, 'inspector', 'test2.html')
+        self._add_file(runner, 'inspector/resources', 'resource_file.html')
+        self._add_file(runner, 'unsupported', 'unsupported_test2.html')
+        port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
+        self.assertEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html', 'inspector/unsupported_test1.html', 'unsupported/unsupported_test2.html'])
+
+    def test_collect_tests_should_ignore_replay_tests_by_default(self):
+        runner, port = self.create_runner()
+        self._add_file(runner, 'Replay', 'www.webkit.org.replay')
+        self.assertEqual(runner._collect_tests(), [])
+
+    def test_collect_tests_with_replay_tests(self):
+        runner, port = self.create_runner(args=['--replay'])
+        self._add_file(runner, 'Replay', 'www.webkit.org.replay')
+        tests = runner._collect_tests()
+        self.assertEqual(len(tests), 1)
+        self.assertEqual(tests[0].__class__.__name__, 'ReplayPerfTest')
+
+    def test_parse_args(self):
+        runner, port = self.create_runner()
+        options, args = PerfTestsRunner._parse_args([
+                '--build-directory=folder42',
+                '--platform=platform42',
+                '--builder-name', 'webkit-mac-1',
+                '--build-number=56',
+                '--time-out-ms=42',
+                '--no-show-results',
+                '--reset-results',
+                '--output-json-path=a/output.json',
+                '--slave-config-json-path=a/source.json',
+                '--test-results-server=somehost',
+                '--debug'])
+        self.assertEqual(options.build, True)
+        self.assertEqual(options.build_directory, 'folder42')
+        self.assertEqual(options.platform, 'platform42')
+        self.assertEqual(options.builder_name, 'webkit-mac-1')
+        self.assertEqual(options.build_number, '56')
+        self.assertEqual(options.time_out_ms, '42')
+        self.assertEqual(options.configuration, 'Debug')
+        self.assertEqual(options.show_results, False)
+        self.assertEqual(options.reset_results, True)
+        self.assertEqual(options.output_json_path, 'a/output.json')
+        self.assertEqual(options.slave_config_json_path, 'a/source.json')
+        self.assertEqual(options.test_results_server, 'somehost')
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/pylintrc b/Tools/Scripts/webkitpy/pylintrc
new file mode 100644
index 0000000..caadcfb
--- /dev/null
+++ b/Tools/Scripts/webkitpy/pylintrc
@@ -0,0 +1,312 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+[MASTER]
+
+# Specify a configuration file.
+#rcfile=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Profiled execution.
+profile=no
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=CVS
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+
+[MESSAGES CONTROL]
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifier separated by comma (,) or put this option
+# multiple time.
+#enable=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifier separated by comma (,) or put this option
+# multiple time (only on the command line, not in the configuration file where
+# it should appear only once).
+# CHANGED:
+# C0103: Invalid name ""
+# C0111: Missing docstring
+# C0301: Line too long
+# C0302: Too many lines in module (N)
+# I0010: Unable to consider inline option ''
+# I0011: Locally disabling WNNNN
+#
+# R0201: Method could be a function
+# R0801: Similar lines in N files
+# R0901: Too many ancestors (8/7)
+# R0902: Too many instance attributes (N/7)
+# R0903: Too few public methods (N/2)
+# R0904: Too many public methods (N/20)
+# R0911: Too many return statements (N/6)
+# R0912: Too many branches (N/12)
+# R0913: Too many arguments (N/5)
+# R0914: Too many local variables (N/15)
+# R0915: Too many statements (N/50)
+# R0921: Abstract class not referenced
+# R0922: Abstract class is only referenced 1 times
+# W0122: Use of the exec statement
+# W0141: Used builtin function ''
+# W0212: Access to a protected member X of a client class
+# W0142: Used * or ** magic
+# W0401: Wildcard import X
+# W0402: Uses of a deprecated module 'string'
+# W0404: 41: Reimport 'XX' (imported line NN)
+# W0511: TODO
+# W0603: Using the global statement
+# W0614: Unused import X from wildcard import
+# W0703: Catch "Exception"
+# W1201: Specify string format arguments as logging function parameters
+disable=C0103,C0111,C0301,C0302,I0010,I0011,R0201,R0801,R0901,R0902,R0903,R0904,R0911,R0912,R0913,R0914,R0915,R0921,R0922,W0122,W0141,W0142,W0212,W0401,W0402,W0404,W0511,W0603,W0614,W0703,W1201
+
+
+[REPORTS]
+
+# Set the output format. Available formats are text, parseable, colorized, msvs
+# (visual studio) and html
+output-format=text
+
+# Include message's id in output
+include-ids=yes
+
+# Put messages in a separate file for each module / package specified on the
+# command line instead of printing them on stdout. Reports (if any) will be
+# written in a file name "pylint_global.[txt|html]".
+files-output=no
+
+# Tells whether to display a full report or only the messages
+# CHANGED:
+reports=no
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note). You have access to the variables errors warning, statement which
+# respectively contain the number of errors / warnings messages and the total
+# number of statements analyzed. This is used by the global evaluation report
+# (RP0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Add a comment according to your evaluation note. This is used by the global
+# evaluation report (RP0004).
+comment=no
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# A regular expression matching the beginning of the name of dummy variables
+# (i.e. not used).
+dummy-variables-rgx=_|dummy
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid to define new builtins when possible.
+additional-builtins=
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# List of classes names for which member attributes should not be checked
+# (useful for classes with attributes dynamically set).
+ignored-classes=SQLObject,twisted.internet.reactor,hashlib,google.appengine.api.memcache
+
+# When zope mode is activated, add a predefined set of Zope acquired attributes
+# to generated-members.
+zope=no
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E0201 when accessed. Python regular
+# expressions are accepted.
+generated-members=REQUEST,acl_users,aq_parent
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,XXX,TODO
+
+
+[SIMILARITIES]
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+
+[FORMAT]
+
+# Maximum number of characters on a single line.
+max-line-length=200
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# tab).
+# CHANGED:
+indent-string='    '
+
+
+[BASIC]
+
+# Required attributes for module, separated by a comma
+required-attributes=
+
+# List of builtins function names that should not be used, separated by a comma
+bad-functions=map,filter,apply,input
+
+# Regular expression which should only match correct module names
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Regular expression which should only match correct module level names
+const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Regular expression which should only match correct class names
+class-rgx=[A-Z_][a-zA-Z0-9]+$
+
+# Regular expression which should only match correct function names
+function-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct method names
+method-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct instance attribute names
+attr-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct argument names
+argument-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct variable names
+variable-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct list comprehension /
+# generator expression variable names
+inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,j,k,ex,Run,_
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,bar,baz,toto,tutu,tata
+
+# Regular expression which should only match functions or classes name which do
+# not require a docstring
+no-docstring-rgx=__.*__
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=5
+
+# Argument names that match this expression will be ignored. Default to name
+# with leading underscore
+ignored-argument-names=_.*
+
+# Maximum number of locals for function / method body
+max-locals=15
+
+# Maximum number of return / yield for function / method body
+max-returns=6
+
+# Maximum number of branch for function / method body
+max-branchs=12
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+
+[CLASSES]
+
+# List of interface methods to ignore, separated by a comma. This is used for
+# instance to not check methods defines in Zope's Interface base class.
+ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+
+[IMPORTS]
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=regsub,string,TERMIOS,Bastion,rexec
+
+# Create a graph of every (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception
diff --git a/Tools/Scripts/webkitpy/style/__init__.py b/Tools/Scripts/webkitpy/style/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/Tools/Scripts/webkitpy/style/checker.py b/Tools/Scripts/webkitpy/style/checker.py
new file mode 100644
index 0000000..9f27c36
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checker.py
@@ -0,0 +1,860 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+# Copyright (C) 2010 ProFUSION embedded systems
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Front end of some style-checker modules."""
+
+import logging
+import os.path
+import re
+import sys
+
+from checkers.common import categories as CommonCategories
+from checkers.common import CarriageReturnChecker
+from checkers.changelog import ChangeLogChecker
+from checkers.cpp import CppChecker
+from checkers.jsonchecker import JSONChecker
+from checkers.png import PNGChecker
+from checkers.python import PythonChecker
+from checkers.test_expectations import TestExpectationsChecker
+from checkers.text import TextChecker
+from checkers.watchlist import WatchListChecker
+from checkers.xcodeproj import XcodeProjectFileChecker
+from checkers.xml import XMLChecker
+from error_handlers import DefaultStyleErrorHandler
+from filter import FilterConfiguration
+from optparser import ArgumentParser
+from optparser import DefaultCommandOptionValues
+from webkitpy.common.system.logutils import configure_logging as _configure_logging
+
+
+_log = logging.getLogger(__name__)
+
+
+# These are default option values for the command-line option parser.
+_DEFAULT_MIN_CONFIDENCE = 1
+_DEFAULT_OUTPUT_FORMAT = 'emacs'
+
+
+# FIXME: For style categories we will never want to have, remove them.
+#        For categories for which we want to have similar functionality,
+#        modify the implementation and enable them.
+#
+# Throughout this module, we use "filter rule" rather than "filter"
+# for an individual boolean filter flag like "+foo".  This allows us to
+# reserve "filter" for what one gets by collectively applying all of
+# the filter rules.
+#
+# The base filter rules are the filter rules that begin the list of
+# filter rules used to check style.  For example, these rules precede
+# any user-specified filter rules.  Since by default all categories are
+# checked, this list should normally include only rules that begin
+# with a "-" sign.
+_BASE_FILTER_RULES = [
+    '-build/endif_comment',
+    '-build/include_what_you_use',  # <string> for std::string
+    '-build/storage_class',  # const static
+    '-legal/copyright',
+    '-readability/multiline_comment',
+    '-readability/braces',  # int foo() {};
+    '-readability/fn_size',
+    '-readability/casting',
+    '-readability/function',
+    '-runtime/arrays',  # variable length array
+    '-runtime/casting',
+    '-runtime/sizeof',
+    '-runtime/explicit',  # explicit
+    '-runtime/virtual',  # virtual dtor
+    '-runtime/printf',
+    '-runtime/threadsafe_fn',
+    '-runtime/rtti',
+    '-whitespace/blank_line',
+    '-whitespace/end_of_line',
+    # List Python pep8 categories last.
+    #
+    # Because much of WebKit's Python code base does not abide by the
+    # PEP8 79 character limit, we ignore the 79-character-limit category
+    # pep8/E501 for now.
+    #
+    # FIXME: Consider bringing WebKit's Python code base into conformance
+    #        with the 79 character limit, or some higher limit that is
+    #        agreeable to the WebKit project.
+    '-pep8/E501',
+    ]
+
+
+# The path-specific filter rules.
+#
+# This list is order sensitive.  Only the first path substring match
+# is used.  See the FilterConfiguration documentation in filter.py
+# for more information on this list.
+#
+# Each string appearing in this nested list should have at least
+# one associated unit test assertion.  These assertions are located,
+# for example, in the test_path_rules_specifier() unit test method of
+# checker_unittest.py.
+_PATH_RULES_SPECIFIER = [
+    # Files in these directories are consumers of the WebKit
+    # API and therefore do not follow the same header including
+    # discipline as WebCore.
+
+    ([# TestNetscapePlugIn has no config.h and uses funny names like
+      # NPP_SetWindow.
+      "Tools/DumpRenderTree/TestNetscapePlugIn/",
+      # The API test harnesses have no config.h and use funny macros like
+      # TEST_CLASS_NAME.
+      "Tools/WebKitAPITest/",
+      "Tools/TestWebKitAPI/",
+      "Source/WebKit/qt/tests/qdeclarativewebview"],
+     ["-build/include",
+      "-readability/naming"]),
+    ([# There is no clean way to avoid "yy_*" names used by flex.
+      "Source/WebCore/css/CSSParser.cpp",
+      # Qt code uses '_' in some places (such as private slots
+      # and xxx_data methods in tests)
+      "Source/JavaScriptCore/qt/",
+      "Source/WebKit/qt/tests/",
+      "Source/WebKit/qt/declarative/",
+      "Source/WebKit/qt/examples/"],
+     ["-readability/naming"]),
+
+    ([# The Qt APIs use Qt declaration style, it puts the * to
+      # the variable name, not to the class.
+      "Source/WebKit/qt/Api/"],
+     ["-readability/naming",
+      "-whitespace/declaration"]),
+
+    ([# Qt's MiniBrowser has no config.h
+      "Tools/MiniBrowser/qt",
+      "Tools/MiniBrowser/qt/raw"],
+     ["-build/include"]),
+
+    ([# The Qt APIs use Qt/QML naming style, which includes
+      # naming parameters in h files.
+      "Source/WebKit2/UIProcess/API/qt"],
+     ["-readability/parameter_name"]),
+
+    ([# The GTK+ APIs use GTK+ naming style, which includes
+      # lower-cased, underscore-separated values, whitespace before
+      # parens for function calls, and always having variable names.
+      # Also, GTK+ allows the use of NULL.
+      "Source/WebCore/bindings/scripts/test/GObject",
+      "Source/WebKit/gtk/webkit/",
+      "Tools/DumpRenderTree/gtk/"],
+     ["-readability/naming",
+      "-readability/parameter_name",
+      "-readability/null",
+      "-whitespace/parens"]),
+    ([# Header files in ForwardingHeaders have no header guards or
+      # exceptional header guards (e.g., WebCore_FWD_Debugger_h).
+      "/ForwardingHeaders/"],
+     ["-build/header_guard"]),
+    ([# assembler has lots of opcodes that use underscores, so
+      # we don't check for underscores in that directory.
+      "Source/JavaScriptCore/assembler/",
+      "Source/JavaScriptCore/jit/JIT"],
+     ["-readability/naming/underscores"]),
+    ([# JITStubs has an unusual syntax that causes false alarms for a few checks.
+      "JavaScriptCore/jit/JITStubs.cpp"],
+     ["-readability/parameter_name",
+      "-whitespace/parens"]),
+
+    ([# The EFL APIs use EFL naming style, which includes
+      # both lower-cased and camel-cased, underscore-separated
+      # values.
+      "Source/WebKit/efl/ewk/",
+      "Source/WebKit2/UIProcess/API/efl/"],
+     ["-readability/naming",
+      "-readability/parameter_name"]),
+    ([# EWebLauncher and MiniBrowser are simple EFL applications.
+      # They follow the EFL coding style and have no config.h.
+      "Tools/EWebLauncher/",
+      "Tools/MiniBrowser/efl/"],
+     ["-readability/naming",
+      "-readability/parameter_name",
+      "-whitespace/declaration",
+      "-build/include_order"]),
+
+    # WebKit2 rules:
+    # WebKit2 and certain directories have idiosyncrasies.
+    ([# NPAPI has function names with underscores.
+      "Source/WebKit2/WebProcess/Plugins/Netscape"],
+     ["-readability/naming"]),
+    ([# The WebKit2 C API has names with underscores and whitespace-aligned
+      # struct members. Also, we allow unnecessary parameter names in
+      # WebKit2 APIs because we're matching CF's header style.
+      "Source/WebKit2/UIProcess/API/C/",
+      "Source/WebKit2/Shared/API/c/",
+      "Source/WebKit2/WebProcess/InjectedBundle/API/c/"],
+     ["-readability/naming",
+      "-readability/parameter_name",
+      "-whitespace/declaration"]),
+    ([# These files define GObjects, which implies some definitions of
+      # variables and functions containing underscores.
+      "Source/WebCore/platform/graphics/gstreamer/VideoSinkGStreamer1.cpp",
+      "Source/WebCore/platform/graphics/gstreamer/VideoSinkGStreamer.cpp",
+      "Source/WebCore/platform/graphics/gstreamer/WebKitWebSourceGStreamer.cpp",
+      "Source/WebCore/platform/audio/gstreamer/WebKitWebAudioSourceGStreamer.cpp",
+      "Source/WebCore/platform/network/soup/ProxyResolverSoup.cpp",
+      "Source/WebCore/platform/network/soup/ProxyResolverSoup.h"],
+     ["-readability/naming"]),
+
+    # For third-party Python code, keep only the following checks--
+    #
+    #   No tabs: to avoid having to set the SVN allow-tabs property.
+    #   No trailing white space: since this is easy to correct.
+    #   No carriage-return line endings: since this is easy to correct.
+    #
+    (["webkitpy/thirdparty/"],
+     ["-",
+      "+pep8/W191",  # Tabs
+      "+pep8/W291",  # Trailing white space
+      "+whitespace/carriage_return"]),
+
+    ([# glu's libtess is third-party code, and doesn't follow WebKit style.
+      "Source/ThirdParty/glu"],
+     ["-readability",
+      "-whitespace",
+      "-build/header_guard",
+      "-build/include_order"]),
+
+    ([# There is no way to avoid the symbols __jit_debug_register_code
+      # and __jit_debug_descriptor when integrating with gdb.
+      "Source/JavaScriptCore/jit/GDBInterface.cpp"],
+     ["-readability/naming"]),
+
+    ([# On some systems a trailing CR causes parser failures.
+      "Source/JavaScriptCore/parser/Keywords.table"],
+     ["+whitespace/carriage_return"]),
+]
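+
+# Illustrative sketch of how the path-specific rules are consumed (these
+# particular outcomes are asserted in test_path_rules_specifier() in
+# checker_unittest.py):
+#
+#     config = FilterConfiguration(path_specific=_PATH_RULES_SPECIFIER)
+#     config.should_check("readability/naming", "random_path.cpp")  # True
+#     config.should_check("readability/naming",
+#                         "Source/WebKit/gtk/webkit/webkit.h")      # False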
+
+
+_CPP_FILE_EXTENSIONS = [
+    'c',
+    'cpp',
+    'h',
+    ]
+
+_JSON_FILE_EXTENSION = 'json'
+
+_PYTHON_FILE_EXTENSION = 'py'
+
+_TEXT_FILE_EXTENSIONS = [
+    'ac',
+    'cc',
+    'cgi',
+    'css',
+    'exp',
+    'flex',
+    'gyp',
+    'gypi',
+    'html',
+    'idl',
+    'in',
+    'js',
+    'mm',
+    'php',
+    'pl',
+    'pm',
+    'pri',
+    'pro',
+    'rb',
+    'sh',
+    'table',
+    'txt',
+    'wm',
+    'xhtml',
+    'y',
+    ]
+
+_XCODEPROJ_FILE_EXTENSION = 'pbxproj'
+
+_XML_FILE_EXTENSIONS = [
+    'vcproj',
+    'vsprops',
+    ]
+
+_PNG_FILE_EXTENSION = 'png'
+
+# Files to skip that are less obvious.
+#
+# Some files should be skipped when checking style. For example,
+# WebKit maintains some files in Mozilla style on purpose to ease
+# future merges.
+_SKIPPED_FILES_WITH_WARNING = [
+    "Source/WebKit/gtk/tests/",
+    # All WebKit*.h files in Source/WebKit2/UIProcess/API/gtk,
+    # except those ending in ...Private.h, are GTK+ API headers,
+    # which differ greatly from WebKit coding style.
+    re.compile(r'Source/WebKit2/UIProcess/API/gtk/WebKit(?!.*Private\.h).*\.h$'),
+    'Source/WebKit2/UIProcess/API/gtk/webkit2.h']
+
+# Files to skip that are more common or obvious.
+#
+# This list should be in addition to files with FileType.NONE.  Files
+# with FileType.NONE are automatically skipped without warning.
+_SKIPPED_FILES_WITHOUT_WARNING = [
+    "LayoutTests" + os.path.sep,
+    ]
+
+# Extensions of files which are allowed to contain carriage returns.
+_CARRIAGE_RETURN_ALLOWED_FILE_EXTENSIONS = [
+    'png',
+    'vcproj',
+    'vsprops',
+    ]
+
+# The maximum number of errors to report per file, per category.
+# If a category is not a key, then it has no maximum.
+_MAX_REPORTS_PER_CATEGORY = {
+    "whitespace/carriage_return": 1
+}
+
+
+def _all_categories():
+    """Return the set of all categories used by check-webkit-style."""
+    # Take the union across all checkers.
+    categories = CommonCategories.union(CppChecker.categories)
+    categories = categories.union(JSONChecker.categories)
+    categories = categories.union(TestExpectationsChecker.categories)
+    categories = categories.union(ChangeLogChecker.categories)
+    categories = categories.union(PNGChecker.categories)
+
+    # FIXME: Consider adding all of the pep8 categories.  Since they
+    #        are not too meaningful for documentation purposes, for
+    #        now we add only the categories needed for the unit tests
+    #        (which validate the consistency of the configuration
+    #        settings against the known categories, etc).
+    categories = categories.union(["pep8/W191", "pep8/W291", "pep8/E501"])
+
+    return categories
+
+
+def _check_webkit_style_defaults():
+    """Return the default command-line options for check-webkit-style."""
+    return DefaultCommandOptionValues(min_confidence=_DEFAULT_MIN_CONFIDENCE,
+                                      output_format=_DEFAULT_OUTPUT_FORMAT)
+
+
+# This function exists so that optparser does not have to import from checker.
+def check_webkit_style_parser():
+    all_categories = _all_categories()
+    default_options = _check_webkit_style_defaults()
+    return ArgumentParser(all_categories=all_categories,
+                          base_filter_rules=_BASE_FILTER_RULES,
+                          default_options=default_options)
+
+
+def check_webkit_style_configuration(options):
+    """Return a StyleProcessorConfiguration instance for check-webkit-style.
+
+    Args:
+      options: A CommandOptionValues instance.
+
+    """
+    filter_configuration = FilterConfiguration(
+                               base_rules=_BASE_FILTER_RULES,
+                               path_specific=_PATH_RULES_SPECIFIER,
+                               user_rules=options.filter_rules)
+
+    return StyleProcessorConfiguration(filter_configuration=filter_configuration,
+               max_reports_per_category=_MAX_REPORTS_PER_CATEGORY,
+               min_confidence=options.min_confidence,
+               output_format=options.output_format,
+               stderr_write=sys.stderr.write)
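+
+# A minimal wiring sketch (mirrors CheckWebKitStyleFunctionTest in
+# checker_unittest.py; CommandOptionValues comes from optparser):
+#
+#     options = CommandOptionValues()  # default option values
+#     configuration = check_webkit_style_configuration(options)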
+
+
+def _create_log_handlers(stream):
+    """Create and return a default list of logging.Handler instances.
+
+    Format WARNING messages and above to display the logging level, and
+    messages strictly below WARNING not to display it.
+
+    Args:
+      stream: See the configure_logging() docstring.
+
+    """
+    # Handles logging.WARNING and above.
+    error_handler = logging.StreamHandler(stream)
+    error_handler.setLevel(logging.WARNING)
+    formatter = logging.Formatter("%(levelname)s: %(message)s")
+    error_handler.setFormatter(formatter)
+
+    # Create a logging.Filter instance that only accepts messages
+    # below WARNING (i.e. filters out anything WARNING or above).
+    non_error_filter = logging.Filter()
+    # The filter method accepts a logging.LogRecord instance.
+    non_error_filter.filter = lambda record: record.levelno < logging.WARNING
+
+    non_error_handler = logging.StreamHandler(stream)
+    non_error_handler.addFilter(non_error_filter)
+    formatter = logging.Formatter("%(message)s")
+    non_error_handler.setFormatter(formatter)
+
+    return [error_handler, non_error_handler]
+
+
+def _create_debug_log_handlers(stream):
+    """Create and return a list of logging.Handler instances for debugging.
+
+    Args:
+      stream: See the configure_logging() docstring.
+
+    """
+    handler = logging.StreamHandler(stream)
+    formatter = logging.Formatter("%(name)s: %(levelname)-8s %(message)s")
+    handler.setFormatter(formatter)
+
+    return [handler]
+
+
+def configure_logging(stream, logger=None, is_verbose=False):
+    """Configure logging, and return the list of handlers added.
+
+    Returns:
+      A list of references to the logging handlers added to the root
+      logger.  This allows the caller to later remove the handlers
+      using logger.removeHandler.  This is useful primarily during unit
+      testing where the caller may want to configure logging temporarily
+      and then undo the configuring.
+
+    Args:
+      stream: A file-like object to which to log.  The stream must
+              define an "encoding" data attribute, or else logging
+              raises an error.
+      logger: A logging.Logger instance to configure.  This parameter
+              should be used only in unit tests.  Defaults to the
+              root logger.
+      is_verbose: A boolean indicating whether logging should be verbose.
+
+    """
+    # If the stream does not define an "encoding" data attribute, the
+    # logging module can throw an error like the following:
+    #
+    # Traceback (most recent call last):
+    #   File "/System/Library/Frameworks/Python.framework/Versions/2.6/...
+    #         lib/python2.6/logging/__init__.py", line 761, in emit
+    #     self.stream.write(fs % msg.encode(self.stream.encoding))
+    # LookupError: unknown encoding: unknown
+    if logger is None:
+        logger = logging.getLogger()
+
+    if is_verbose:
+        logging_level = logging.DEBUG
+        handlers = _create_debug_log_handlers(stream)
+    else:
+        logging_level = logging.INFO
+        handlers = _create_log_handlers(stream)
+
+    handlers = _configure_logging(logging_level=logging_level, logger=logger,
+                                  handlers=handlers)
+
+    return handlers
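+
+# A minimal sketch of temporary logging configuration and teardown (mirrors
+# ConfigureLoggingTestBase in checker_unittest.py; "my_logger" is a
+# hypothetical logging.Logger instance):
+#
+#     handlers = configure_logging(stream=sys.stderr, logger=my_logger,
+#                                  is_verbose=False)
+#     ...  # Log messages as usual.
+#     for handler in handlers:
+#         my_logger.removeHandler(handler)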
+
+
+# Enum-like idiom
+class FileType:
+
+    NONE = 0  # FileType.NONE evaluates to False.
+    # Alphabetize remaining types
+    CHANGELOG = 1
+    CPP = 2
+    JSON = 3
+    PNG = 4
+    PYTHON = 5
+    TEXT = 6
+    WATCHLIST = 7
+    XML = 8
+    XCODEPROJ = 9
+
+
+class CheckerDispatcher(object):
+
+    """Supports determining whether and how to check style, based on path."""
+
+    def _file_extension(self, file_path):
+        """Return the file extension without the leading dot."""
+        return os.path.splitext(file_path)[1].lstrip(".")
+
+    def _should_skip_file_path(self, file_path, skip_array_entry):
+        match = re.search(r"\s*png$", file_path)
+        if match:
+            return False
+        if isinstance(skip_array_entry, str):
+            if file_path.find(skip_array_entry) >= 0:
+                return True
+        elif skip_array_entry.match(file_path):
+            return True
+        return False
+
+    def should_skip_with_warning(self, file_path):
+        """Return whether the given file should be skipped with a warning."""
+        for skipped_file in _SKIPPED_FILES_WITH_WARNING:
+            if self._should_skip_file_path(file_path, skipped_file):
+                return True
+        return False
+
+    def should_skip_without_warning(self, file_path):
+        """Return whether the given file should be skipped without a warning."""
+        if not self._file_type(file_path):  # FileType.NONE.
+            return True
+        # Since "LayoutTests" is in _SKIPPED_FILES_WITHOUT_WARNING, make
+        # an exception to prevent files like "LayoutTests/ChangeLog" and
+        # "LayoutTests/ChangeLog-2009-06-16" from being skipped.
+        # Files like 'TestExpectations' should also not be skipped.
+        #
+        # FIXME: Figure out a good way to avoid having to add special logic
+        #        for this special case.
+        basename = os.path.basename(file_path)
+        if basename.startswith('ChangeLog'):
+            return False
+        elif basename == 'TestExpectations':
+            return False
+        for skipped_file in _SKIPPED_FILES_WITHOUT_WARNING:
+            if self._should_skip_file_path(file_path, skipped_file):
+                return True
+        return False
+
+    def should_check_and_strip_carriage_returns(self, file_path):
+        return self._file_extension(file_path) not in _CARRIAGE_RETURN_ALLOWED_FILE_EXTENSIONS
+
+    def _file_type(self, file_path):
+        """Return the file type corresponding to the given file."""
+        file_extension = self._file_extension(file_path)
+
+        if (file_extension in _CPP_FILE_EXTENSIONS) or (file_path == '-'):
+            # FIXME: Do something about the comment below and the issue it
+            #        raises since cpp_style already relies on the extension.
+            #
+            # Treat stdin as C++. Since the extension is unknown when
+            # reading from stdin, cpp_style tests should not rely on
+            # the extension.
+            return FileType.CPP
+        elif file_extension == _JSON_FILE_EXTENSION:
+            return FileType.JSON
+        elif file_extension == _PYTHON_FILE_EXTENSION:
+            return FileType.PYTHON
+        elif file_extension in _XML_FILE_EXTENSIONS:
+            return FileType.XML
+        elif os.path.basename(file_path).startswith('ChangeLog'):
+            return FileType.CHANGELOG
+        elif os.path.basename(file_path) == 'watchlist':
+            return FileType.WATCHLIST
+        elif file_extension == _XCODEPROJ_FILE_EXTENSION:
+            return FileType.XCODEPROJ
+        elif file_extension == _PNG_FILE_EXTENSION:
+            return FileType.PNG
+        elif ((not file_extension and os.path.join("Tools", "Scripts") in file_path) or
+              file_extension in _TEXT_FILE_EXTENSIONS or os.path.basename(file_path) == 'TestExpectations'):
+            return FileType.TEXT
+        else:
+            return FileType.NONE
+
+    def _create_checker(self, file_type, file_path, handle_style_error,
+                        min_confidence):
+        """Instantiate and return a style checker based on file type."""
+        if file_type == FileType.NONE:
+            checker = None
+        elif file_type == FileType.CHANGELOG:
+            should_line_be_checked = None
+            if handle_style_error:
+                should_line_be_checked = handle_style_error.should_line_be_checked
+            checker = ChangeLogChecker(file_path, handle_style_error, should_line_be_checked)
+        elif file_type == FileType.CPP:
+            file_extension = self._file_extension(file_path)
+            checker = CppChecker(file_path, file_extension,
+                                 handle_style_error, min_confidence)
+        elif file_type == FileType.JSON:
+            checker = JSONChecker(file_path, handle_style_error)
+        elif file_type == FileType.PYTHON:
+            checker = PythonChecker(file_path, handle_style_error)
+        elif file_type == FileType.XML:
+            checker = XMLChecker(file_path, handle_style_error)
+        elif file_type == FileType.XCODEPROJ:
+            checker = XcodeProjectFileChecker(file_path, handle_style_error)
+        elif file_type == FileType.PNG:
+            checker = PNGChecker(file_path, handle_style_error)
+        elif file_type == FileType.TEXT:
+            basename = os.path.basename(file_path)
+            if basename == 'TestExpectations':
+                checker = TestExpectationsChecker(file_path, handle_style_error)
+            else:
+                checker = TextChecker(file_path, handle_style_error)
+        elif file_type == FileType.WATCHLIST:
+            checker = WatchListChecker(file_path, handle_style_error)
+        else:
+            raise ValueError('Invalid file type "%(file_type)s": the only valid file types '
+                             "are %(NONE)s, %(CPP)s, and %(TEXT)s."
+                             % {"file_type": file_type,
+                                "NONE": FileType.NONE,
+                                "CPP": FileType.CPP,
+                                "TEXT": FileType.TEXT})
+
+        return checker
+
+    def dispatch(self, file_path, handle_style_error, min_confidence):
+        """Instantiate and return a style checker based on file path."""
+        file_type = self._file_type(file_path)
+
+        checker = self._create_checker(file_type,
+                                       file_path,
+                                       handle_style_error,
+                                       min_confidence)
+        return checker
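+
+# Illustrative dispatch sketch (mirrors CheckerDispatcherDispatchTest in
+# checker_unittest.py, which constructs the same DefaultStyleErrorHandler):
+#
+#     dispatcher = CheckerDispatcher()
+#     handler = DefaultStyleErrorHandler('', None, None, [])
+#     checker = dispatcher.dispatch("foo.cpp", handler, min_confidence=3)
+#     # "foo.cpp" yields a CppChecker; unrecognized paths yield None.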
+
+
+# FIXME: Remove the stderr_write attribute from this class and replace
+#        its use with calls to a logging module logger.
+class StyleProcessorConfiguration(object):
+
+    """Stores configuration values for the StyleProcessor class.
+
+    Attributes:
+      min_confidence: An integer between 1 and 5 inclusive that is the
+                      minimum confidence level of style errors to report.
+
+      max_reports_per_category: The maximum number of errors to report
+                                per category, per file.
+
+      stderr_write: A function that takes a string as a parameter and
+                    serves as stderr.write.
+
+    """
+
+    def __init__(self,
+                 filter_configuration,
+                 max_reports_per_category,
+                 min_confidence,
+                 output_format,
+                 stderr_write):
+        """Create a StyleProcessorConfiguration instance.
+
+        Args:
+          filter_configuration: A FilterConfiguration instance.  The default
+                                is the "empty" filter configuration, which
+                                means that all errors should be checked.
+
+          max_reports_per_category: The maximum number of errors to report
+                                    per category, per file.
+
+          min_confidence: An integer between 1 and 5 inclusive that is the
+                          minimum confidence level of style errors to report.
+                          The default is 1, which reports all style errors.
+
+          output_format: A string that is the output format.  The supported
+                         output formats are "emacs" which emacs can parse
+                         and "vs7" which Microsoft Visual Studio 7 can parse.
+
+          stderr_write: A function that takes a string as a parameter and
+                        serves as stderr.write.
+
+        """
+        self._filter_configuration = filter_configuration
+        self._output_format = output_format
+
+        self.max_reports_per_category = max_reports_per_category
+        self.min_confidence = min_confidence
+        self.stderr_write = stderr_write
+
+    def is_reportable(self, category, confidence_in_error, file_path):
+        """Return whether an error is reportable.
+
+        An error is reportable if both the confidence in the error is
+        at least the minimum confidence level and the current filter
+        says the category should be checked for the given path.
+
+        Args:
+          category: A string that is a style category.
+          confidence_in_error: An integer between 1 and 5 inclusive that is
+                               the application's confidence in the error.
+                               A higher number means greater confidence.
+          file_path: The path of the file being checked
+
+        """
+        if confidence_in_error < self.min_confidence:
+            return False
+
+        return self._filter_configuration.should_check(category, file_path)
+
+    def write_style_error(self,
+                          category,
+                          confidence_in_error,
+                          file_path,
+                          line_number,
+                          message):
+        """Write a style error to the configured stderr."""
+        if self._output_format == 'vs7':
+            format_string = "%s(%s):  %s  [%s] [%d]\n"
+        else:
+            format_string = "%s:%s:  %s  [%s] [%d]\n"
+
+        self.stderr_write(format_string % (file_path,
+                                           line_number,
+                                           message,
+                                           category,
+                                           confidence_in_error))
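+
+# Output produced by write_style_error() for each supported format (these
+# exact strings are asserted in StyleProcessorConfigurationTest):
+#
+#     "emacs":  foo.h:100:  message  [whitespace/tab] [5]
+#     "vs7":    foo.h(100):  message  [whitespace/tab] [5]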
+
+
+class ProcessorBase(object):
+
+    """The base class for processors of lists of lines."""
+
+    def should_process(self, file_path):
+        """Return whether the file at file_path should be processed.
+
+        The TextFileReader class calls this method prior to reading in
+        the lines of a file.  Use this method, for example, to prevent
+        the style checker from reading binary files into memory.
+
+        """
+        raise NotImplementedError('Subclasses should implement.')
+
+    def process(self, lines, file_path, **kwargs):
+        """Process lines of text read from a file.
+
+        Args:
+          lines: A list of lines of text to process.
+          file_path: The path from which the lines were read.
+          **kwargs: This argument signifies that the process() method of
+                    subclasses of ProcessorBase may support additional
+                    keyword arguments.
+                        For example, a style checker's check() method
+                    may support a "reportable_lines" parameter that represents
+                    the line numbers of the lines for which style errors
+                    should be reported.
+
+        """
+        raise NotImplementedError('Subclasses should implement.')
+
+
+class StyleProcessor(ProcessorBase):
+
+    """A ProcessorBase for checking style.
+
+    Attributes:
+      error_count: An integer that is the total number of reported
+                   errors for the lifetime of this instance.
+
+    """
+
+    def __init__(self, configuration, mock_dispatcher=None,
+                 mock_increment_error_count=None,
+                 mock_carriage_checker_class=None):
+        """Create an instance.
+
+        Args:
+          configuration: A StyleProcessorConfiguration instance.
+          mock_dispatcher: A mock CheckerDispatcher instance.  This
+                           parameter is for unit testing.  Defaults to a
+                           CheckerDispatcher instance.
+          mock_increment_error_count: A mock error-count incrementer.
+          mock_carriage_checker_class: A mock class for checking and
+                                       transforming carriage returns.
+                                       This parameter is for unit testing.
+                                       Defaults to CarriageReturnChecker.
+
+        """
+        if mock_dispatcher is None:
+            dispatcher = CheckerDispatcher()
+        else:
+            dispatcher = mock_dispatcher
+
+        if mock_increment_error_count is None:
+            # The following blank line is present to avoid flagging by pep8.py.
+
+            def increment_error_count():
+                """Increment the total count of reported errors."""
+                self.error_count += 1
+        else:
+            increment_error_count = mock_increment_error_count
+
+        if mock_carriage_checker_class is None:
+            # This needs to be a class rather than an instance since the
+            # process() method instantiates one using parameters.
+            carriage_checker_class = CarriageReturnChecker
+        else:
+            carriage_checker_class = mock_carriage_checker_class
+
+        self.error_count = 0
+
+        self._carriage_checker_class = carriage_checker_class
+        self._configuration = configuration
+        self._dispatcher = dispatcher
+        self._increment_error_count = increment_error_count
+
+    def should_process(self, file_path):
+        """Return whether the file should be checked for style."""
+        if self._dispatcher.should_skip_without_warning(file_path):
+            return False
+        if self._dispatcher.should_skip_with_warning(file_path):
+            _log.warn('File exempt from style guide. Skipping: "%s"'
+                      % file_path)
+            return False
+        return True
+
+    def process(self, lines, file_path, line_numbers=None):
+        """Check the given lines for style.
+
+        Args:
+          lines: A list of all lines in the file to check.
+          file_path: The path of the file to process.  If possible, the path
+                     should be relative to the source root.  Otherwise,
+                     path-specific logic may not behave as expected.
+          line_numbers: A list of line numbers of the lines for which
+                        style errors should be reported, or None if errors
+                        for all lines should be reported.  When not None, this
+                        list normally contains the line numbers corresponding
+                        to the modified lines of a patch.
+
+        """
+        _log.debug("Checking style: " + file_path)
+
+        style_error_handler = DefaultStyleErrorHandler(
+            configuration=self._configuration,
+            file_path=file_path,
+            increment_error_count=self._increment_error_count,
+            line_numbers=line_numbers)
+
+        carriage_checker = self._carriage_checker_class(style_error_handler)
+
+        # Check for and remove trailing carriage returns ("\r").
+        if self._dispatcher.should_check_and_strip_carriage_returns(file_path):
+            lines = carriage_checker.check(lines)
+
+        min_confidence = self._configuration.min_confidence
+        checker = self._dispatcher.dispatch(file_path,
+                                            style_error_handler,
+                                            min_confidence)
+
+        if checker is None:
+            raise AssertionError("File should not be checked: '%s'" % file_path)
+
+        _log.debug("Using class: " + checker.__class__.__name__)
+
+        checker.check(lines)
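+
+# End-to-end sketch (mirrors StyleProcessor_EndToEndTest in
+# checker_unittest.py; sys.stderr.write stands in for the stderr_write
+# callable used there):
+#
+#     configuration = StyleProcessorConfiguration(
+#         filter_configuration=FilterConfiguration(),
+#         max_reports_per_category={},
+#         min_confidence=3,
+#         output_format="vs7",
+#         stderr_write=sys.stderr.write)
+#     processor = StyleProcessor(configuration)
+#     processor.process(lines=['line1', 'Line with tab:\t'],
+#                       file_path='foo.txt')
+#     # processor.error_count is now 1 (the tab character).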
diff --git a/Tools/Scripts/webkitpy/style/checker_unittest.py b/Tools/Scripts/webkitpy/style/checker_unittest.py
new file mode 100755
index 0000000..d834fd5
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checker_unittest.py
@@ -0,0 +1,894 @@
+#!/usr/bin/python
+# -*- coding: utf-8; -*-
+#
+# Copyright (C) 2009 Google Inc. All rights reserved.
+# Copyright (C) 2009 Torch Mobile Inc.
+# Copyright (C) 2009 Apple Inc. All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for style.py."""
+
+import logging
+import os
+import unittest
+
+import checker as style
+from webkitpy.common.system.logtesting import LogTesting, TestLogStream
+from checker import _BASE_FILTER_RULES
+from checker import _MAX_REPORTS_PER_CATEGORY
+from checker import _PATH_RULES_SPECIFIER as PATH_RULES_SPECIFIER
+from checker import _all_categories
+from checker import check_webkit_style_configuration
+from checker import check_webkit_style_parser
+from checker import configure_logging
+from checker import CheckerDispatcher
+from checker import ProcessorBase
+from checker import StyleProcessor
+from checker import StyleProcessorConfiguration
+from checkers.changelog import ChangeLogChecker
+from checkers.cpp import CppChecker
+from checkers.jsonchecker import JSONChecker
+from checkers.python import PythonChecker
+from checkers.text import TextChecker
+from checkers.xml import XMLChecker
+from error_handlers import DefaultStyleErrorHandler
+from filter import validate_filter_rules
+from filter import FilterConfiguration
+from optparser import ArgumentParser
+from optparser import CommandOptionValues
+from webkitpy.common.system.logtesting import LoggingTestCase
+from webkitpy.style.filereader import TextFileReader
+
+
+class ConfigureLoggingTestBase(unittest.TestCase):
+
+    """Base class for testing configure_logging().
+
+    Sub-classes should implement:
+
+      is_verbose: The is_verbose value to pass to configure_logging().
+
+    """
+
+    def setUp(self):
+        is_verbose = self.is_verbose
+
+        log_stream = TestLogStream(self)
+
+        # Use a logger other than the root logger (and not one whose name
+        # is prefixed with "webkit") so as not to conflict with
+        # test-webkitpy logging.
+        logger = logging.getLogger("unittest")
+
+        # Configure the test logger not to pass messages along to the
+        # root logger.  This prevents test messages from being
+        # propagated to loggers used by test-webkitpy logging (e.g.
+        # the root logger).
+        logger.propagate = False
+
+        self._handlers = configure_logging(stream=log_stream, logger=logger,
+                                           is_verbose=is_verbose)
+        self._log = logger
+        self._log_stream = log_stream
+
+    def tearDown(self):
+        """Reset logging to its original state.
+
+        This method ensures that the logging configuration set up
+        for a unit test does not affect logging in other unit tests.
+
+        """
+        logger = self._log
+        for handler in self._handlers:
+            logger.removeHandler(handler)
+
+    def assert_log_messages(self, messages):
+        """Assert that the logged messages equal the given messages."""
+        self._log_stream.assertMessages(messages)
+
+
+class ConfigureLoggingTest(ConfigureLoggingTestBase):
+
+    """Tests the configure_logging() function."""
+
+    is_verbose = False
+
+    def test_warning_message(self):
+        self._log.warn("test message")
+        self.assert_log_messages(["WARNING: test message\n"])
+
+    def test_below_warning_message(self):
+        # We test the boundary case of a logging level equal to 29.
+        # In practice, we will probably only be calling log.info(),
+        # which corresponds to a logging level of 20.
+        level = logging.WARNING - 1  # Equals 29.
+        self._log.log(level, "test message")
+        self.assert_log_messages(["test message\n"])
+
+    def test_debug_message(self):
+        self._log.debug("test message")
+        self.assert_log_messages([])
+
+    def test_two_messages(self):
+        self._log.info("message1")
+        self._log.info("message2")
+        self.assert_log_messages(["message1\n", "message2\n"])
+
+
+class ConfigureLoggingVerboseTest(ConfigureLoggingTestBase):
+
+    """Tests the configure_logging() function with is_verbose True."""
+
+    is_verbose = True
+
+    def test_debug_message(self):
+        self._log.debug("test message")
+        self.assert_log_messages(["unittest: DEBUG    test message\n"])
+
+
+class GlobalVariablesTest(unittest.TestCase):
+
+    """Tests validity of the global variables."""
+
+    def _all_categories(self):
+        return _all_categories()
+
+    def defaults(self):
+        return style._check_webkit_style_defaults()
+
+    def test_webkit_base_filter_rules(self):
+        base_filter_rules = _BASE_FILTER_RULES
+        defaults = self.defaults()
+        already_seen = []
+        validate_filter_rules(base_filter_rules, self._all_categories())
+        # Also do some additional checks.
+        for rule in base_filter_rules:
+            # Check no leading or trailing white space.
+            self.assertEquals(rule, rule.strip())
+            # All categories are on by default, so defaults should
+            # begin with -.
+            self.assertTrue(rule.startswith('-'))
+            # Check no rule occurs twice.
+            self.assertFalse(rule in already_seen)
+            already_seen.append(rule)
+
+    def test_defaults(self):
+        """Check that default arguments are valid."""
+        default_options = self.defaults()
+
+        # FIXME: We should not need to call parse() to determine
+        #        whether the default arguments are valid.
+        parser = ArgumentParser(all_categories=self._all_categories(),
+                                base_filter_rules=[],
+                                default_options=default_options)
+        # No need to test the return value here since we test parse()
+        # on valid arguments elsewhere.
+        #
+        # The default options are valid: no error or SystemExit.
+        parser.parse(args=[])
+
+    def test_path_rules_specifier(self):
+        all_categories = self._all_categories()
+        for (sub_paths, path_rules) in PATH_RULES_SPECIFIER:
+            validate_filter_rules(path_rules, self._all_categories())
+
+        config = FilterConfiguration(path_specific=PATH_RULES_SPECIFIER)
+
+        def assertCheck(path, category):
+            """Assert that the given category should be checked."""
+            message = ('Should check category "%s" for path "%s".'
+                       % (category, path))
+            self.assertTrue(config.should_check(category, path), message)
+
+        def assertNoCheck(path, category):
+            """Assert that the given category should not be checked."""
+            message = ('Should not check category "%s" for path "%s".'
+                       % (category, path))
+            self.assertFalse(config.should_check(category, path), message)
+
+        assertCheck("random_path.cpp",
+                    "build/include")
+        assertNoCheck("Tools/WebKitAPITest/main.cpp",
+                      "build/include")
+        assertCheck("random_path.cpp",
+                    "readability/naming")
+        assertNoCheck("Source/WebKit/gtk/webkit/webkit.h",
+                      "readability/naming")
+        assertNoCheck("Tools/DumpRenderTree/gtk/DumpRenderTree.cpp",
+                      "readability/null")
+        assertNoCheck("Source/WebKit/efl/ewk/ewk_view.h",
+                      "readability/naming")
+        assertNoCheck("Source/WebCore/css/CSSParser.cpp",
+                      "readability/naming")
+
+        # Test if Qt exceptions are indeed working
+        assertCheck("Source/WebKit/qt/Api/qwebpage.cpp",
+                    "readability/braces")
+        assertCheck("Source/WebKit/qt/tests/qwebelement/tst_qwebelement.cpp",
+                    "readability/braces")
+        assertCheck("Source/WebKit/qt/declarative/platformplugin/WebPlugin.cpp",
+                    "readability/braces")
+        assertCheck("Source/WebKit/qt/examples/platformplugin/WebPlugin.cpp",
+                    "readability/braces")
+        assertNoCheck("Source/WebKit/qt/Api/qwebpage.cpp",
+                      "readability/naming")
+        assertNoCheck("Source/WebKit/qt/tests/qwebelement/tst_qwebelement.cpp",
+                      "readability/naming")
+        assertNoCheck("Source/WebKit/qt/declarative/platformplugin/WebPlugin.cpp",
+                      "readability/naming")
+        assertNoCheck("Source/WebKit/qt/examples/platformplugin/WebPlugin.cpp",
+                      "readability/naming")
+
+        assertNoCheck("Tools/MiniBrowser/qt/UrlLoader.cpp",
+                    "build/include")
+
+        assertNoCheck("Source/WebKit2/UIProcess/API/qt",
+                    "readability/parameter_name")
+
+        assertNoCheck("Source/WebCore/ForwardingHeaders/debugger/Debugger.h",
+                      "build/header_guard")
+
+        assertNoCheck("Source/WebCore/platform/graphics/gstreamer/VideoSinkGStreamer.cpp",
+                      "readability/naming")
+
+        # Third-party Python code: webkitpy/thirdparty
+        path = "Tools/Scripts/webkitpy/thirdparty/mock.py"
+        assertNoCheck(path, "build/include")
+        assertNoCheck(path, "pep8/E401")  # A random pep8 category.
+        assertCheck(path, "pep8/W191")
+        assertCheck(path, "pep8/W291")
+        assertCheck(path, "whitespace/carriage_return")
+
+        # Test if the exception for GDBInterface.cpp is in place.
+        assertNoCheck("Source/JavaScriptCore/jit/GDBInterface.cpp",
+                      "readability/naming")
+
+        # Javascript keywords.
+        assertCheck("Source/JavaScriptCore/parser/Keywords.table", "whitespace/carriage_return")
+
+    def test_max_reports_per_category(self):
+        """Check that _MAX_REPORTS_PER_CATEGORY is valid."""
+        all_categories = self._all_categories()
+        for category in _MAX_REPORTS_PER_CATEGORY.iterkeys():
+            self.assertTrue(category in all_categories,
+                            'Key "%s" is not a category' % category)
+
+
+class CheckWebKitStyleFunctionTest(unittest.TestCase):
+
+    """Tests the functions with names of the form check_webkit_style_*."""
+
+    def test_check_webkit_style_configuration(self):
+        # Exercise the code path to make sure the function does not error out.
+        option_values = CommandOptionValues()
+        configuration = check_webkit_style_configuration(option_values)
+
+    def test_check_webkit_style_parser(self):
+        # Exercise the code path to make sure the function does not error out.
+        parser = check_webkit_style_parser()
+
+
+class CheckerDispatcherSkipTest(unittest.TestCase):
+
+    """Tests the "should skip" methods of the CheckerDispatcher class."""
+
+    def setUp(self):
+        self._dispatcher = CheckerDispatcher()
+
+    def test_should_skip_with_warning(self):
+        """Test should_skip_with_warning()."""
+        # Check skipped files.
+        paths_to_skip = [
+           "Source/WebKit/gtk/tests/testatk.c",
+           "Source/WebKit2/UIProcess/API/gtk/webkit2.h",
+           "Source/WebKit2/UIProcess/API/gtk/WebKitWebView.h",
+           "Source/WebKit2/UIProcess/API/gtk/WebKitLoader.h",
+            ]
+
+        for path in paths_to_skip:
+            self.assertTrue(self._dispatcher.should_skip_with_warning(path),
+                            "Checking: " + path)
+
+        # Verify that some files are not skipped.
+        paths_not_to_skip = [
+           "foo.txt",
+           "Source/WebKit2/UIProcess/API/gtk/HelperClass.cpp",
+           "Source/WebKit2/UIProcess/API/gtk/HelperClass.h",
+           "Source/WebKit2/UIProcess/API/gtk/WebKitWebView.cpp",
+           "Source/WebKit2/UIProcess/API/gtk/WebKitWebViewPrivate.h",
+           "Source/WebKit2/UIProcess/API/gtk/tests/WebViewTest.cpp",
+           "Source/WebKit2/UIProcess/API/gtk/tests/WebViewTest.h",
+            ]
+
+        for path in paths_not_to_skip:
+            self.assertFalse(self._dispatcher.should_skip_with_warning(path))
+
+    def _assert_should_skip_without_warning(self, path, is_checker_none,
+                                            expected):
+        # Check the file type before asserting the return value.
+        checker = self._dispatcher.dispatch(file_path=path,
+                                            handle_style_error=None,
+                                            min_confidence=3)
+        message = 'while checking: %s' % path
+        self.assertEquals(checker is None, is_checker_none, message)
+        self.assertEquals(self._dispatcher.should_skip_without_warning(path),
+                          expected, message)
+
+    def test_should_skip_without_warning__true(self):
+        """Test should_skip_without_warning() for True return values."""
+        # Check a file with NONE file type.
+        path = 'foo.asdf'  # Non-sensical file extension.
+        self._assert_should_skip_without_warning(path,
+                                                 is_checker_none=True,
+                                                 expected=True)
+
+        # Check files with non-NONE file type.  These examples must be
+        # drawn from the _SKIPPED_FILES_WITHOUT_WARNING configuration
+        # variable.
+        path = os.path.join('LayoutTests', 'foo.txt')
+        self._assert_should_skip_without_warning(path,
+                                                 is_checker_none=False,
+                                                 expected=True)
+
+    def test_should_skip_without_warning__false(self):
+        """Test should_skip_without_warning() for False return values."""
+        paths = ['foo.txt',
+                 os.path.join('LayoutTests', 'ChangeLog'),
+        ]
+
+        for path in paths:
+            self._assert_should_skip_without_warning(path,
+                                                     is_checker_none=False,
+                                                     expected=False)
+
+
+class CheckerDispatcherCarriageReturnTest(unittest.TestCase):
+    def test_should_check_and_strip_carriage_returns(self):
+        files = {
+            'foo.txt': True,
+            'foo.cpp': True,
+            'foo.vcproj': False,
+            'foo.vsprops': False,
+        }
+
+        dispatcher = CheckerDispatcher()
+        for file_path, expected_result in files.items():
+            self.assertEquals(dispatcher.should_check_and_strip_carriage_returns(file_path), expected_result, 'Checking: %s' % file_path)
+
+
+class CheckerDispatcherDispatchTest(unittest.TestCase):
+
+    """Tests dispatch() method of CheckerDispatcher class."""
+
+    def dispatch(self, file_path):
+        """Call dispatch() with the given file path."""
+        dispatcher = CheckerDispatcher()
+        self.mock_handle_style_error = DefaultStyleErrorHandler('', None, None, [])
+        checker = dispatcher.dispatch(file_path,
+                                      self.mock_handle_style_error,
+                                      min_confidence=3)
+        return checker
+
+    def assert_checker_none(self, file_path):
+        """Assert that the dispatched checker is None."""
+        checker = self.dispatch(file_path)
+        self.assertTrue(checker is None, 'Checking: "%s"' % file_path)
+
+    def assert_checker(self, file_path, expected_class):
+        """Assert the type of the dispatched checker."""
+        checker = self.dispatch(file_path)
+        got_class = checker.__class__
+        self.assertEquals(got_class, expected_class,
+                          'For path "%(file_path)s" got %(got_class)s when '
+                          "expecting %(expected_class)s."
+                          % {"file_path": file_path,
+                             "got_class": got_class,
+                             "expected_class": expected_class})
+
+    def assert_checker_changelog(self, file_path):
+        """Assert that the dispatched checker is a ChangeLogChecker."""
+        self.assert_checker(file_path, ChangeLogChecker)
+
+    def assert_checker_cpp(self, file_path):
+        """Assert that the dispatched checker is a CppChecker."""
+        self.assert_checker(file_path, CppChecker)
+
+    def assert_checker_json(self, file_path):
+        """Assert that the dispatched checker is a JSONChecker."""
+        self.assert_checker(file_path, JSONChecker)
+
+    def assert_checker_python(self, file_path):
+        """Assert that the dispatched checker is a PythonChecker."""
+        self.assert_checker(file_path, PythonChecker)
+
+    def assert_checker_text(self, file_path):
+        """Assert that the dispatched checker is a TextChecker."""
+        self.assert_checker(file_path, TextChecker)
+
+    def assert_checker_xml(self, file_path):
+        """Assert that the dispatched checker is a XMLChecker."""
+        self.assert_checker(file_path, XMLChecker)
+
+    def test_changelog_paths(self):
+        """Test paths that should be checked as ChangeLog."""
+        paths = [
+                 "ChangeLog",
+                 "ChangeLog-2009-06-16",
+                 os.path.join("Source", "WebCore", "ChangeLog"),
+                 ]
+
+        for path in paths:
+            self.assert_checker_changelog(path)
+
+        # Check checker attributes on a typical input.
+        file_path = "ChangeLog"
+        self.assert_checker_changelog(file_path)
+        checker = self.dispatch(file_path)
+        self.assertEquals(checker.file_path, file_path)
+        self.assertEquals(checker.handle_style_error,
+                          self.mock_handle_style_error)
+
+    def test_cpp_paths(self):
+        """Test paths that should be checked as C++."""
+        paths = [
+            "-",
+            "foo.c",
+            "foo.cpp",
+            "foo.h",
+            ]
+
+        for path in paths:
+            self.assert_checker_cpp(path)
+
+        # Check checker attributes on a typical input.
+        file_base = "foo"
+        file_extension = "c"
+        file_path = file_base + "." + file_extension
+        self.assert_checker_cpp(file_path)
+        checker = self.dispatch(file_path)
+        self.assertEquals(checker.file_extension, file_extension)
+        self.assertEquals(checker.file_path, file_path)
+        self.assertEquals(checker.handle_style_error, self.mock_handle_style_error)
+        self.assertEquals(checker.min_confidence, 3)
+        # Check "-" for good measure.
+        file_base = "-"
+        file_extension = ""
+        file_path = file_base
+        self.assert_checker_cpp(file_path)
+        checker = self.dispatch(file_path)
+        self.assertEquals(checker.file_extension, file_extension)
+        self.assertEquals(checker.file_path, file_path)
+
+    def test_json_paths(self):
+        """Test paths that should be checked as JSON."""
+        paths = [
+           "Source/WebCore/inspector/Inspector.json",
+           "Tools/BuildSlaveSupport/build.webkit.org-config/config.json",
+        ]
+
+        for path in paths:
+            self.assert_checker_json(path)
+
+        # Check checker attributes on a typical input.
+        file_base = "foo"
+        file_extension = "json"
+        file_path = file_base + "." + file_extension
+        self.assert_checker_json(file_path)
+        checker = self.dispatch(file_path)
+        self.assertEquals(checker._handle_style_error,
+                          self.mock_handle_style_error)
+
+    def test_python_paths(self):
+        """Test paths that should be checked as Python."""
+        paths = [
+           "foo.py",
+           "Tools/Scripts/modules/text_style.py",
+        ]
+
+        for path in paths:
+            self.assert_checker_python(path)
+
+        # Check checker attributes on a typical input.
+        file_base = "foo"
+        file_extension = "css"
+        file_path = file_base + "." + file_extension
+        self.assert_checker_text(file_path)
+        checker = self.dispatch(file_path)
+        self.assertEquals(checker.file_path, file_path)
+        self.assertEquals(checker.handle_style_error,
+                          self.mock_handle_style_error)
+
+    def test_text_paths(self):
+        """Test paths that should be checked as text."""
+        paths = [
+           "foo.ac",
+           "foo.cc",
+           "foo.cgi",
+           "foo.css",
+           "foo.exp",
+           "foo.flex",
+           "foo.gyp",
+           "foo.gypi",
+           "foo.html",
+           "foo.idl",
+           "foo.in",
+           "foo.js",
+           "foo.mm",
+           "foo.php",
+           "foo.pl",
+           "foo.pm",
+           "foo.pri",
+           "foo.pro",
+           "foo.rb",
+           "foo.sh",
+           "foo.txt",
+           "foo.wm",
+           "foo.xhtml",
+           "foo.y",
+           os.path.join("Source", "WebCore", "inspector", "front-end", "inspector.js"),
+           os.path.join("Tools", "Scripts", "check-webkit-style"),
+        ]
+
+        for path in paths:
+            self.assert_checker_text(path)
+
+        # Check checker attributes on a typical input.
+        file_base = "foo"
+        file_extension = "css"
+        file_path = file_base + "." + file_extension
+        self.assert_checker_text(file_path)
+        checker = self.dispatch(file_path)
+        self.assertEquals(checker.file_path, file_path)
+        self.assertEquals(checker.handle_style_error, self.mock_handle_style_error)
+
+    def test_xml_paths(self):
+        """Test paths that should be checked as XML."""
+        paths = [
+           "Source/WebCore/WebCore.vcproj/WebCore.vcproj",
+           "WebKitLibraries/win/tools/vsprops/common.vsprops",
+        ]
+
+        for path in paths:
+            self.assert_checker_xml(path)
+
+        # Check checker attributes on a typical input.
+        file_base = "foo"
+        file_extension = "vcproj"
+        file_path = file_base + "." + file_extension
+        self.assert_checker_xml(file_path)
+        checker = self.dispatch(file_path)
+        self.assertEquals(checker._handle_style_error,
+                          self.mock_handle_style_error)
+
+    def test_none_paths(self):
+        """Test paths that have no file type.."""
+        paths = [
+           "Makefile",
+           "foo.asdf",  # Non-sensical file extension.
+           "foo.exe",
+            ]
+
+        for path in paths:
+            self.assert_checker_none(path)
+
+
+class StyleProcessorConfigurationTest(unittest.TestCase):
+
+    """Tests the StyleProcessorConfiguration class."""
+
+    def setUp(self):
+        self._error_messages = []
+        """The messages written to _mock_stderr_write() of this class."""
+
+    def _mock_stderr_write(self, message):
+        self._error_messages.append(message)
+
+    def _style_checker_configuration(self, output_format="vs7"):
+        """Return a StyleProcessorConfiguration instance for testing."""
+        base_rules = ["-whitespace", "+whitespace/tab"]
+        filter_configuration = FilterConfiguration(base_rules=base_rules)
+
+        return StyleProcessorConfiguration(
+                   filter_configuration=filter_configuration,
+                   max_reports_per_category={"whitespace/newline": 1},
+                   min_confidence=3,
+                   output_format=output_format,
+                   stderr_write=self._mock_stderr_write)
+
+    def test_init(self):
+        """Test the __init__() method."""
+        configuration = self._style_checker_configuration()
+
+        # Check that __init__ sets the "public" data attributes correctly.
+        self.assertEquals(configuration.max_reports_per_category,
+                          {"whitespace/newline": 1})
+        self.assertEquals(configuration.stderr_write, self._mock_stderr_write)
+        self.assertEquals(configuration.min_confidence, 3)
+
+    def test_is_reportable(self):
+        """Test the is_reportable() method."""
+        config = self._style_checker_configuration()
+
+        self.assertTrue(config.is_reportable("whitespace/tab", 3, "foo.txt"))
+
+        # Test the confidence check code path by varying the confidence.
+        self.assertFalse(config.is_reportable("whitespace/tab", 2, "foo.txt"))
+
+        # Test the category check code path by varying the category.
+        self.assertFalse(config.is_reportable("whitespace/line", 4, "foo.txt"))
+
+    def _call_write_style_error(self, output_format):
+        config = self._style_checker_configuration(output_format=output_format)
+        config.write_style_error(category="whitespace/tab",
+                                 confidence_in_error=5,
+                                 file_path="foo.h",
+                                 line_number=100,
+                                 message="message")
+
+    def test_write_style_error_emacs(self):
+        """Test the write_style_error() method."""
+        self._call_write_style_error("emacs")
+        self.assertEquals(self._error_messages,
+                          ["foo.h:100:  message  [whitespace/tab] [5]\n"])
+
+    def test_write_style_error_vs7(self):
+        """Test the write_style_error() method."""
+        self._call_write_style_error("vs7")
+        self.assertEquals(self._error_messages,
+                          ["foo.h(100):  message  [whitespace/tab] [5]\n"])
+
+
+class StyleProcessor_EndToEndTest(LoggingTestCase):
+
+    """Test the StyleProcessor class with an emphasis on end-to-end tests."""
+
+    def setUp(self):
+        LoggingTestCase.setUp(self)
+        self._messages = []
+
+    def _mock_stderr_write(self, message):
+        """Save a message so it can later be asserted."""
+        self._messages.append(message)
+
+    def test_init(self):
+        """Test __init__ constructor."""
+        configuration = StyleProcessorConfiguration(
+                            filter_configuration=FilterConfiguration(),
+                            max_reports_per_category={},
+                            min_confidence=3,
+                            output_format="vs7",
+                            stderr_write=self._mock_stderr_write)
+        processor = StyleProcessor(configuration)
+
+        self.assertEquals(processor.error_count, 0)
+        self.assertEquals(self._messages, [])
+
+    def test_process(self):
+        configuration = StyleProcessorConfiguration(
+                            filter_configuration=FilterConfiguration(),
+                            max_reports_per_category={},
+                            min_confidence=3,
+                            output_format="vs7",
+                            stderr_write=self._mock_stderr_write)
+        processor = StyleProcessor(configuration)
+
+        processor.process(lines=['line1', 'Line with tab:\t'],
+                          file_path='foo.txt')
+        self.assertEquals(processor.error_count, 1)
+        expected_messages = ['foo.txt(2):  Line contains tab character.  '
+                             '[whitespace/tab] [5]\n']
+        self.assertEquals(self._messages, expected_messages)
+
+
+class StyleProcessor_CodeCoverageTest(LoggingTestCase):
+
+    """Test the StyleProcessor class with an emphasis on code coverage.
+
+    This class makes heavy use of mock objects.
+
+    """
+
+    class MockDispatchedChecker(object):
+
+        """A mock checker dispatched by the MockDispatcher."""
+
+        def __init__(self, file_path, min_confidence, style_error_handler):
+            self.file_path = file_path
+            self.min_confidence = min_confidence
+            self.style_error_handler = style_error_handler
+
+        def check(self, lines):
+            self.lines = lines
+
+    class MockDispatcher(object):
+
+        """A mock CheckerDispatcher class."""
+
+        def __init__(self):
+            self.dispatched_checker = None
+
+        def should_skip_with_warning(self, file_path):
+            return file_path.endswith('skip_with_warning.txt')
+
+        def should_skip_without_warning(self, file_path):
+            return file_path.endswith('skip_without_warning.txt')
+
+        def should_check_and_strip_carriage_returns(self, file_path):
+            return not file_path.endswith('carriage_returns_allowed.txt')
+
+        def dispatch(self, file_path, style_error_handler, min_confidence):
+            if file_path.endswith('do_not_process.txt'):
+                return None
+
+            checker = StyleProcessor_CodeCoverageTest.MockDispatchedChecker(
+                          file_path,
+                          min_confidence,
+                          style_error_handler)
+
+            # Save the dispatched checker so the current test case has a
+            # way to access and check it.
+            self.dispatched_checker = checker
+
+            return checker
+
+    def setUp(self):
+        LoggingTestCase.setUp(self)
+        # We can pass an error-message swallower here because error message
+        # output is tested instead in the end-to-end test case above.
+        configuration = StyleProcessorConfiguration(
+                            filter_configuration=FilterConfiguration(),
+                            max_reports_per_category={"whitespace/newline": 1},
+                            min_confidence=3,
+                            output_format="vs7",
+                            stderr_write=self._swallow_stderr_message)
+
+        mock_carriage_checker_class = self._create_carriage_checker_class()
+        mock_dispatcher = self.MockDispatcher()
+        # We do not need to use a real incrementer here because error-count
+        # incrementing is tested instead in the end-to-end test case above.
+        mock_increment_error_count = self._do_nothing
+
+        processor = StyleProcessor(configuration=configuration,
+                        mock_carriage_checker_class=mock_carriage_checker_class,
+                        mock_dispatcher=mock_dispatcher,
+                        mock_increment_error_count=mock_increment_error_count)
+
+        self._configuration = configuration
+        self._mock_dispatcher = mock_dispatcher
+        self._processor = processor
+
+    def _do_nothing(self):
+        # We provide this function so the caller can pass it to the
+        # StyleProcessor constructor.  This lets us assert the equality of
+        # the DefaultStyleErrorHandler instance generated by the process()
+        # method with an expected instance.
+        pass
+
+    def _swallow_stderr_message(self, message):
+        """Swallow a message passed to stderr.write()."""
+        # This is a mock stderr.write() for passing to the constructor
+        # of the StyleProcessorConfiguration class.
+        pass
+
+    def _create_carriage_checker_class(self):
+
+        # Create a reference to self with a new name so its name does not
+        # conflict with the self introduced below.
+        test_case = self
+
+        class MockCarriageChecker(object):
+
+            """A mock carriage-return checker."""
+
+            def __init__(self, style_error_handler):
+                self.style_error_handler = style_error_handler
+
+                # This gives the current test case access to the
+                # instantiated carriage checker.
+                test_case.carriage_checker = self
+
+            def check(self, lines):
+                # Save the lines so the current test case has a way to access
+                # and check them.
+                self.lines = lines
+
+                return lines
+
+        return MockCarriageChecker
+
+    def test_should_process__skip_without_warning(self):
+        """Test should_process() for a skip-without-warning file."""
+        file_path = "foo/skip_without_warning.txt"
+
+        self.assertFalse(self._processor.should_process(file_path))
+
+    def test_should_process__skip_with_warning(self):
+        """Test should_process() for a skip-with-warning file."""
+        file_path = "foo/skip_with_warning.txt"
+
+        self.assertFalse(self._processor.should_process(file_path))
+
+        self.assertLog(['WARNING: File exempt from style guide. '
+                        'Skipping: "foo/skip_with_warning.txt"\n'])
+
+    def test_should_process__true_result(self):
+        """Test should_process() for a file that should be processed."""
+        file_path = "foo/skip_process.txt"
+
+        self.assertTrue(self._processor.should_process(file_path))
+
+    def test_process__checker_dispatched(self):
+        """Test the process() method for a path with a dispatched checker."""
+        file_path = 'foo.txt'
+        lines = ['line1', 'line2']
+        line_numbers = [100]
+
+        expected_error_handler = DefaultStyleErrorHandler(
+            configuration=self._configuration,
+            file_path=file_path,
+            increment_error_count=self._do_nothing,
+            line_numbers=line_numbers)
+
+        self._processor.process(lines=lines,
+                                file_path=file_path,
+                                line_numbers=line_numbers)
+
+        # Check that the carriage-return checker was instantiated correctly
+        # and was passed lines correctly.
+        carriage_checker = self.carriage_checker
+        self.assertEquals(carriage_checker.style_error_handler,
+                          expected_error_handler)
+        self.assertEquals(carriage_checker.lines, ['line1', 'line2'])
+
+        # Check that the style checker was dispatched correctly and was
+        # passed lines correctly.
+        checker = self._mock_dispatcher.dispatched_checker
+        self.assertEquals(checker.file_path, 'foo.txt')
+        self.assertEquals(checker.min_confidence, 3)
+        self.assertEquals(checker.style_error_handler, expected_error_handler)
+
+        self.assertEquals(checker.lines, ['line1', 'line2'])
+
+    def test_process__no_checker_dispatched(self):
+        """Test the process() method for a path with no dispatched checker."""
+        path = os.path.join('foo', 'do_not_process.txt')
+        self.assertRaises(AssertionError, self._processor.process,
+                          lines=['line1', 'line2'], file_path=path,
+                          line_numbers=[100])
+
+    def test_process__carriage_returns_not_stripped(self):
+        """Test that carriage returns aren't stripped from files that are allowed to contain them."""
+        file_path = 'carriage_returns_allowed.txt'
+        lines = ['line1\r', 'line2\r']
+        line_numbers = [100]
+        self._processor.process(lines=lines,
+                                file_path=file_path,
+                                line_numbers=line_numbers)
+        # The carriage return checker should never have been invoked, and so
+        # should not have saved off any lines.
+        self.assertFalse(hasattr(self.carriage_checker, 'lines'))
diff --git a/Tools/Scripts/webkitpy/style/checkers/__init__.py b/Tools/Scripts/webkitpy/style/checkers/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checkers/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/Tools/Scripts/webkitpy/style/checkers/changelog.py b/Tools/Scripts/webkitpy/style/checkers/changelog.py
new file mode 100644
index 0000000..a096d3f
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checkers/changelog.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2011 Patrick Gansterer <paroga@paroga.com>
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Checks WebKit style for ChangeLog files."""
+
+import re
+from common import TabChecker
+from webkitpy.common.checkout.changelog import parse_bug_id_from_changelog
+
+
+class ChangeLogChecker(object):
+    """Processes text lines for checking style."""
+
+    categories = set(['changelog/bugnumber', 'changelog/filechangedescriptionwhitespace', 'changelog/nonewtests'])
+
+    def __init__(self, file_path, handle_style_error, should_line_be_checked):
+        self.file_path = file_path
+        self.handle_style_error = handle_style_error
+        self.should_line_be_checked = should_line_be_checked
+        self._tab_checker = TabChecker(file_path, handle_style_error)
+
+    def check_entry(self, first_line_checked, entry_lines):
+        if not entry_lines:
+            return
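+        # Note the for/else: the else clause runs only when no line in the
+        # entry supplied a bug number, an "Unreviewed" marker, or a build-fix
+        # description.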
+        for line in entry_lines:
+            if parse_bug_id_from_changelog(line):
+                break
+            if re.search("Unreviewed", line, re.IGNORECASE):
+                break
+            if re.search("build", line, re.IGNORECASE) and re.search("fix", line, re.IGNORECASE):
+                break
+        else:
+            self.handle_style_error(first_line_checked,
+                                    "changelog/bugnumber", 5,
+                                    "ChangeLog entry has no bug number")
+        # check file change descriptions for style violations
+        line_no = first_line_checked - 1
+        for line in entry_lines:
+            line_no = line_no + 1
+            # filter file change descriptions
+            if not re.match('\s*\*\s', line):
+                continue
+            if re.search(':\s*$', line) or re.search(':\s', line):
+                continue
+            self.handle_style_error(line_no,
+                                    "changelog/filechangedescriptionwhitespace", 5,
+                                    "Need whitespace between colon and description")
+
+        # check for a lingering "No new tests. (OOPS!)" left over from prepare-ChangeLog.
+        line_no = first_line_checked - 1
+        for line in entry_lines:
+            line_no = line_no + 1
+            if re.match(r'\s*No new tests\. \(OOPS!\)$', line):
+                self.handle_style_error(line_no,
+                                        "changelog/nonewtests", 5,
+                                        "You should remove the 'No new tests' and either add and list tests, or explain why no new tests were possible.")
+
+    def check(self, lines):
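+        # Run the tab check over every line, then gather the contiguous run of
+        # lines that should_line_be_checked accepts and check that run as a
+        # single ChangeLog entry.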
+        self._tab_checker.check(lines)
+        first_line_checked = 0
+        entry_lines = []
+
+        for line_index, line in enumerate(lines):
+            if not self.should_line_be_checked(line_index + 1):
+                # If we transitioned from finding changed lines to
+                # unchanged lines, then we are done.
+                if first_line_checked:
+                    break
+                continue
+            if not first_line_checked:
+                first_line_checked = line_index + 1
+            entry_lines.append(line)
+
+        self.check_entry(first_line_checked, entry_lines)
diff --git a/Tools/Scripts/webkitpy/style/checkers/changelog_unittest.py b/Tools/Scripts/webkitpy/style/checkers/changelog_unittest.py
new file mode 100644
index 0000000..9fe8a60
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checkers/changelog_unittest.py
@@ -0,0 +1,187 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2010 Apple Inc. All rights reserved.
+# Copyright (C) 2011 Patrick Gansterer <paroga@paroga.com>
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit test for changelog.py."""
+
+import changelog
+import unittest
+
+
+class ChangeLogCheckerTest(unittest.TestCase):
+    """Tests ChangeLogChecker class."""
+
+    def assert_no_error(self, lines_to_check, changelog_data):
+        def handle_style_error(line_number, category, confidence, message):
+            self.fail('Unexpected error: %d %s %d %s for\n%s' % (line_number, category, confidence, message, changelog_data))
+        self.lines_to_check = set(lines_to_check)
+        checker = changelog.ChangeLogChecker('ChangeLog', handle_style_error, self.mock_should_line_be_checked)
+        checker.check(changelog_data.split('\n'))
+
+    def assert_error(self, expected_line_number, lines_to_check, expected_category, changelog_data):
+        self.had_error = False
+
+        def handle_style_error(line_number, category, confidence, message):
+            self.had_error = True
+            self.assertEquals(expected_line_number, line_number)
+            self.assertEquals(expected_category, category)
+        self.lines_to_check = set(lines_to_check)
+        checker = changelog.ChangeLogChecker('ChangeLog', handle_style_error, self.mock_should_line_be_checked)
+        checker.check(changelog_data.split('\n'))
+        self.assertTrue(self.had_error)
+
+    def mock_handle_style_error(self):
+        pass
+
+    def mock_should_line_be_checked(self, line_number):
+        return line_number in self.lines_to_check
+
+    def test_init(self):
+        checker = changelog.ChangeLogChecker('ChangeLog', self.mock_handle_style_error, self.mock_should_line_be_checked)
+        self.assertEquals(checker.file_path, 'ChangeLog')
+        self.assertEquals(checker.handle_style_error, self.mock_handle_style_error)
+        self.assertEquals(checker.should_line_be_checked, self.mock_should_line_be_checked)
+
+    def test_missing_bug_number(self):
+        self.assert_error(1, range(1, 20), 'changelog/bugnumber',
+                          '2011-01-01  Patrick Gansterer  <paroga@paroga.com>\n'
+                          '\n'
+                          '        Example bug\n')
+        self.assert_error(1, range(1, 20), 'changelog/bugnumber',
+                          '2011-01-01  Patrick Gansterer  <paroga@paroga.com>\n'
+                          '\n'
+                          '        Example bug\n'
+                          '        http://bugs.webkit.org/show_bug.cgi?id=\n')
+        self.assert_error(1, range(1, 20), 'changelog/bugnumber',
+                          '2011-01-01  Patrick Gansterer  <paroga@paroga.com>\n'
+                          '\n'
+                          '        Example bug\n'
+                          '        https://bugs.webkit.org/show_bug.cgi?id=\n')
+        self.assert_error(1, range(1, 20), 'changelog/bugnumber',
+                          '2011-01-01  Patrick Gansterer  <paroga@paroga.com>\n'
+                          '\n'
+                          '        Example bug\n'
+                          '        http://webkit.org/b/\n')
+        self.assert_error(1, range(1, 20), 'changelog/bugnumber',
+                          '2011-01-01  Patrick Gansterer  <paroga@paroga.com>\n'
+                          '\n'
+                          '        Example bug'
+                          '\n'
+                          '        http://trac.webkit.org/changeset/12345\n')
+        self.assert_error(2, range(2, 5), 'changelog/bugnumber',
+                          '2011-01-01  Patrick Gansterer  <paroga@paroga.com>\n'
+                          '        Example bug\n'
+                          '        https://bugs.webkit.org/show_bug.cgi\n'
+                          '\n'
+                          '2011-01-01  Patrick Gansterer  <paroga@paroga.com>\n'
+                          '        Another change\n')
+        self.assert_error(2, range(2, 6), 'changelog/bugnumber',
+                          '2011-01-01  Patrick Gansterer  <paroga@paroga.com>\n'
+                          '        Example bug\n'
+                          '        More text about bug.\n'
+                          '\n'
+                          '2011-01-01  Patrick Gansterer  <paroga@paroga.com>\n'
+                          '\n'
+                          '        No bug in this change.\n')
+
+    def test_file_descriptions(self):
+        self.assert_error(5, range(1, 20), 'changelog/filechangedescriptionwhitespace',
+                          '2011-01-01 Dmitry Lomov  <dslomov@google.com>\n'
+                          '        ExampleBug\n'
+                          '        http://bugs.webkit.org/show_bug.cgi?id=12345\n'
+                          '\n'
+                          '        *  Source/Tools/random-script.py:Fixed')
+        self.assert_error(6, range(1, 20), 'changelog/filechangedescriptionwhitespace',
+                          '2011-01-01 Dmitry Lomov  <dslomov@google.com>\n'
+                          '        ExampleBug\n'
+                          '        http://bugs.webkit.org/show_bug.cgi?id=12345\n'
+                          '\n'
+                          '        *  Source/Tools/another-file: Done\n'
+                          '        *  Source/Tools/random-script.py:Fixed\n'
+                          '        *  Source/Tools/one-morefile:\n')
+
+    def test_no_new_tests(self):
+        self.assert_error(5, range(1, 20), 'changelog/nonewtests',
+                          '2011-01-01 Dmitry Lomov  <dslomov@google.com>\n'
+                          '        ExampleBug\n'
+                          '        http://bugs.webkit.org/show_bug.cgi?id=12345\n'
+                          '\n'
+                          '        No new tests. (OOPS!)\n'
+                          '        *  Source/Tools/random-script.py: Fixed')
+
+    def test_no_error(self):
+        self.assert_no_error([],
+                             '2011-01-01  Patrick Gansterer  <paroga@paroga.com>\n'
+                             '\n'
+                             '        Example ChangeLog entry out of range\n'
+                             '        http://example.com/\n')
+        self.assert_no_error([],
+                             '2011-01-01  Patrick Gansterer  <paroga@paroga.com>\n'
+                             '\n'
+                             '        Example bug\n'
+                             '        http://bugs.webkit.org/show_bug.cgi?id=12345\n')
+        self.assert_no_error(range(1, 20),
+                             '2011-01-01  Patrick Gansterer  <paroga@paroga.com>\n'
+                             '\n'
+                             '        Example bug\n'
+                             '        http://bugs.webkit.org/show_bug.cgi?id=12345\n')
+        self.assert_no_error(range(1, 20),
+                             '2011-01-01  Patrick Gansterer  <paroga@paroga.com>\n'
+                             '\n'
+                             '        Example bug\n'
+                             '        https://bugs.webkit.org/show_bug.cgi?id=12345\n')
+        self.assert_no_error(range(1, 20),
+                             '2011-01-01  Patrick Gansterer  <paroga@paroga.com>\n'
+                             '\n'
+                             '        Example bug\n'
+                             '        http://webkit.org/b/12345\n')
+        self.assert_no_error(range(1, 20),
+                             '2011-01-01  Patrick Gansterer  <paroga@paroga.com>\n'
+                             '\n'
+                             '        Unreviewed build fix for r12345.\n')
+        self.assert_no_error(range(1, 20),
+                             '2011-01-01  Patrick Gansterer  <paroga@paroga.com>\n'
+                             '\n'
+                             '        Fix build after a bad change.\n')
+        self.assert_no_error(range(1, 20),
+                             '2011-01-01  Patrick Gansterer  <paroga@paroga.com>\n'
+                             '\n'
+                             '        Fix example port build.\n')
+        self.assert_no_error(range(2, 6),
+                             '2011-01-01  Patrick Gansterer  <paroga@paroga.com>\n'
+                             '        Example bug\n'
+                             '        https://bugs.webkit.org/show_bug.cgi?id=12345\n'
+                             '\n'
+                             '2011-01-01  Patrick Gansterer  <paroga@paroga.com>\n'
+                             '        No bug here!\n')
+        self.assert_no_error(range(1, 20),
+                             '2011-01-01  Patrick Gansterer  <paroga@paroga.com>\n'
+                             '        Example bug\n'
+                             '        https://bugs.webkit.org/show_bug.cgi?id=12345\n'
+                             '        * Source/WebKit/foo.cpp:    \n'
+                             '        * Source/WebKit/bar.cpp:\n'
+                             '        * Source/WebKit/foobar.cpp: Description\n')
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/style/checkers/common.py b/Tools/Scripts/webkitpy/style/checkers/common.py
new file mode 100644
index 0000000..76aa956
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checkers/common.py
@@ -0,0 +1,74 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Supports style checking not specific to any one file type."""
+
+
+# FIXME: Test this list in the same way that the list of CppChecker
+#        categories is tested, for example by checking that all of its
+#        elements appear in the unit tests. This should probably be done
+#        after moving the relevant cpp_unittest.ErrorCollector code
+#        into a shared location and refactoring appropriately.
+categories = set([
+    "whitespace/carriage_return",
+    "whitespace/tab"])
+
+
+class CarriageReturnChecker(object):
+
+    """Supports checking for and handling carriage returns."""
+
+    def __init__(self, handle_style_error):
+        self._handle_style_error = handle_style_error
+
+    def check(self, lines):
+        """Check for and strip trailing carriage returns from lines."""
+        for line_number in range(len(lines)):
+            if not lines[line_number].endswith("\r"):
+                continue
+
+            self._handle_style_error(line_number + 1,  # Correct for offset.
+                                     "whitespace/carriage_return",
+                                     1,
+                                     "One or more unexpected \\r (^M) found; "
+                                     "better to use only a \\n")
+
+            lines[line_number] = lines[line_number].rstrip("\r")
+
+        return lines
+
+
+class TabChecker(object):
+
+    """Supports checking for and handling tabs."""
+
+    def __init__(self, file_path, handle_style_error):
+        self.file_path = file_path
+        self.handle_style_error = handle_style_error
+
+    def check(self, lines):
+        # FIXME: share with cpp_style.
+        for line_number, line in enumerate(lines):
+            if "\t" in line:
+                self.handle_style_error(line_number + 1,
+                                        "whitespace/tab", 5,
+                                        "Line contains tab character.")
diff --git a/Tools/Scripts/webkitpy/style/checkers/common_unittest.py b/Tools/Scripts/webkitpy/style/checkers/common_unittest.py
new file mode 100644
index 0000000..1fe1263
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checkers/common_unittest.py
@@ -0,0 +1,124 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for common.py."""
+
+import unittest
+
+from common import CarriageReturnChecker
+from common import TabChecker
+
+# FIXME: The unit tests for the cpp, text, and common checkers should
+#        share supporting test code. This can include, for example, the
+#        mock style error handling code and the code to check that all
+#        of a checker's categories are covered by the unit tests.
+#        Such shared code can be located in a shared test file, perhaps
+#        even this file.
+class CarriageReturnCheckerTest(unittest.TestCase):
+
+    """Tests check_no_carriage_return()."""
+
+    _category = "whitespace/carriage_return"
+    _confidence = 1
+    _expected_message = ("One or more unexpected \\r (^M) found; "
+                         "better to use only a \\n")
+
+    def setUp(self):
+        self._style_errors = [] # The list of accumulated style errors.
+
+    def _mock_style_error_handler(self, line_number, category, confidence,
+                                  message):
+        """Append the error information to the list of style errors."""
+        error = (line_number, category, confidence, message)
+        self._style_errors.append(error)
+
+    def assert_carriage_return(self, input_lines, expected_lines, error_lines):
+        """Process the given line and assert that the result is correct."""
+        handle_style_error = self._mock_style_error_handler
+
+        checker = CarriageReturnChecker(handle_style_error)
+        output_lines = checker.check(input_lines)
+
+        # Check both the return value and error messages.
+        self.assertEquals(output_lines, expected_lines)
+
+        expected_errors = [(line_number, self._category, self._confidence,
+                            self._expected_message)
+                           for line_number in error_lines]
+        self.assertEquals(self._style_errors, expected_errors)
+
+    def test_ends_with_carriage(self):
+        self.assert_carriage_return(["carriage return\r"],
+                                    ["carriage return"],
+                                    [1])
+
+    def test_ends_with_nothing(self):
+        self.assert_carriage_return(["no carriage return"],
+                                    ["no carriage return"],
+                                    [])
+
+    def test_ends_with_newline(self):
+        self.assert_carriage_return(["no carriage return\n"],
+                                    ["no carriage return\n"],
+                                    [])
+
+    def test_carriage_in_middle(self):
+        # The CarriageReturnChecker checks only the final character
+        # of each line.
+        self.assert_carriage_return(["carriage\r in a string"],
+                                    ["carriage\r in a string"],
+                                    [])
+
+    def test_multiple_errors(self):
+        self.assert_carriage_return(["line1", "line2\r", "line3\r"],
+                                    ["line1", "line2", "line3"],
+                                    [2, 3])
+
+
+class TabCheckerTest(unittest.TestCase):
+
+    """Tests for TabChecker."""
+
+    def assert_tab(self, input_lines, error_lines):
+        """Assert when the given lines contain tabs."""
+        self._error_lines = []
+
+        def style_error_handler(line_number, category, confidence, message):
+            self.assertEqual(category, 'whitespace/tab')
+            self.assertEqual(confidence, 5)
+            self.assertEqual(message, 'Line contains tab character.')
+            self._error_lines.append(line_number)
+
+        checker = TabChecker('', style_error_handler)
+        checker.check(input_lines)
+        self.assertEquals(self._error_lines, error_lines)
+
+    def test_notab(self):
+        self.assert_tab([''], [])
+        self.assert_tab(['foo', 'bar'], [])
+
+    def test_tab(self):
+        self.assert_tab(['\tfoo'], [1])
+        self.assert_tab(['line1', '\tline2', 'line3\t'], [2, 3])
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/style/checkers/cpp.py b/Tools/Scripts/webkitpy/style/checkers/cpp.py
new file mode 100644
index 0000000..a1447e2
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checkers/cpp.py
@@ -0,0 +1,3682 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2009, 2010, 2012 Google Inc. All rights reserved.
+# Copyright (C) 2009 Torch Mobile Inc.
+# Copyright (C) 2009 Apple Inc. All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This is the modified version of Google's cpplint. The original code is
+# http://google-styleguide.googlecode.com/svn/trunk/cpplint/cpplint.py
+
+"""Support for check-webkit-style."""
+
+import codecs
+import math  # for log
+import os
+import os.path
+import re
+import sre_compile
+import string
+import sys
+import unicodedata
+
+from webkitpy.common.memoized import memoized
+
+# The key to use to provide a class to fake loading a header file.
+INCLUDE_IO_INJECTION_KEY = 'include_header_io'
+
+# Headers that we consider STL headers.
+_STL_HEADERS = frozenset([
+    'algobase.h', 'algorithm', 'alloc.h', 'bitset', 'deque', 'exception',
+    'function.h', 'functional', 'hash_map', 'hash_map.h', 'hash_set',
+    'hash_set.h', 'iterator', 'list', 'list.h', 'map', 'memory', 'pair.h',
+    'pthread_alloc', 'queue', 'set', 'set.h', 'sstream', 'stack',
+    'stl_alloc.h', 'stl_relops.h', 'type_traits.h',
+    'utility', 'vector', 'vector.h',
+    ])
+
+
+# Non-STL C++ system headers.
+_CPP_HEADERS = frozenset([
+    'algo.h', 'builtinbuf.h', 'bvector.h', 'cassert', 'cctype',
+    'cerrno', 'cfloat', 'ciso646', 'climits', 'clocale', 'cmath',
+    'complex', 'complex.h', 'csetjmp', 'csignal', 'cstdarg', 'cstddef',
+    'cstdio', 'cstdlib', 'cstring', 'ctime', 'cwchar', 'cwctype',
+    'defalloc.h', 'deque.h', 'editbuf.h', 'exception', 'fstream',
+    'fstream.h', 'hashtable.h', 'heap.h', 'indstream.h', 'iomanip',
+    'iomanip.h', 'ios', 'iosfwd', 'iostream', 'iostream.h', 'istream.h',
+    'iterator.h', 'limits', 'map.h', 'multimap.h', 'multiset.h',
+    'numeric', 'ostream.h', 'parsestream.h', 'pfstream.h', 'PlotFile.h',
+    'procbuf.h', 'pthread_alloc.h', 'rope', 'rope.h', 'ropeimpl.h',
+    'SFile.h', 'slist', 'slist.h', 'stack.h', 'stdexcept',
+    'stdiostream.h', 'streambuf.h', 'stream.h', 'strfile.h', 'string',
+    'strstream', 'strstream.h', 'tempbuf.h', 'tree.h', 'typeinfo', 'valarray',
+    ])
+
+
+# Assertion macros.  These are defined in base/logging.h and
+# testing/base/gunit.h.  Note that the _M versions need to come first
+# for substring matching to work.
+_CHECK_MACROS = [
+    'DCHECK', 'CHECK',
+    'EXPECT_TRUE_M', 'EXPECT_TRUE',
+    'ASSERT_TRUE_M', 'ASSERT_TRUE',
+    'EXPECT_FALSE_M', 'EXPECT_FALSE',
+    'ASSERT_FALSE_M', 'ASSERT_FALSE',
+    ]
+
+# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
+_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
+
+for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
+                        ('>=', 'GE'), ('>', 'GT'),
+                        ('<=', 'LE'), ('<', 'LT')]:
+    _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
+    _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
+    _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
+    _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
+    _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
+    _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
+
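+# The EXPECT_FALSE/ASSERT_FALSE variants map each operator to the macro for
+# the negated comparison, e.g. '==' inside EXPECT_FALSE maps to EXPECT_NE.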
+for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
+                            ('>=', 'LT'), ('>', 'LE'),
+                            ('<=', 'GT'), ('<', 'GE')]:
+    _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
+    _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
+    _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
+    _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
+
+
+# These constants define types of headers for use with
+# _IncludeState.check_next_include_order().
+_CONFIG_HEADER = 0
+_PRIMARY_HEADER = 1
+_OTHER_HEADER = 2
+_MOC_HEADER = 3
+
+
+# A dictionary of items that customize behavior for unit tests. For example,
+# INCLUDE_IO_INJECTION_KEY allows providing a custom io class which allows
+# for faking a header file.
+_unit_test_config = {}
+
+
+# The regexp compilation caching is inlined in all regexp functions for
+# performance reasons; factoring it out into a separate function turns out
+# to be noticeably expensive.
+_regexp_compile_cache = {}
+
+
+def match(pattern, s):
+    """Matches the string with the pattern, caching the compiled regexp."""
+    if not pattern in _regexp_compile_cache:
+        _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
+    return _regexp_compile_cache[pattern].match(s)
+
+
+def search(pattern, s):
+    """Searches the string for the pattern, caching the compiled regexp."""
+    if not pattern in _regexp_compile_cache:
+        _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
+    return _regexp_compile_cache[pattern].search(s)
+
+
+def sub(pattern, replacement, s):
+    """Substitutes occurrences of a pattern, caching the compiled regexp."""
+    if not pattern in _regexp_compile_cache:
+        _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
+    return _regexp_compile_cache[pattern].sub(replacement, s)
+
+
+def subn(pattern, replacement, s):
+    """Substitutes occurrences of a pattern, caching the compiled regexp."""
+    if not pattern in _regexp_compile_cache:
+        _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
+    return _regexp_compile_cache[pattern].subn(replacement, s)
+
+
+def iteratively_replace_matches_with_char(pattern, char_replacement, s):
+    """Returns the string with replacement done.
+
+    Every character in the match is replaced with char.
+    Due to the iterative nature, pattern should not match char or
+    there will be an infinite loop.
+
+    Example:
+      pattern = r'<[^<>]*?>' # innermost template parameters
+      char_replacement =  '_'
+      s =     'A<B<C, D>>'
+      Returns 'A_________'
+
+    Args:
+      pattern: The regex to match.
+      char_replacement: The character to put in place of every
+                        character of the match.
+      s: The string on which to do the replacements.
+
+    Returns:
+      s, with every character of every match replaced by char_replacement.
+    """
+    while True:
+        matched = search(pattern, s)
+        if not matched:
+            return s
+        start_match_index = matched.start(0)
+        end_match_index = matched.end(0)
+        match_length = end_match_index - start_match_index
+        s = s[:start_match_index] + char_replacement * match_length + s[end_match_index:]
+
+
+def _rfind_in_lines(regex, lines, start_position, not_found_position):
+    """Does a reverse find starting at start position and going backwards until
+    a match is found.
+
+    Returns the position where the regex ended.
+    """
+    # Put the regex in a group and precede it with a greedy expression that
+    # matches anything to ensure that we get the last possible match in a line.
+    last_in_line_regex = r'.*(' + regex + ')'
+    current_row = start_position.row
+
+    # Start with the given row and trim off everything past what may be matched.
+    current_line = lines[start_position.row][:start_position.column]
+    while True:
+        found_match = match(last_in_line_regex, current_line)
+        if found_match:
+            return Position(current_row, found_match.end(1))
+
+        # A match was not found so continue backward.
+        current_row -= 1
+        if current_row < 0:
+            return not_found_position
+        current_line = lines[current_row]
+
+
+def _convert_to_lower_with_underscores(text):
+    """Converts all text strings in camelCase or PascalCase to lowercase with underscores."""
+
+    # First add underscores before any capital letter followed by a lower case letter
+    # as long as it is in a word.
+    # (This puts an underscore before Password but not P and A in WPAPassword).
+    text = sub(r'(?<=[A-Za-z0-9])([A-Z])(?=[a-z])', r'_\1', text)
+
+    # Next add underscores before capitals at the end of words if they are
+    # preceded by a lower case letter or number.
+    # (This puts an underscore before A in isA but not A in CBA).
+    text = sub(r'(?<=[a-z0-9])([A-Z])(?=\b)', r'_\1', text)
+
+    # Next add underscores when you have a capital letter which is followed by a capital letter
+    # but is not preceded by one. (This puts an underscore before A in 'WordADay').
+    text = sub(r'(?<=[a-z0-9])([A-Z][A-Z_])', r'_\1', text)
+
+    return text.lower()
+
+
+
+def _create_acronym(text):
+    """Creates an acronym for the given text."""
+    # Removes all lower case letters except those starting words.
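+    # For example, "thisIsMyName" yields "TIMN".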
+    text = sub(r'(?<!\b)[a-z]', '', text)
+    return text.upper()
+
+
+def up_to_unmatched_closing_paren(s):
+    """Splits a string into two parts up to first unmatched ')'.
+
+    Args:
+      s: a string which is a substring of line after '('
+      (e.g., "a == (b + c))").
+
+    Returns:
+      A pair of strings (prefix before first unmatched ')',
+      remainder of s after first unmatched ')'), e.g.,
+      up_to_unmatched_closing_paren("a == (b + c)) { ")
+      returns "a == (b + c)", " {".
+      Returns None, None if there is no unmatched ')'
+
+    """
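+    # The caller passes the text that follows an already-consumed '(', so the
+    # nesting count starts at one.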
+    i = 1
+    for pos, c in enumerate(s):
+        if c == '(':
+            i += 1
+        elif c == ')':
+            i -= 1
+            if i == 0:
+                return s[:pos], s[pos + 1:]
+    return None, None
+
+class _IncludeState(dict):
+    """Tracks line numbers for includes, and the order in which includes appear.
+
+    As a dict, an _IncludeState object serves as a mapping between include
+    filename and line number on which that file was included.
+
+    Call check_next_include_order() once for each header in the file, passing
+    in the type constants defined above. Calls in an illegal order will
+    raise an _IncludeError with an appropriate error message.
+
+    """
+    # self._section will move monotonically through this set. If it ever
+    # needs to move backwards, check_next_include_order will raise an error.
+    _INITIAL_SECTION = 0
+    _CONFIG_SECTION = 1
+    _PRIMARY_SECTION = 2
+    _OTHER_SECTION = 3
+
+    _TYPE_NAMES = {
+        _CONFIG_HEADER: 'WebCore config.h',
+        _PRIMARY_HEADER: 'header this file implements',
+        _OTHER_HEADER: 'other header',
+        _MOC_HEADER: 'moc file',
+        }
+    _SECTION_NAMES = {
+        _INITIAL_SECTION: "... nothing.",
+        _CONFIG_SECTION: "WebCore config.h.",
+        _PRIMARY_SECTION: 'a header this file implements.',
+        _OTHER_SECTION: 'other header.',
+        }
+
+    def __init__(self):
+        dict.__init__(self)
+        self._section = self._INITIAL_SECTION
+        self._visited_primary_section = False
+        self.header_types = dict()
+
+    def visited_primary_section(self):
+        return self._visited_primary_section
+
+    def check_next_include_order(self, header_type, file_is_header, primary_header_exists):
+        """Returns a non-empty error message if the next header is out of order.
+
+        This function also updates the internal state to be ready to check
+        the next include.
+
+        Args:
+          header_type: One of the _XXX_HEADER constants defined above.
+          file_is_header: Whether the file that owns this _IncludeState is itself a header
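+          primary_header_exists: Whether the primary header for this file exists.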
+
+        Returns:
+          The empty string if the header is in the right order, or an
+          error message describing what's wrong.
+
+        """
+        if header_type == _CONFIG_HEADER and file_is_header:
+            return 'Header file should not contain WebCore config.h.'
+        if header_type == _PRIMARY_HEADER and file_is_header:
+            return 'Header file should not contain itself.'
+        if header_type == _MOC_HEADER:
+            return ''
+
+        error_message = ''
+        if self._section != self._OTHER_SECTION:
+            before_error_message = ('Found %s before %s' %
+                                    (self._TYPE_NAMES[header_type],
+                                     self._SECTION_NAMES[self._section + 1]))
+        after_error_message = ('Found %s after %s' %
+                                (self._TYPE_NAMES[header_type],
+                                 self._SECTION_NAMES[self._section]))
+
+        if header_type == _CONFIG_HEADER:
+            if self._section >= self._CONFIG_SECTION:
+                error_message = after_error_message
+            self._section = self._CONFIG_SECTION
+        elif header_type == _PRIMARY_HEADER:
+            if self._section >= self._PRIMARY_SECTION:
+                error_message = after_error_message
+            elif self._section < self._CONFIG_SECTION:
+                error_message = before_error_message
+            self._section = self._PRIMARY_SECTION
+            self._visited_primary_section = True
+        else:
+            assert header_type == _OTHER_HEADER
+            if not file_is_header and self._section < self._PRIMARY_SECTION:
+                if primary_header_exists:
+                    error_message = before_error_message
+            self._section = self._OTHER_SECTION
+
+        return error_message
+
+
+class Position(object):
+    """Holds the position of something."""
+    def __init__(self, row, column):
+        self.row = row
+        self.column = column
+
+    def __str__(self):
+        return '(%s, %s)' % (self.row, self.column)
+
+    def __cmp__(self, other):
+        return self.row.__cmp__(other.row) or self.column.__cmp__(other.column)
+
+
+class Parameter(object):
+    """Information about one function parameter."""
+    def __init__(self, parameter, parameter_name_index, row):
+        self.type = parameter[:parameter_name_index].strip()
+        # Remove any initializers from the parameter name (e.g. int i = 5).
+        self.name = sub(r'=.*', '', parameter[parameter_name_index:]).strip()
+        self.row = row
+
+    @memoized
+    def lower_with_underscores_name(self):
+        """Returns the parameter name in the lower with underscores format."""
+        return _convert_to_lower_with_underscores(self.name)
+
+
+class SingleLineView(object):
+    """Converts multiple lines into a single line (with line breaks replaced by a
+       space) to allow for easier searching."""
+    def __init__(self, lines, start_position, end_position):
+        """Create a SingleLineView instance.
+
+        Args:
+          lines: a list of multiple lines to combine into a single line.
+          start_position: offset within lines of where to start the single line.
+          end_position: just after where to end (like a slice operation).
+        """
+        # Get the rows of interest.
+        trimmed_lines = lines[start_position.row:end_position.row + 1]
+
+        # Remove the columns on the last line that aren't included.
+        trimmed_lines[-1] = trimmed_lines[-1][:end_position.column]
+
+        # Remove the columns on the first line that aren't included.
+        trimmed_lines[0] = trimmed_lines[0][start_position.column:]
+
+        # Create a single line with all of the parameters.
+        self.single_line = ' '.join(trimmed_lines)
+
+        # Keep the row lengths, so we can calculate the original row number
+        # given a column in the single line (adding 1 due to the space added
+        # during the join).
+        self._row_lengths = [len(line) + 1 for line in trimmed_lines]
+        self._starting_row = start_position.row
+
+    def convert_column_to_row(self, single_line_column_number):
+        """Convert the column number from the single line into the original
+        line number.
+
+        Special cases:
+        * Columns in the added spaces are considered part of the previous line.
+        * Columns beyond the end of the line are considered part of the last line
+        in the view."""
+        total_columns = 0
+        row_offset = 0
+        while row_offset < len(self._row_lengths) - 1 and single_line_column_number >= total_columns + self._row_lengths[row_offset]:
+            total_columns += self._row_lengths[row_offset]
+            row_offset += 1
+        return self._starting_row + row_offset
+
+
+def create_skeleton_parameters(all_parameters):
+    """Converts a parameter list to a skeleton version.
+
+    The skeleton only has one word for the parameter name, one word for the type,
+    and a comma after each parameter and nowhere else. Everything in the skeleton
+    remains in the same columns as the original."""
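+    # For example, "const Vector<int>& names, int size = 0" is reduced to a
+    # skeleton along the lines of "      Vector       names, int size    ,":
+    # one word per type and name (kept in their original columns) and a ','
+    # after every parameter.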
+    all_simplifications = (
+        # Remove template parameters, function declaration parameters, etc.
+        r'(<[^<>]*?>)|(\([^\(\)]*?\))|(\{[^\{\}]*?\})',
+        # Remove all initializers.
+        r'=[^,]*',
+        # Remove :: and everything before it.
+        r'[^,]*::',
+        # Remove modifiers like &, *.
+        r'[&*]',
+        # Remove const modifiers.
+        r'\bconst\s+(?=[A-Za-z])',
+        # Remove numerical modifiers like long.
+        r'\b(unsigned|long|short)\s+(?=unsigned|long|short|int|char|double|float)')
+
+    skeleton_parameters = all_parameters
+    for simplification in all_simplifications:
+        skeleton_parameters = iteratively_replace_matches_with_char(simplification, ' ', skeleton_parameters)
+    # If there are any parameters, then add a , after the last one to
+    # make a regular pattern of a , following every parameter.
+    if skeleton_parameters.strip():
+        skeleton_parameters += ','
+    return skeleton_parameters
+
+
+def find_parameter_name_index(skeleton_parameter):
+    """Determines where the parameter name starts given the skeleton parameter."""
+    # The first space from the right in the simplified parameter is where the parameter
+    # name starts unless the first space is before any content in the simplified parameter.
+    before_name_index = skeleton_parameter.rstrip().rfind(' ')
+    if before_name_index != -1 and skeleton_parameter[:before_name_index].strip():
+        return before_name_index + 1
+    return len(skeleton_parameter)
+
+
+def parameter_list(elided_lines, start_position, end_position):
+    """Generator for a function's parameters."""
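+    # Build a single-line view of the text between the parentheses, use the
+    # skeleton form to find the comma that ends each parameter, and yield a
+    # Parameter (type, name, row) for each one.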
+    # Create new positions that omit the outer parentheses of the parameters.
+    start_position = Position(row=start_position.row, column=start_position.column + 1)
+    end_position = Position(row=end_position.row, column=end_position.column - 1)
+    single_line_view = SingleLineView(elided_lines, start_position, end_position)
+    skeleton_parameters = create_skeleton_parameters(single_line_view.single_line)
+    end_index = -1
+
+    while True:
+        # Find the end of the next parameter.
+        start_index = end_index + 1
+        end_index = skeleton_parameters.find(',', start_index)
+
+        # No comma means that all parameters have been parsed.
+        if end_index == -1:
+            return
+        row = single_line_view.convert_column_to_row(end_index)
+
+        # Parse the parameter into a type and parameter name.
+        skeleton_parameter = skeleton_parameters[start_index:end_index]
+        name_offset = find_parameter_name_index(skeleton_parameter)
+        parameter = single_line_view.single_line[start_index:end_index]
+        yield Parameter(parameter, name_offset, row)
+
+
+class _FunctionState(object):
+    """Tracks current function name and the number of lines in its body.
+
+    Attributes:
+      min_confidence: The minimum confidence level to use while checking style.
+
+    """
+
+    _NORMAL_TRIGGER = 250  # for min_confidence 0, 500 for min_confidence 1, etc.
+    _TEST_TRIGGER = 400    # about 60% more than _NORMAL_TRIGGER.
+
+    def __init__(self, min_confidence):
+        self.min_confidence = min_confidence
+        self.current_function = ''
+        self.in_a_function = False
+        self.lines_in_function = 0
+        # Make sure these will not be mistaken for real positions (even when a
+        # small amount is added to them).
+        self.body_start_position = Position(-1000, 0)
+        self.end_position = Position(-1000, 0)
+
+    def begin(self, function_name, function_name_start_position, body_start_position, end_position,
+              parameter_start_position, parameter_end_position, clean_lines):
+        """Start analyzing function body.
+
+        Args:
+            function_name: The name of the function being tracked.
+            function_name_start_position: Position in elided where the function name starts.
+            body_start_position: Position in elided of the { or the ; for a prototype.
+            end_position: Position in elided just after the final } (or the ; for a prototype).
+            parameter_start_position: Position in elided of the '(' for the parameters.
+            parameter_end_position: Position in elided just after the ')' for the parameters.
+            clean_lines: A CleansedLines instance containing the file.
+        """
+        self.in_a_function = True
+        self.lines_in_function = -1  # Don't count the open brace line.
+        self.current_function = function_name
+        self.function_name_start_position = function_name_start_position
+        self.body_start_position = body_start_position
+        self.end_position = end_position
+        self.is_declaration = clean_lines.elided[body_start_position.row][body_start_position.column] == ';'
+        self.parameter_start_position = parameter_start_position
+        self.parameter_end_position = parameter_end_position
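+        # A declaration whose text between the closing ')' and the ';' matches
+        # "= 0" is treated as a pure virtual function.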
+        self.is_pure = False
+        if self.is_declaration:
+            characters_after_parameters = SingleLineView(clean_lines.elided, parameter_end_position, body_start_position).single_line
+            self.is_pure = bool(match(r'\s*=\s*0\s*', characters_after_parameters))
+        self._clean_lines = clean_lines
+        self._parameter_list = None
+
+    def modifiers_and_return_type(self):
+        """Returns the modifiers and the return type."""
+        # Go backwards from where the function name is until we encounter one of several things:
+        #   ';' or '{' or '}' or 'private:', etc. or '#' or return Position(0, 0)
+        elided = self._clean_lines.elided
+        start_modifiers = _rfind_in_lines(r';|\{|\}|((private|public|protected):)|(#.*)',
+                                          elided, self.parameter_start_position, Position(0, 0))
+        return SingleLineView(elided, start_modifiers, self.function_name_start_position).single_line.strip()
+
+    def parameter_list(self):
+        if not self._parameter_list:
+            # Store the final result as a tuple since that is immutable.
+            self._parameter_list = tuple(parameter_list(self._clean_lines.elided, self.parameter_start_position, self.parameter_end_position))
+
+        return self._parameter_list
+
+    def count(self, line_number):
+        """Count line in current function body."""
+        if self.in_a_function and line_number >= self.body_start_position.row:
+            self.lines_in_function += 1
+
+    def check(self, error, line_number):
+        """Report if too many lines in function body.
+
+        Args:
+          error: The function to call with any errors found.
+          line_number: The number of the line to check.
+        """
+        if match(r'T(EST|est)', self.current_function):
+            base_trigger = self._TEST_TRIGGER
+        else:
+            base_trigger = self._NORMAL_TRIGGER
+        trigger = base_trigger * 2 ** self.min_confidence
+
+        if self.lines_in_function > trigger:
+            error_level = int(math.log(self.lines_in_function / base_trigger, 2))
+            # As a multiple of base_trigger: 1x => 0, 2x => 1, 4x => 2, 8x => 3, 16x => 4, 32x => 5, ...
+            if error_level > 5:
+                error_level = 5
+            error(line_number, 'readability/fn_size', error_level,
+                  'Small and focused functions are preferred:'
+                  ' %s has %d non-comment lines'
+                  ' (error triggered by exceeding %d lines).'  % (
+                      self.current_function, self.lines_in_function, trigger))
+
+    def end(self):
+        """Stop analyzing function body."""
+        self.in_a_function = False
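+
+# Illustrative arithmetic for _FunctionState.check(): the warning threshold is
+# base_trigger * 2 ** min_confidence, so with min_confidence 1 a non-test
+# function can reach 250 * 2 = 500 non-comment lines before readability/fn_size
+# fires (test functions use the larger 400-line base).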
+
+
+class _IncludeError(Exception):
+    """Indicates a problem with the include order in a file."""
+    pass
+
+
+class FileInfo:
+    """Provides utility functions for filenames.
+
+    FileInfo provides easy access to the components of a file's path
+    relative to the project root.
+    """
+
+    def __init__(self, filename):
+        self._filename = filename
+
+    def full_name(self):
+        """Make Windows paths like Unix."""
+        return os.path.abspath(self._filename).replace('\\', '/')
+
+    def repository_name(self):
+        """Full name after removing the local path to the repository.
+
+        If we have a real absolute path name here we can try to do something smart:
+        detecting the root of the checkout and truncating /path/to/checkout from
+        the name so that we get header guards that don't include things like
+        "C:\Documents and Settings\..." or "/home/username/..." in them and thus
+        people on different computers who have checked the source out to different
+        locations won't see bogus errors.
+        """
+        fullname = self.full_name()
+
+        if os.path.exists(fullname):
+            project_dir = os.path.dirname(fullname)
+
+            if os.path.exists(os.path.join(project_dir, ".svn")):
+                # If there's a .svn file in the current directory, we
+                # recursively look up the directory tree for the top
+                # of the SVN checkout
+                root_dir = project_dir
+                one_up_dir = os.path.dirname(root_dir)
+                while os.path.exists(os.path.join(one_up_dir, ".svn")):
+                    root_dir = os.path.dirname(root_dir)
+                    one_up_dir = os.path.dirname(one_up_dir)
+
+                prefix = os.path.commonprefix([root_dir, project_dir])
+                return fullname[len(prefix) + 1:]
+
+            # Not SVN? Try to find a git top level directory by
+            # searching up from the current path.
+            root_dir = os.path.dirname(fullname)
+            while (root_dir != os.path.dirname(root_dir)
+                   and not os.path.exists(os.path.join(root_dir, ".git"))):
+                root_dir = os.path.dirname(root_dir)
+            # Check after the loop as well so that files directly in the
+            # checkout root are handled too.
+            if os.path.exists(os.path.join(root_dir, ".git")):
+                prefix = os.path.commonprefix([root_dir, project_dir])
+                return fullname[len(prefix) + 1:]
+
+        # Don't know what to do; header guard warnings may be wrong...
+        return fullname
+
+    def split(self):
+        """Splits the file into the directory, basename, and extension.
+
+        For 'chrome/browser/browser.cpp', split() would
+        return ('chrome/browser', 'browser', '.cpp')
+
+        Returns:
+          A tuple of (directory, basename, extension).
+        """
+
+        googlename = self.repository_name()
+        project, rest = os.path.split(googlename)
+        return (project,) + os.path.splitext(rest)
+
+    def base_name(self):
+        """File base name - text after the final slash, before the final period."""
+        return self.split()[1]
+
+    def extension(self):
+        """File extension - text following the final period."""
+        return self.split()[2]
+
+    def no_extension(self):
+        """File has no source file extension."""
+        return '/'.join(self.split()[0:2])
+
+    def is_source(self):
+        """File has a source file extension."""
+        return self.extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
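+
+# Illustrative behaviour of FileInfo (not executed; hypothetical path outside
+# any checkout, so repository_name() falls back to the absolute path while the
+# extension-based helpers behave the same):
+#     FileInfo('WebCore/dom/Node.cpp').extension()  # -> '.cpp'
+#     FileInfo('WebCore/dom/Node.cpp').is_source()  # -> True
+#     FileInfo('WebCore/dom/Node.h').is_source()    # -> False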
+
+
+# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
+_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
+    r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
+# Matches strings.  Escape codes should already be removed by ESCAPES.
+_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"')
+# Matches characters.  Escape codes should already be removed by ESCAPES.
+_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'")
+# Matches multi-line C++ comments.
+# This RE is a little more complicated than one might expect, because we
+# have to be careful about how the surrounding spaces are removed so that we
+# can handle comments inside statements better.
+# The current rule is: we only strip spaces from both sides when we're at the
+# end of the line. Otherwise, we try to remove spaces from the right side;
+# if this doesn't work, we try the left side, but only if there's a
+# non-word character on the right.
+_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
+    r"""(\s*/\*.*\*/\s*$|
+            /\*.*\*/\s+|
+         \s+/\*.*\*/(?=\W)|
+            /\*.*\*/)""", re.VERBOSE)
+
+
+def is_cpp_string(line):
+    """Does the line end so that the next character would be inside a string constant?
+
+    This function does not consider single-line nor multi-line comments.
+
+    Args:
+      line: A partial line of code, from the start of the line up to some position.
+
+    Returns:
+      True, if next character appended to 'line' is inside a
+      string constant.
+    """
+
+    line = line.replace(r'\\', 'XX')  # after this, \\" does not match to \"
+    return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
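+
+# Illustrative calls to is_cpp_string() (not executed):
+#     is_cpp_string('int a = "')               # -> True: the quote is unterminated.
+#     is_cpp_string('const char* s = "str";')  # -> False: the quotes are balanced.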
+
+
+def find_next_multi_line_comment_start(lines, line_index):
+    """Find the beginning marker for a multiline comment."""
+    while line_index < len(lines):
+        if lines[line_index].strip().startswith('/*'):
+            # Only return this marker if the comment goes beyond this line
+            if lines[line_index].strip().find('*/', 2) < 0:
+                return line_index
+        line_index += 1
+    return len(lines)
+
+
+def find_next_multi_line_comment_end(lines, line_index):
+    """We are inside a comment, find the end marker."""
+    while line_index < len(lines):
+        if lines[line_index].strip().endswith('*/'):
+            return line_index
+        line_index += 1
+    return len(lines)
+
+
+def remove_multi_line_comments_from_range(lines, begin, end):
+    """Clears a range of lines for multi-line comments."""
+    # Having // dummy comments makes the lines non-empty, so we will not get
+    # unnecessary blank line warnings later in the code.
+    for i in range(begin, end):
+        lines[i] = '// dummy'
+
+
+def remove_multi_line_comments(lines, error):
+    """Removes multiline (c-style) comments from lines."""
+    line_index = 0
+    while line_index < len(lines):
+        line_index_begin = find_next_multi_line_comment_start(lines, line_index)
+        if line_index_begin >= len(lines):
+            return
+        line_index_end = find_next_multi_line_comment_end(lines, line_index_begin)
+        if line_index_end >= len(lines):
+            error(line_index_begin + 1, 'readability/multiline_comment', 5,
+                  'Could not find end of multi-line comment')
+            return
+        remove_multi_line_comments_from_range(lines, line_index_begin, line_index_end + 1)
+        line_index = line_index_end + 1
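+
+# Illustrative effect of remove_multi_line_comments() (not executed), assuming
+# a no-op error handler:
+#     lines = ['int a;', '/* start', '   end */', 'int b;']
+#     remove_multi_line_comments(lines, lambda *args: None)
+#     # lines is now ['int a;', '// dummy', '// dummy', 'int b;']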
+
+
+def cleanse_comments(line):
+    """Removes //-comments and single-line C-style /* */ comments.
+
+    Args:
+      line: A line of C++ source.
+
+    Returns:
+      The line with single-line comments removed.
+    """
+    comment_position = line.find('//')
+    if comment_position != -1 and not is_cpp_string(line[:comment_position]):
+        line = line[:comment_position]
+    # get rid of /* ... */
+    return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
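+
+# Illustrative calls to cleanse_comments() (not executed):
+#     cleanse_comments('int a; // counter')     # -> 'int a; '
+#     cleanse_comments('f(); /* once */ g();')  # -> 'f(); g();'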
+
+
+class CleansedLines(object):
+    """Holds 3 copies of all lines with different preprocessing applied to them.
+
+    1) elided member contains lines without strings and comments,
+    2) lines member contains lines without comments, and
+    3) raw member contains all the lines without processing.
+    All these three members are of <type 'list'>, and of the same length.
+    """
+
+    def __init__(self, lines):
+        self.elided = []
+        self.lines = []
+        self.raw_lines = lines
+        self._num_lines = len(lines)
+        for line_number in range(len(lines)):
+            self.lines.append(cleanse_comments(lines[line_number]))
+            elided = self.collapse_strings(lines[line_number])
+            self.elided.append(cleanse_comments(elided))
+
+    def num_lines(self):
+        """Returns the number of lines represented."""
+        return self._num_lines
+
+    @staticmethod
+    def collapse_strings(elided):
+        """Collapses strings and chars on a line to simple "" or '' blocks.
+
+        We nix strings first so we're not fooled by text like '"http://"'
+
+        Args:
+          elided: The line being processed.
+
+        Returns:
+          The line with collapsed strings.
+        """
+        if not _RE_PATTERN_INCLUDE.match(elided):
+            # Remove escaped characters first to make quote/single quote collapsing
+            # basic.  Things that look like escaped characters shouldn't occur
+            # outside of strings and chars.
+            elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
+            elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided)
+            elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided)
+        return elided
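+
+# Illustrative behaviour of CleansedLines.collapse_strings() (not executed):
+# for a non-#include line such as
+#     url = "http://x"; c = 'y';
+# the collapsed result is
+#     url = ""; c = '';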
+
+
+def close_expression(elided, position):
+    """If input points to ( or { or [, finds the position that closes it.
+
+    If elided[position.row][position.column] points to a '(' or '{' or '[',
+    finds the line_number/pos that correspond to the closing of the expression.
+
+    Args:
+      elided: A CleansedLines.elided instance containing the file.
+      position: The position of the opening item.
+
+    Returns:
+      The Position *past* the closing brace, or Position(len(elided), -1)
+      if we never find a close. Note we ignore strings and comments when matching.
+    """
+    line = elided[position.row]
+    start_character = line[position.column]
+    if start_character == '(':
+        enclosing_character_regex = r'[\(\)]'
+    elif start_character == '[':
+        enclosing_character_regex = r'[\[\]]'
+    elif start_character == '{':
+        enclosing_character_regex = r'[\{\}]'
+    else:
+        return Position(len(elided), -1)
+
+    current_column = position.column + 1
+    line_number = position.row
+    net_open = 1
+    for line in elided[position.row:]:
+        line = line[current_column:]
+
+        # Search the current line for opening and closing characters.
+        while True:
+            next_enclosing_character = search(enclosing_character_regex, line)
+            # No more on this line.
+            if not next_enclosing_character:
+                break
+            current_column += next_enclosing_character.end(0)
+            line = line[next_enclosing_character.end(0):]
+            if next_enclosing_character.group(0) == start_character:
+                net_open += 1
+            else:
+                net_open -= 1
+                if not net_open:
+                    return Position(line_number, current_column)
+
+        # Proceed to the next line.
+        line_number += 1
+        current_column = 0
+
+    # The given item was not closed.
+    return Position(len(elided), -1)
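+
+# Illustrative call to close_expression() (not executed), assuming a two-line
+# snippet in elided form:
+#     elided = ['int sum(int a, int b)', '{ return a + b; }']
+#     close_expression(elided, Position(0, 7))  # -> Position(0, 21), just past ')'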
+
+
+def check_for_copyright(lines, error):
+    """Logs an error if no Copyright message appears at the top of the file."""
+
+    # We'll say it should occur by line 10. Don't forget there's a
+    # dummy line at the front.
+    for line in xrange(1, min(len(lines), 11)):
+        if re.search(r'Copyright', lines[line], re.I):
+            break
+    else:                       # means no copyright line was found
+        error(0, 'legal/copyright', 5,
+              'No copyright message found.  '
+              'You should have a line: "Copyright [year] <Copyright Owner>"')
+
+
+def get_header_guard_cpp_variable(filename):
+    """Returns the CPP variable that should be used as a header guard.
+
+    Args:
+      filename: The name of a C++ header file.
+
+    Returns:
+      The CPP variable that should be used as a header guard in the
+      named file.
+
+    """
+
+    # Restores original filename in case that style checker is invoked from Emacs's
+    # flymake.
+    filename = re.sub(r'_flymake\.h$', '.h', filename)
+
+    standard_name = sub(r'[-.\s]', '_', os.path.basename(filename))
+
+    # Files under WTF typically have header guards that start with WTF_.
+    if '/wtf/' in filename:
+        special_name = "WTF_" + standard_name
+    else:
+        special_name = standard_name
+    return (special_name, standard_name)
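+
+# Illustrative calls to get_header_guard_cpp_variable() (not executed;
+# hypothetical paths):
+#     get_header_guard_cpp_variable('Source/WebCore/dom/Node.h')  # -> ('Node_h', 'Node_h')
+#     get_header_guard_cpp_variable('Source/WTF/wtf/Vector.h')    # -> ('WTF_Vector_h', 'Vector_h')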
+
+
+def check_for_header_guard(filename, lines, error):
+    """Checks that the file contains a header guard.
+
+    Logs an error if no #ifndef header guard is present, or if the guard
+    does not match the name expected for this file.
+
+    Args:
+      filename: The name of the C++ header file.
+      lines: An array of strings, each representing a line of the file.
+      error: The function to call with any errors found.
+    """
+
+    cppvar = get_header_guard_cpp_variable(filename)
+
+    ifndef = None
+    ifndef_line_number = 0
+    define = None
+    for line_number, line in enumerate(lines):
+        line_split = line.split()
+        if len(line_split) >= 2:
+            # find the first occurrence of #ifndef and #define, save arg
+            if not ifndef and line_split[0] == '#ifndef':
+                # set ifndef to the header guard presented on the #ifndef line.
+                ifndef = line_split[1]
+                ifndef_line_number = line_number
+            if not define and line_split[0] == '#define':
+                define = line_split[1]
+            if define and ifndef:
+                break
+
+    if not ifndef or not define or ifndef != define:
+        error(0, 'build/header_guard', 5,
+              'No #ifndef header guard found, suggested CPP variable is: %s' %
+              cppvar[0])
+        return
+
+    # The guard should be File_h.
+    if ifndef not in cppvar:
+        error(ifndef_line_number, 'build/header_guard', 5,
+              '#ifndef header guard has wrong style, please use: %s' % cppvar[0])
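+
+# For example (illustrative): for a hypothetical Node.h guarded by
+#     #ifndef NODE_H
+#     #define NODE_H
+# check_for_header_guard() reports "#ifndef header guard has wrong style,
+# please use: Node_h"; with no guard at all it reports build/header_guard at line 0.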
+
+
+def check_for_unicode_replacement_characters(lines, error):
+    """Logs an error for each line containing Unicode replacement characters.
+
+    These indicate that either the file contained invalid UTF-8 (likely)
+    or Unicode replacement characters (which it shouldn't).  Note that
+    it's possible for this to throw off line numbering if the invalid
+    UTF-8 occurred adjacent to a newline.
+
+    Args:
+      lines: An array of strings, each representing a line of the file.
+      error: The function to call with any errors found.
+    """
+    for line_number, line in enumerate(lines):
+        if u'\ufffd' in line:
+            error(line_number, 'readability/utf8', 5,
+                  'Line contains invalid UTF-8 (or Unicode replacement character).')
+
+
+def check_for_new_line_at_eof(lines, error):
+    """Logs an error if there is no newline char at the end of the file.
+
+    Args:
+      lines: An array of strings, each representing a line of the file.
+      error: The function to call with any errors found.
+    """
+
+    # The array lines() was created by adding two newlines to the
+    # original file (go figure), then splitting on \n.
+    # To verify that the file ends in \n, we just have to make sure the
+    # second-to-last element of lines() exists and is empty.
+    if len(lines) < 3 or lines[-2]:
+        error(len(lines) - 2, 'whitespace/ending_newline', 5,
+              'Could not find a newline character at the end of the file.')
+
+
+def check_for_multiline_comments_and_strings(clean_lines, line_number, error):
+    """Logs an error if we see /* ... */ or "..." that extend past one line.
+
+    /* ... */ comments are legit inside macros, for one line.
+    Otherwise, we prefer // comments, so it's ok to warn about the
+    other.  Likewise, it's ok for strings to extend across multiple
+    lines, as long as a line continuation character (backslash)
+    terminates each line. Although not currently prohibited by the C++
+    style guide, it's ugly and unnecessary. We don't do well with either
+    in this lint program, so we warn about both.
+
+    Args:
+      clean_lines: A CleansedLines instance containing the file.
+      line_number: The number of the line to check.
+      error: The function to call with any errors found.
+    """
+    line = clean_lines.elided[line_number]
+
+    # Remove all \\ (escaped backslashes) from the line. They are OK, and the
+    # second (escaped) slash may trigger later \" detection erroneously.
+    line = line.replace('\\\\', '')
+
+    if line.count('/*') > line.count('*/'):
+        error(line_number, 'readability/multiline_comment', 5,
+              'Complex multi-line /*...*/-style comment found. '
+              'Lint may give bogus warnings.  '
+              'Consider replacing these with //-style comments, '
+              'with #if 0...#endif, '
+              'or with more clearly structured multi-line comments.')
+
+    if (line.count('"') - line.count('\\"')) % 2:
+        error(line_number, 'readability/multiline_string', 5,
+              'Multi-line string ("...") found.  This lint script doesn\'t '
+              'do well with such strings, and may give bogus warnings.  They\'re '
+              'ugly and unnecessary, and you should use concatenation instead.')
+
+
+_THREADING_LIST = (
+    ('asctime(', 'asctime_r('),
+    ('ctime(', 'ctime_r('),
+    ('getgrgid(', 'getgrgid_r('),
+    ('getgrnam(', 'getgrnam_r('),
+    ('getlogin(', 'getlogin_r('),
+    ('getpwnam(', 'getpwnam_r('),
+    ('getpwuid(', 'getpwuid_r('),
+    ('gmtime(', 'gmtime_r('),
+    ('localtime(', 'localtime_r('),
+    ('rand(', 'rand_r('),
+    ('readdir(', 'readdir_r('),
+    ('strtok(', 'strtok_r('),
+    ('ttyname(', 'ttyname_r('),
+    )
+
+
+def check_posix_threading(clean_lines, line_number, error):
+    """Checks for calls to thread-unsafe functions.
+
+    Much code was originally written without multi-threading in mind. Engineers
+    also rely on old experience, having learned POSIX before the threading
+    extensions were added. These checks guide engineers toward the thread-safe
+    variants (when using POSIX directly).
+
+    Args:
+      clean_lines: A CleansedLines instance containing the file.
+      line_number: The number of the line to check.
+      error: The function to call with any errors found.
+    """
+    line = clean_lines.elided[line_number]
+    for single_thread_function, multithread_safe_function in _THREADING_LIST:
+        index = line.find(single_thread_function)
+        # Comparisons made explicit for clarity -- pylint: disable-msg=C6403
+        if index >= 0 and (index == 0 or (not line[index - 1].isalnum()
+                                          and line[index - 1] not in ('_', '.', '>'))):
+            error(line_number, 'runtime/threadsafe_fn', 2,
+                  'Consider using ' + multithread_safe_function +
+                  '...) instead of ' + single_thread_function +
+                  '...) for improved thread safety.')
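+
+# For example (illustrative), check_posix_threading() reports a line such as
+#     struct tm* t = localtime(&now);
+# with "Consider using localtime_r(...) instead of localtime(...)", while a
+# call like object->localtime(...) is skipped because of the preceding '>'.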
+
+
+# Matches invalid increment: *count++, which moves pointer instead of
+# incrementing a value.
+_RE_PATTERN_INVALID_INCREMENT = re.compile(
+    r'^\s*\*\w+(\+\+|--);')
+
+
+def check_invalid_increment(clean_lines, line_number, error):
+    """Checks for invalid increment *count++.
+
+    For example, the following function:
+    void increment_counter(int* count) {
+        *count++;
+    }
+    is invalid, because it effectively does count++, moving the pointer, and
+    should be replaced with ++*count, (*count)++ or *count += 1.
+
+    Args:
+      clean_lines: A CleansedLines instance containing the file.
+      line_number: The number of the line to check.
+      error: The function to call with any errors found.
+    """
+    line = clean_lines.elided[line_number]
+    if _RE_PATTERN_INVALID_INCREMENT.match(line):
+        error(line_number, 'runtime/invalid_increment', 5,
+              'Changing pointer instead of value (or unused value of operator*).')
+
+
+class _ClassInfo(object):
+    """Stores information about a class."""
+
+    def __init__(self, name, line_number):
+        self.name = name
+        self.line_number = line_number
+        self.seen_open_brace = False
+        self.is_derived = False
+        self.virtual_method_line_number = None
+        self.has_virtual_destructor = False
+        self.brace_depth = 0
+
+
+class _ClassState(object):
+    """Holds the current state of the parse relating to class declarations.
+
+    It maintains a stack of _ClassInfos representing the parser's guess
+    as to the current nesting of class declarations. The innermost class
+    is at the top (back) of the stack. Typically, the stack will either
+    be empty or have exactly one entry.
+    """
+
+    def __init__(self):
+        self.classinfo_stack = []
+
+    def check_finished(self, error):
+        """Checks that all classes have been completely parsed.
+
+        Call this when all lines in a file have been processed.
+        Args:
+          error: The function to call with any errors found.
+        """
+        if self.classinfo_stack:
+            # Note: This test can result in false positives if #ifdef constructs
+            # get in the way of brace matching. See the testBuildClass test in
+            # cpp_style_unittest.py for an example of this.
+            error(self.classinfo_stack[0].line_number, 'build/class', 5,
+                  'Failed to find complete declaration of class %s' %
+                  self.classinfo_stack[0].name)
+
+
+class _FileState(object):
+    def __init__(self, clean_lines, file_extension):
+        self._did_inside_namespace_indent_warning = False
+        self._clean_lines = clean_lines
+        if file_extension in ['m', 'mm']:
+            self._is_objective_c = True
+            self._is_c = False
+        elif file_extension == 'h':
+            # In the case of header files, it is unknown whether the file
+            # is C / Objective-C, so set these values to None and, when they
+            # are requested, use heuristics to guess.
+            self._is_objective_c = None
+            self._is_c = None
+        elif file_extension == 'c':
+            self._is_c = True
+            self._is_objective_c = False
+        else:
+            self._is_objective_c = False
+            self._is_c = False
+
+    def set_did_inside_namespace_indent_warning(self):
+        self._did_inside_namespace_indent_warning = True
+
+    def did_inside_namespace_indent_warning(self):
+        return self._did_inside_namespace_indent_warning
+
+    def is_objective_c(self):
+        if self._is_objective_c is None:
+            for line in self._clean_lines.elided:
+                # A line starting with @ or #import seems like the best indication
+                # that we have an Objective-C file.
+                if line.startswith("@") or line.startswith("#import"):
+                    self._is_objective_c = True
+                    break
+            else:
+                self._is_objective_c = False
+        return self._is_objective_c
+
+    def is_c(self):
+        if self._is_c is None:
+            for line in self._clean_lines.lines:
+                # If extern "C" is found, it is a good indication
+                # that we have a C header file.
+                if line.startswith('extern "C"'):
+                    self._is_c = True
+                    break
+            else:
+                self._is_c = False
+        return self._is_c
+
+    def is_c_or_objective_c(self):
+        """Return whether the file appears to be C or Objective-C."""
+        return self.is_c() or self.is_objective_c()
+
+
+def check_for_non_standard_constructs(clean_lines, line_number,
+                                      class_state, error):
+    """Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
+
+    Complain about several constructs which gcc-2 accepts, but which are
+    not standard C++.  Warning about these in lint is one way to ease the
+    transition to new compilers.
+    - put storage class first (e.g. "static const" instead of "const static").
+    - "%lld" instead of "%qd" in printf-type functions.
+    - "%1$d" is non-standard in printf-type functions.
+    - "\%" is an undefined character escape sequence.
+    - text after #endif is not allowed.
+    - invalid inner-style forward declaration.
+    - >? and <? operators, and their >?= and <?= cousins.
+    - classes with virtual methods need virtual destructors (compiler warning
+        available, but not turned on yet.)
+
+    Additionally, check for constructor/destructor style violations as it
+    is very convenient to do so while checking for gcc-2 compliance.
+
+    Args:
+      clean_lines: A CleansedLines instance containing the file.
+      line_number: The number of the line to check.
+      class_state: A _ClassState instance which maintains information about
+                   the current stack of nested class declarations being parsed.
+      error: A callable to which errors are reported, which takes parameters:
+             line number, error level, and message
+    """
+
+    # Remove comments from the line, but leave in strings for now.
+    line = clean_lines.lines[line_number]
+
+    if search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
+        error(line_number, 'runtime/printf_format', 3,
+              '%q in format strings is deprecated.  Use %ll instead.')
+
+    if search(r'printf\s*\(.*".*%\d+\$', line):
+        error(line_number, 'runtime/printf_format', 2,
+              '%N$ formats are unconventional.  Try rewriting to avoid them.')
+
+    # Remove escaped backslashes before looking for undefined escapes.
+    line = line.replace('\\\\', '')
+
+    if search(r'("|\').*\\(%|\[|\(|{)', line):
+        error(line_number, 'build/printf_format', 3,
+              '%, [, (, and { are undefined character escapes.  Unescape them.')
+
+    # For the rest, work with both comments and strings removed.
+    line = clean_lines.elided[line_number]
+
+    if search(r'\b(const|volatile|void|char|short|int|long'
+              r'|float|double|signed|unsigned'
+              r'|schar|u?int8|u?int16|u?int32|u?int64)'
+              r'\s+(auto|register|static|extern|typedef)\b',
+              line):
+        error(line_number, 'build/storage_class', 5,
+              'Storage class (static, extern, typedef, etc) should be first.')
+
+    if match(r'\s*#\s*endif\s*[^/\s]+', line):
+        error(line_number, 'build/endif_comment', 5,
+              'Uncommented text after #endif is non-standard.  Use a comment.')
+
+    if match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
+        error(line_number, 'build/forward_decl', 5,
+              'Inner-style forward declarations are invalid.  Remove this line.')
+
+    if search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?', line):
+        error(line_number, 'build/deprecated', 3,
+              '>? and <? (max and min) operators are non-standard and deprecated.')
+
+    # Track class entry and exit, and attempt to find cases within the
+    # class declaration that don't meet the C++ style
+    # guidelines. Tracking is very dependent on the code matching Google
+    # style guidelines, but it seems to perform well enough in testing
+    # to be a worthwhile addition to the checks.
+    classinfo_stack = class_state.classinfo_stack
+    # Look for a class declaration
+    class_decl_match = match(
+        r'\s*(template\s*<[\w\s<>,:]*>\s*)?(class|struct)\s+(\w+(::\w+)*)', line)
+    if class_decl_match:
+        classinfo_stack.append(_ClassInfo(class_decl_match.group(3), line_number))
+
+    # Everything else in this function uses the top of the stack if it's
+    # not empty.
+    if not classinfo_stack:
+        return
+
+    classinfo = classinfo_stack[-1]
+
+    # If the opening brace hasn't been seen look for it and also
+    # parent class declarations.
+    if not classinfo.seen_open_brace:
+        # If the line has a ';' in it, assume it's a forward declaration or
+        # a single-line class declaration, which we won't process.
+        if line.find(';') != -1:
+            classinfo_stack.pop()
+            return
+        classinfo.seen_open_brace = (line.find('{') != -1)
+        # Look for a bare ':'
+        if search('(^|[^:]):($|[^:])', line):
+            classinfo.is_derived = True
+        if not classinfo.seen_open_brace:
+            return  # Everything else in this function is for after open brace
+
+    # The class may have been declared with namespace or classname qualifiers.
+    # The constructor and destructor will not have those qualifiers.
+    base_classname = classinfo.name.split('::')[-1]
+
+    # Look for single-argument constructors that aren't marked explicit.
+    # Technically a valid construct, but against style.
+    args = match(r'(?<!explicit)\s+%s\s*\(([^,()]+)\)'
+                 % re.escape(base_classname),
+                 line)
+    if (args
+        and args.group(1) != 'void'
+        and not match(r'(const\s+)?%s\s*&' % re.escape(base_classname),
+                      args.group(1).strip())):
+        error(line_number, 'runtime/explicit', 5,
+              'Single-argument constructors should be marked explicit.')
+
+    # Look for methods declared virtual.
+    if search(r'\bvirtual\b', line):
+        classinfo.virtual_method_line_number = line_number
+        # Only look for a destructor declaration on the same line. It would
+        # be extremely unlikely for the destructor declaration to occupy
+        # more than one line.
+        if search(r'~%s\s*\(' % base_classname, line):
+            classinfo.has_virtual_destructor = True
+
+    # Look for class end.
+    brace_depth = classinfo.brace_depth
+    brace_depth = brace_depth + line.count('{') - line.count('}')
+    if brace_depth <= 0:
+        classinfo = classinfo_stack.pop()
+        # Try to detect missing virtual destructor declarations.
+        # For now, only warn if a non-derived class with virtual methods lacks
+        # a virtual destructor. This is to make it less likely that people will
+        # declare derived virtual destructors without declaring the base
+        # destructor virtual.
+        if ((classinfo.virtual_method_line_number is not None)
+            and (not classinfo.has_virtual_destructor)
+            and (not classinfo.is_derived)):  # Only warn for base classes
+            error(classinfo.line_number, 'runtime/virtual', 4,
+                  'The class %s probably needs a virtual destructor due to '
+                  'having virtual method(s), one declared at line %d.'
+                  % (classinfo.name, classinfo.virtual_method_line_number))
+    else:
+        classinfo.brace_depth = brace_depth
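+
+# For example (illustrative): while check_for_non_standard_constructs() is
+# inside a class body such as
+#     class Document {
+#         Document(Frame* frame);
+#     };
+# the constructor line matches the single-argument pattern above (its argument
+# is neither 'void' nor a Document reference), so runtime/explicit is reported.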
+
+
+def check_spacing_for_function_call(line, line_number, error):
+    """Checks for the correctness of various spacing around function calls.
+
+    Args:
+      line: The text of the line to check.
+      line_number: The number of the line to check.
+      error: The function to call with any errors found.
+    """
+
+    # Since function calls often occur inside if/for/foreach/while/switch
+    # expressions - which have their own, more liberal conventions - we
+    # first see if we should be looking inside such an expression for a
+    # function call, to which we can apply more strict standards.
+    function_call = line    # if there's no control flow construct, look at whole line
+    for pattern in (r'\bif\s*\((.*)\)\s*{',
+                    r'\bfor\s*\((.*)\)\s*{',
+                    r'\bforeach\s*\((.*)\)\s*{',
+                    r'\bwhile\s*\((.*)\)\s*[{;]',
+                    r'\bswitch\s*\((.*)\)\s*{'):
+        matched = search(pattern, line)
+        if matched:
+            function_call = matched.group(1)    # look inside the parens for function calls
+            break
+
+    # Except in if/for/foreach/while/switch, there should never be space
+    # immediately inside parens (eg "f( 3, 4 )").  We make an exception
+    # for nested parens ( (a+b) + c ).  Likewise, there should never be
+    # a space before a ( when it's a function argument.  I assume it's a
+    # function argument when the char before the whitespace is legal in
+    # a function name (alnum + _) and we're not starting a macro. Also ignore
+    # pointers and references to arrays and functions because they're too tricky:
+    # we use a very simple way to recognize these:
+    # " (something)(maybe-something)" or
+    # " (something)(maybe-something," or
+    # " (something)[something]"
+    # Note that we assume the contents of [] to be short enough that
+    # they'll never need to wrap.
+    if (  # Ignore control structures.
+        not search(r'\b(if|for|foreach|while|switch|return|new|delete)\b', function_call)
+        # Ignore pointers/references to functions.
+        and not search(r' \([^)]+\)\([^)]*(\)|,$)', function_call)
+        # Ignore pointers/references to arrays.
+        and not search(r' \([^)]+\)\[[^\]]+\]', function_call)):
+        if search(r'\w\s*\([ \t](?!\s*\\$)', function_call):      # a ( used for a fn call
+            error(line_number, 'whitespace/parens', 4,
+                  'Extra space after ( in function call')
+        elif search(r'\([ \t]+(?!(\s*\\)|\()', function_call):
+            error(line_number, 'whitespace/parens', 2,
+                  'Extra space after (')
+        if (search(r'\w\s+\(', function_call)
+            and not match(r'\s*(#|typedef)', function_call)):
+            error(line_number, 'whitespace/parens', 4,
+                  'Extra space before ( in function call')
+        # If the ) is followed only by a newline or a { + newline, assume it's
+        # part of a control statement (if/while/etc), and don't complain
+        if search(r'[^)\s]\s+\)(?!\s*$|{\s*$)', function_call):
+            error(line_number, 'whitespace/parens', 2,
+                  'Extra space before )')
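+
+# Illustrative inputs to check_spacing_for_function_call() (not executed) and
+# the whitespace/parens messages they produce, given any error handler with the
+# usual (line_number, category, confidence, message) signature:
+#     check_spacing_for_function_call('foo( bar);', 1, error)  # Extra space after ( in function call
+#     check_spacing_for_function_call('foo (bar);', 1, error)  # Extra space before ( in function call
+#     check_spacing_for_function_call('foo(bar );', 1, error)  # Extra space before )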
+
+
+def is_blank_line(line):
+    """Returns true if the given line is blank.
+
+    We consider a line to be blank if the line is empty or consists of
+    only white spaces.
+
+    Args:
+      line: A line of a string.
+
+    Returns:
+      True, if the given line is blank.
+    """
+    return not line or line.isspace()
+
+
+def detect_functions(clean_lines, line_number, function_state, error):
+    """Finds where functions start and end.
+
+    Uses a simplistic algorithm assuming other style guidelines
+    (especially spacing) are followed.
+    Trivial bodies are unchecked, so constructors with huge initializer lists
+    may be missed.
+
+    Args:
+      clean_lines: A CleansedLines instance containing the file.
+      line_number: The number of the line to check.
+      function_state: Current function name and lines in body so far.
+      error: The function to call with any errors found.
+    """
+    # Are we now past the end of a function?
+    if function_state.end_position.row + 1 == line_number:
+        function_state.end()
+
+    # If we're in a function, don't try to detect a new one.
+    if function_state.in_a_function:
+        return
+
+    lines = clean_lines.lines
+    line = lines[line_number]
+    raw = clean_lines.raw_lines
+    raw_line = raw[line_number]
+
+    # Lines ending with a \ indicate a macro. Don't try to check them.
+    if raw_line.endswith('\\'):
+        return
+
+    regexp = r'\s*(\w(\w|::|\*|\&|\s|<|>|,|~|(operator\s*(/|-|=|!|\+)+))*)\('  # decls * & space::name( ...
+    match_result = match(regexp, line)
+    if not match_result:
+        return
+
+    # If the name is all caps and underscores, figure it's a macro and
+    # ignore it, unless it's TEST or TEST_F.
+    function_name = match_result.group(1).split()[-1]
+    if function_name != 'TEST' and function_name != 'TEST_F' and match(r'[A-Z_]+$', function_name):
+        return
+
+    joined_line = ''
+    for start_line_number in xrange(line_number, clean_lines.num_lines()):
+        start_line = clean_lines.elided[start_line_number]
+        joined_line += ' ' + start_line.lstrip()
+        body_match = search(r'{|;', start_line)
+        if body_match:
+            body_start_position = Position(start_line_number, body_match.start(0))
+
+            # Replace template constructs with _ so that no spaces remain in the function name,
+            # while keeping the column numbers of other characters the same as "line".
+            line_with_no_templates = iteratively_replace_matches_with_char(r'<[^<>]*>', '_', line)
+            match_function = search(r'((\w|:|<|>|,|~|(operator\s*(/|-|=|!|\+)+))*)\(', line_with_no_templates)
+            if not match_function:
+                return  # The '(' must have been inside of a template.
+
+            # Use the column numbers from the modified line to find the
+            # function name in the original line.
+            function = line[match_function.start(1):match_function.end(1)]
+            function_name_start_position = Position(line_number, match_function.start(1))
+
+            if match(r'TEST', function):    # Handle TEST... macros
+                parameter_regexp = search(r'(\(.*\))', joined_line)
+                if parameter_regexp:             # Ignore bad syntax
+                    function += parameter_regexp.group(1)
+            else:
+                function += '()'
+
+            parameter_start_position = Position(line_number, match_function.end(1))
+            parameter_end_position = close_expression(clean_lines.elided, parameter_start_position)
+            if parameter_end_position.row == len(clean_lines.elided):
+                # No end was found.
+                return
+
+            if start_line[body_start_position.column] == ';':
+                end_position = Position(body_start_position.row, body_start_position.column + 1)
+            else:
+                end_position = close_expression(clean_lines.elided, body_start_position)
+
+            # Check for nonsensical positions. (This happens in test cases which check code snippets.)
+            if parameter_end_position > body_start_position:
+                return
+
+            function_state.begin(function, function_name_start_position, body_start_position, end_position,
+                                 parameter_start_position, parameter_end_position, clean_lines)
+            return
+
+    # No body for the function (or evidence of a non-function) was found.
+    error(line_number, 'readability/fn_size', 5,
+          'Lint failed to find start of function body.')
+
+
+def check_for_function_lengths(clean_lines, line_number, function_state, error):
+    """Reports an error for overly long function bodies.
+
+    For an overview why this is done, see:
+    http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
+
+    Blank/comment lines are not counted so as to avoid encouraging the removal
+    of vertical space and comments just to get through a lint check.
+    NOLINT *on the last line of a function* disables this check.
+
+    Args:
+      clean_lines: A CleansedLines instance containing the file.
+      line_number: The number of the line to check.
+      function_state: Current function name and lines in body so far.
+      error: The function to call with any errors found.
+    """
+    lines = clean_lines.lines
+    line = lines[line_number]
+    raw = clean_lines.raw_lines
+    raw_line = raw[line_number]
+
+    if function_state.end_position.row == line_number:  # last line
+        if not search(r'\bNOLINT\b', raw_line):
+            function_state.check(error, line_number)
+    elif not match(r'^\s*$', line):
+        function_state.count(line_number)  # Count non-blank/non-comment lines.
+
+
+def _check_parameter_name_against_text(parameter, text, error):
+    """Checks to see if the parameter name is contained within the text.
+
+    Return false if the check failed (i.e. an error was produced).
+    """
+
+    # Treat 'lower with underscores' as a canonical form because it is
+    # case insensitive while still retaining word breaks. (This ensures that
+    # 'elate' doesn't look like it is a duplicate of 'NateLate'.)
+    canonical_parameter_name = parameter.lower_with_underscores_name()
+
+    # Append "object" to the text to catch variables that did the same (but only
+    # do this when the parameter name is more than a single character, to avoid
+    # flagging 'b', which may be a perfectly fine variable name in an rgba function).
+    if len(canonical_parameter_name) > 1:
+        text = sub(r'(\w)\b', r'\1Object', text)
+    canonical_text = _convert_to_lower_with_underscores(text)
+
+    # Used to detect cases like ec for ExceptionCode.
+    acronym = _create_acronym(text).lower()
+    if canonical_text.find(canonical_parameter_name) != -1 or acronym.find(canonical_parameter_name) != -1:
+        error(parameter.row, 'readability/parameter_name', 5,
+              'The parameter name "%s" adds no information, so it should be removed.' % parameter.name)
+        return False
+    return True
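+
+# For example (illustrative): _check_parameter_name_against_text() flags a
+# parameter declared as 'ExceptionCode ec', because the acronym of the type
+# ('ec') contains the parameter name, whereas a more descriptive name such as
+# 'exceptionToThrow' would be accepted.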
+
+
+def check_function_definition_and_pass_ptr(type_text, row, location_description, error):
+    """Check that function definitions use Pass*Ptr instead of *Ptr.
+
+    Args:
+       type_text: A string containing the type. (For return values, it may contain more than the type.)
+       row: The row number of the type.
+       location_description: Used to indicate where the type is. This is either 'parameter' or 'return'.
+       error: The function to call with any errors found.
+    """
+    match_ref_or_own_ptr = r'(?=\W|^)(Ref|Own)Ptr(?=\W)'
+    bad_type_usage = search(match_ref_or_own_ptr, type_text)
+    if not bad_type_usage or type_text.endswith('&') or type_text.endswith('*'):
+        return
+    type_name = bad_type_usage.group(0)
+    error(row, 'readability/pass_ptr', 5,
+          'The %s type should use Pass%s instead of %s.' % (location_description, type_name, type_name))
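+
+# Illustrative calls to check_function_definition_and_pass_ptr() (not executed):
+#     check_function_definition_and_pass_ptr('RefPtr<Node>', 10, 'return', error)
+#     # -> "The return type should use PassRefPtr instead of RefPtr."
+#     check_function_definition_and_pass_ptr('RefPtr<Node>&', 10, 'parameter', error)
+#     # -> no report; references and pointers to RefPtr are allowed.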
+
+
+def check_function_definition(filename, file_extension, clean_lines, line_number, function_state, error):
+    """Check function definitions for style issues.
+
+    Specifically, check that parameter names in declarations add information.
+
+    Args:
+       filename: Filename of the file that is being processed.
+       file_extension: The current file extension, without the leading dot.
+       clean_lines: A CleansedLines instance containing the file.
+       line_number: The number of the line to check.
+       function_state: Current function name and lines in body so far.
+       error: The function to call with any errors found.
+    """
+    if line_number != function_state.body_start_position.row:
+        return
+
+    modifiers_and_return_type = function_state.modifiers_and_return_type()
+    if filename.find('/chromium/') != -1 and search(r'\bWEBKIT_EXPORT\b', modifiers_and_return_type):
+        if filename.find('/chromium/public/') == -1 and filename.find('/chromium/tests/') == -1 and filename.find('chromium/platform') == -1:
+            error(function_state.function_name_start_position.row, 'readability/webkit_export', 5,
+                  'WEBKIT_EXPORT should only appear in the chromium public (or tests) directory.')
+        elif not file_extension == "h":
+            error(function_state.function_name_start_position.row, 'readability/webkit_export', 5,
+                  'WEBKIT_EXPORT should only be used in header files.')
+        elif not function_state.is_declaration or search(r'\binline\b', modifiers_and_return_type):
+            error(function_state.function_name_start_position.row, 'readability/webkit_export', 5,
+                  'WEBKIT_EXPORT should not be used on a function with a body.')
+        elif function_state.is_pure:
+            error(function_state.function_name_start_position.row, 'readability/webkit_export', 5,
+                  'WEBKIT_EXPORT should not be used with a pure virtual function.')
+
+    check_function_definition_and_pass_ptr(modifiers_and_return_type, function_state.function_name_start_position.row, 'return', error)
+
+    parameter_list = function_state.parameter_list()
+    for parameter in parameter_list:
+        check_function_definition_and_pass_ptr(parameter.type, parameter.row, 'parameter', error)
+
+        # Do checks specific to function declarations and parameter names.
+        if not function_state.is_declaration or not parameter.name:
+            continue
+
+        # Check the parameter name against the function name for single parameter set functions.
+        if len(parameter_list) == 1 and match('set[A-Z]', function_state.current_function):
+            trimmed_function_name = function_state.current_function[len('set'):]
+            if not _check_parameter_name_against_text(parameter, trimmed_function_name, error):
+                continue  # Since an error was noted for this name, move to the next parameter.
+
+        # Check the parameter name against the type.
+        if not _check_parameter_name_against_text(parameter, parameter.type, error):
+            continue  # Since an error was noted for this name, move to the next parameter.
+
+
+def check_pass_ptr_usage(clean_lines, line_number, function_state, error):
+    """Check for proper usage of Pass*Ptr.
+
+    Currently this is limited to detecting declarations of Pass*Ptr
+    variables inside of functions.
+
+    Args:
+      clean_lines: A CleansedLines instance containing the file.
+      line_number: The number of the line to check.
+      function_state: Current function name and lines in body so far.
+      error: The function to call with any errors found.
+    """
+    if not function_state.in_a_function:
+        return
+
+    lines = clean_lines.lines
+    line = lines[line_number]
+    if line_number > function_state.body_start_position.row:
+        matched_pass_ptr = match(r'^\s*Pass([A-Z][A-Za-z]*)Ptr<', line)
+        if matched_pass_ptr:
+            type_name = 'Pass%sPtr' % matched_pass_ptr.group(1)
+            error(line_number, 'readability/pass_ptr', 5,
+                  'Local variables should never be %s (see '
+                  'http://webkit.org/coding/RefPtr.html).' % type_name)
+
+
+def check_for_leaky_patterns(clean_lines, line_number, function_state, error):
+    """Check for constructs known to be leak prone.
+    Args:
+      clean_lines: A CleansedLines instance containing the file.
+      line_number: The number of the line to check.
+      function_state: Current function name and lines in body so far.
+      error: The function to call with any errors found.
+    """
+    lines = clean_lines.lines
+    line = lines[line_number]
+
+    matched_get_dc = search(r'\b(?P<function_name>GetDC(Ex)?)\s*\(', line)
+    if matched_get_dc:
+        error(line_number, 'runtime/leaky_pattern', 5,
+              'Use the class HWndDC instead of calling %s to avoid potential '
+              'memory leaks.' % matched_get_dc.group('function_name'))
+
+    matched_create_dc = search(r'\b(?P<function_name>Create(Compatible)?DC)\s*\(', line)
+    matched_own_dc = search(r'\badoptPtr\b', line)
+    if matched_create_dc and not matched_own_dc:
+        error(line_number, 'runtime/leaky_pattern', 5,
+              'Use adoptPtr and OwnPtr<HDC> when calling %s to avoid potential '
+              'memory leaks.' % matched_create_dc.group('function_name'))
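+
+# For example (illustrative), check_for_leaky_patterns() reports a line such as
+#     HDC hdc = GetDC(m_hwnd);
+# with the HWndDC suggestion, and a line such as
+#     HDC hdc = CreateCompatibleDC(dc);
+# unless adoptPtr appears on the same line.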
+
+
+def check_spacing(file_extension, clean_lines, line_number, error):
+    """Checks for the correctness of various spacing issues in the code.
+
+    Things we check for: spaces around operators, spaces after
+    if/for/while/switch, no spaces around parens in function calls, two
+    spaces between code and comment, don't start a block with a blank
+    line, don't end a function with a blank line, don't have too many
+    blank lines in a row.
+
+    Args:
+      file_extension: The current file extension, without the leading dot.
+      clean_lines: A CleansedLines instance containing the file.
+      line_number: The number of the line to check.
+      error: The function to call with any errors found.
+    """
+
+    raw = clean_lines.raw_lines
+    line = raw[line_number]
+
+    # Before nixing comments, check if the line is blank for no good
+    # reason.  This includes the first line after a block is opened, and
+    # blank lines at the end of a function (ie, right before a line like '}').
+    if is_blank_line(line):
+        elided = clean_lines.elided
+        previous_line = elided[line_number - 1]
+        previous_brace = previous_line.rfind('{')
+        # FIXME: Don't complain if line before blank line, and line after,
+        #        both start with alnums and are indented the same amount.
+        #        This ignores whitespace at the start of a namespace block
+        #        because those are not usually indented.
+        if (previous_brace != -1 and previous_line[previous_brace:].find('}') == -1
+            and previous_line[:previous_brace].find('namespace') == -1):
+            # OK, we have a blank line at the start of a code block.  Before we
+            # complain, we check if it is an exception to the rule: The previous
+            # non-empty line has the parameters of a function header that are indented
+            # 4 spaces (because they did not fit in an 80 column line when placed on
+            # the same line as the function name).  We also check for the case where
+            # the previous line is indented 6 spaces, which may happen when the
+            # initializers of a constructor do not fit into an 80 column line.
+            exception = False
+            if match(r' {6}\w', previous_line):  # Initializer list?
+                # We are looking for the opening column of initializer list, which
+                # should be indented 4 spaces to cause 6 space indentation afterwards.
+                search_position = line_number - 2
+                while (search_position >= 0
+                       and match(r' {6}\w', elided[search_position])):
+                    search_position -= 1
+                exception = (search_position >= 0
+                             and elided[search_position][:5] == '    :')
+            else:
+                # Search for the function arguments or an initializer list.  We use a
+                # simple heuristic here: if the line is indented 4 spaces and we have a
+                # closing paren (without the opening paren), followed by an opening brace
+                # or colon (for initializer lists), we assume that it is the last line of
+                # a function header.  If we have a colon indented 4 spaces, it is an
+                # initializer list.
+                exception = (match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
+                                   previous_line)
+                             or match(r' {4}:', previous_line))
+
+            if not exception:
+                error(line_number, 'whitespace/blank_line', 2,
+                      'Blank line at the start of a code block.  Is this needed?')
+        # This doesn't ignore whitespace at the end of a namespace block
+        # because that is too hard without pairing open/close braces;
+        # however, a special exception is made for namespace closing
+        # brackets which have a comment containing "namespace".
+        #
+        # Also, ignore blank lines at the end of a block in a long if-else
+        # chain, like this:
+        #   if (condition1) {
+        #     // Something followed by a blank line
+        #
+        #   } else if (condition2) {
+        #     // Something else
+        #   }
+        if line_number + 1 < clean_lines.num_lines():
+            next_line = raw[line_number + 1]
+            if (next_line
+                and match(r'\s*}', next_line)
+                and next_line.find('namespace') == -1
+                and next_line.find('} else ') == -1):
+                error(line_number, 'whitespace/blank_line', 3,
+                      'Blank line at the end of a code block.  Is this needed?')
+
+    # Next, we check for proper spacing with respect to comments.
+    comment_position = line.find('//')
+    if comment_position != -1:
+        # Check if the // may be in quotes.  If so, ignore it
+        # Comparisons made explicit for clarity -- pylint: disable-msg=C6403
+        if (line.count('"', 0, comment_position) - line.count('\\"', 0, comment_position)) % 2 == 0:   # not in quotes
+            # Allow one space before end of line comment.
+            if (not match(r'^\s*$', line[:comment_position])
+                and (comment_position >= 1
+                and ((line[comment_position - 1] not in string.whitespace)
+                     or (comment_position >= 2
+                         and line[comment_position - 2] in string.whitespace)))):
+                error(line_number, 'whitespace/comments', 5,
+                      'One space before end of line comments')
+            # There should always be a space between the // and the comment
+            commentend = comment_position + 2
+            if commentend < len(line) and not line[commentend] == ' ':
+                # but some lines are exceptions -- e.g. if they're big
+                # comment delimiters like:
+                # //----------------------------------------------------------
+                # or they begin with multiple slashes followed by a space:
+                # //////// Header comment
+                matched = (search(r'[=/-]{4,}\s*$', line[commentend:])
+                           or search(r'^/+ ', line[commentend:]))
+                if not matched:
+                    error(line_number, 'whitespace/comments', 4,
+                          'Should have a space between // and comment')
+
+            # There should only be one space after punctuation in a comment.
+            if search(r'[.!?,;:]\s\s+\w', line[comment_position:]):
+                error(line_number, 'whitespace/comments', 5,
+                      'Should have only a single space after a punctuation in a comment.')
+
+    line = clean_lines.elided[line_number]  # get rid of comments and strings
+
+    # Don't try to do spacing checks for operator methods
+    line = sub(r'operator(==|!=|<|<<|<=|>=|>>|>|\+=|-=|\*=|/=|%=|&=|\|=|^=|<<=|>>=|/)\(', 'operator\(', line)
+    # Don't try to do spacing checks for #include or #import statements at
+    # minimum because it messes up checks for spacing around /
+    if match(r'\s*#\s*(?:include|import)', line):
+        return
+    if search(r'[\w.]=[\w.]', line):
+        error(line_number, 'whitespace/operators', 4,
+              'Missing spaces around =')
+
+    # FIXME: It's not ok to have spaces around binary operators like .
+
+    # You should always have whitespace around binary operators.
+    # Alas, we can't test < or > because they're legitimately used sans spaces
+    # (a->b, vector<int> a).  The only time we can tell is a < with no >, and
+    # only if it's not a template parameter list spilling into the next line.
+    matched = search(r'[^<>=!\s](==|!=|\+=|-=|\*=|/=|/|\|=|&=|<<=|>>=|<=|>=|\|\||\||&&|>>|<<)[^<>=!\s]', line)
+    if not matched:
+        # Note that while it seems that the '<[^<]*' term in the following
+        # regexp could be simplified to '<.*', which would indeed match
+        # the same class of strings, the [^<] means that searching for the
+        # regexp takes linear rather than quadratic time.
+        if not search(r'<[^<]*,\s*$', line):  # template params spill
+            matched = search(r'[^<>=!\s](<)[^<>=!\s]([^>]|->)*$', line)
+    if matched:
+        error(line_number, 'whitespace/operators', 3,
+              'Missing spaces around %s' % matched.group(1))
+
+    # There shouldn't be space around unary operators
+    matched = search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
+    if matched:
+        error(line_number, 'whitespace/operators', 4,
+              'Extra space for operator %s' % matched.group(1))
+
+    # A pet peeve of mine: no spaces after an if, while, switch, or for
+    matched = search(r' (if\(|for\(|foreach\(|while\(|switch\()', line)
+    if matched:
+        error(line_number, 'whitespace/parens', 5,
+              'Missing space before ( in %s' % matched.group(1))
+
+    # For if/for/foreach/while/switch, the left and right parens should be
+    # consistent about how many spaces are inside the parens, and
+    # there should either be zero or one spaces inside the parens.
+    # We don't want: "if ( foo)" or "if ( foo   )".
+    # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
+    matched = search(r'\b(?P<statement>if|for|foreach|while|switch)\s*\((?P<remainder>.*)$', line)
+    if matched:
+        statement = matched.group('statement')
+        condition, rest = up_to_unmatched_closing_paren(matched.group('remainder'))
+        if condition is not None:
+            condition_match = search(r'(?P<leading>[ ]*)(?P<separator>.).*[^ ]+(?P<trailing>[ ]*)', condition)
+            if condition_match:
+                n_leading = len(condition_match.group('leading'))
+                n_trailing = len(condition_match.group('trailing'))
+                if n_leading != 0:
+                    for_exception = statement == 'for' and condition.startswith(' ;')
+                    if not for_exception:
+                        error(line_number, 'whitespace/parens', 5,
+                              'Extra space after ( in %s' % statement)
+                if n_trailing != 0:
+                    for_exception = statement == 'for' and condition.endswith('; ')
+                    if not for_exception:
+                        error(line_number, 'whitespace/parens', 5,
+                              'Extra space before ) in %s' % statement)
+
+            # Do not check for more than one command in macros
+            in_preprocessor_directive = match(r'\s*#', line)
+            if not in_preprocessor_directive and not match(r'((\s*{\s*}?)|(\s*;?))\s*\\?$', rest):
+                error(line_number, 'whitespace/parens', 4,
+                      'More than one command on the same line in %s' % statement)
+
+    # You should always have a space after a comma (either as fn arg or operator)
+    if search(r',[^\s]', line):
+        error(line_number, 'whitespace/comma', 3,
+              'Missing space after ,')
+
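+    # e.g. "int  foo" (two spaces between the tokens) is reported as an extra space.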
+    matched = search(r'^\s*(?P<token1>[a-zA-Z0-9_\*&]+)\s\s+(?P<token2>[a-zA-Z0-9_\*&]+)', line)
+    if matched:
+        error(line_number, 'whitespace/declaration', 3,
+              'Extra space between %s and %s' % (matched.group('token1'), matched.group('token2')))
+
+    if file_extension == 'cpp':
+        # C++ should have the & or * beside the type not the variable name.
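+        # e.g. "Foo *bar" is flagged here, while "Foo* bar" is not.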
+        matched = match(r'\s*\w+(?<!\breturn|\bdelete)\s+(?P<pointer_operator>\*|\&)\w+', line)
+        if matched:
+            error(line_number, 'whitespace/declaration', 3,
+                  'Declaration has space between type name and %s in %s' % (matched.group('pointer_operator'), matched.group(0).strip()))
+
+    elif file_extension == 'c':
+        # C Pointer declaration should have the * beside the variable not the type name.
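+        # e.g. "int* foo" is flagged in a C file, while "int *foo" is not.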
+        matched = search(r'^\s*\w+\*\s+\w+', line)
+        if matched:
+            error(line_number, 'whitespace/declaration', 3,
+                  'Declaration has space between * and variable name in %s' % matched.group(0).strip())
+
+    # Next we will look for issues with function calls.
+    check_spacing_for_function_call(line, line_number, error)
+
+    # Except after an opening paren, you should have spaces before your braces.
+    # And since you should never have braces at the beginning of a line, this is
+    # an easy test.
+    if search(r'[^ ({]{', line):
+        error(line_number, 'whitespace/braces', 5,
+              'Missing space before {')
+
+    # Make sure '} else {' has spaces.
+    if search(r'}else', line):
+        error(line_number, 'whitespace/braces', 5,
+              'Missing space before else')
+
+    # You shouldn't have spaces before your brackets, except maybe after
+    # 'delete []' or 'new char * []'.
+    if search(r'\w\s+\[', line) and not search(r'delete\s+\[', line):
+        error(line_number, 'whitespace/braces', 5,
+              'Extra space before [')
+
+    # There should always be a single space in between braces on the same line.
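+    # e.g. "{}" and "{  }" are both flagged below; "{ }" is accepted.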
+    if search(r'\{\}', line):
+        error(line_number, 'whitespace/braces', 5, 'Missing space inside { }.')
+    if search(r'\{\s\s+\}', line):
+        error(line_number, 'whitespace/braces', 5, 'Too many spaces inside { }.')
+
+    # You shouldn't have a space before a semicolon at the end of the line.
+    # There's a special case for "for" since the style guide allows space before
+    # the semicolon there.
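+    # e.g. "while (condition);" is reported as an empty statement below,
+    # while "do { } while (condition);" is allowed.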
+    if search(r':\s*;\s*$', line):
+        error(line_number, 'whitespace/semicolon', 5,
+              'Semicolon defining empty statement. Use { } instead.')
+    elif search(r'^\s*;\s*$', line):
+        error(line_number, 'whitespace/semicolon', 5,
+              'Line contains only semicolon. If this should be an empty statement, '
+              'use { } instead.')
+    elif (search(r'\s+;\s*$', line) and not search(r'\bfor\b', line)):
+        error(line_number, 'whitespace/semicolon', 5,
+              'Extra space before last semicolon. If this should be an empty '
+              'statement, use { } instead.')
+    elif (search(r'\b(for|while)\s*\(.*\)\s*;\s*$', line)
+          and line.count('(') == line.count(')')
+          # Allow do {} while();
+          and not search(r'}\s*while', line)):
+        error(line_number, 'whitespace/semicolon', 5,
+              'Semicolon defining empty statement for this loop. Use { } instead.')
+
+
+def get_previous_non_blank_line(clean_lines, line_number):
+    """Return the most recent non-blank line and its line number.
+
+    Args:
+      clean_lines: A CleansedLines instance containing the file contents.
+      line_number: The number of the line to check.
+
+    Returns:
+      A tuple with two elements.  The first element is the contents of the last
+      non-blank line before the current line, or the empty string if this is the
+      first non-blank line.  The second is the line number of that line, or -1
+      if this is the first non-blank line.
+    """
+
+    previous_line_number = line_number - 1
+    while previous_line_number >= 0:
+        previous_line = clean_lines.elided[previous_line_number]
+        if not is_blank_line(previous_line):     # if not a blank line...
+            return (previous_line, previous_line_number)
+        previous_line_number -= 1
+    return ('', -1)
+
+
+def check_namespace_indentation(clean_lines, line_number, file_extension, file_state, error):
+    """Looks for indentation errors inside of namespaces.
+
+    Args:
+      clean_lines: A CleansedLines instance containing the file.
+      line_number: The number of the line to check.
+      file_extension: The extension (dot not included) of the file.
+      file_state: A _FileState instance which maintains information about
+                  the state of things in the file.
+      error: The function to call with any errors found.
+    """
+
+    line = clean_lines.elided[line_number] # Get rid of comments and strings.
+
+    namespace_match = match(r'(?P<namespace_indentation>\s*)namespace\s+\S+\s*{\s*$', line)
+    if not namespace_match:
+        return
+
+    current_indentation_level = len(namespace_match.group('namespace_indentation'))
+    if current_indentation_level > 0:
+        # Don't warn about an indented namespace if we already warned about indented code.
+        if not file_state.did_inside_namespace_indent_warning():
+            error(line_number, 'whitespace/indent', 4,
+                  'namespace should never be indented.')
+        return
+    looking_for_semicolon = False
+    line_offset = 0
+    in_preprocessor_directive = False
+    for current_line in clean_lines.elided[line_number + 1:]:
+        line_offset += 1
+        if not current_line.strip():
+            continue
+        if not current_indentation_level:
+            if not (in_preprocessor_directive or looking_for_semicolon):
+                if not match(r'\S', current_line) and not file_state.did_inside_namespace_indent_warning():
+                    file_state.set_did_inside_namespace_indent_warning()
+                    error(line_number + line_offset, 'whitespace/indent', 4,
+                          'Code inside a namespace should not be indented.')
+            if in_preprocessor_directive or (current_line.strip()[0] == '#'): # This takes care of preprocessor directive syntax.
+                in_preprocessor_directive = current_line[-1] == '\\'
+            else:
+                looking_for_semicolon = ((current_line.find(';') == -1) and (current_line.strip()[-1] != '}')) or (current_line[-1] == '\\')
+        else:
+            looking_for_semicolon = False # If we have a brace we may not need a semicolon.
+        current_indentation_level += current_line.count('{') - current_line.count('}')
+        if current_indentation_level < 0:
+            break
+
+
+def check_directive_indentation(clean_lines, line_number, file_state, error):
+    """Looks for indentation of preprocessor directives.
+
+    Args:
+      clean_lines: A CleansedLines instance containing the file.
+      line_number: The number of the line to check.
+      file_state: A _FileState instance which maintains information about
+                  the state of things in the file.
+      error: The function to call with any errors found.
+    """
+
+    line = clean_lines.elided[line_number]  # Get rid of comments and strings.
+
+    indented_preprocessor_directives = match(r'\s+#', line)
+    if not indented_preprocessor_directives:
+        return
+
+    error(line_number, 'whitespace/indent', 4, 'preprocessor directives (e.g., #ifdef, #define, #import) should never be indented.')
+
+
+def get_initial_spaces_for_line(clean_line):
+    initial_spaces = 0
+    while initial_spaces < len(clean_line) and clean_line[initial_spaces] == ' ':
+        initial_spaces += 1
+    return initial_spaces
+
+
+def check_indentation_amount(clean_lines, line_number, error):
+    line = clean_lines.elided[line_number]
+    initial_spaces = get_initial_spaces_for_line(line)
+
+    if initial_spaces % 4:
+        error(line_number, 'whitespace/indent', 3,
+              'Weird number of spaces at line-start.  Are you using a 4-space indent?')
+        return
+
+    previous_line = get_previous_non_blank_line(clean_lines, line_number)[0]
+    if not previous_line.strip() or match(r'\s*\w+\s*:\s*$', previous_line) or previous_line[0] == '#':
+        return
+
+    previous_line_initial_spaces = get_initial_spaces_for_line(previous_line)
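+    # e.g. a continuation line indented 12 spaces under a line indented 4 spaces is flagged.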
+    if initial_spaces > previous_line_initial_spaces + 4:
+        error(line_number, 'whitespace/indent', 3, 'When wrapping a line, only indent 4 spaces.')
+
+
+def check_using_std(clean_lines, line_number, file_state, error):
+    """Looks for 'using std::foo;' statements which should be replaced with 'using namespace std;'.
+
+    Args:
+      clean_lines: A CleansedLines instance containing the file.
+      line_number: The number of the line to check.
+      file_state: A _FileState instance which maintains information about
+                  the state of things in the file.
+      error: The function to call with any errors found.
+    """
+
+    # This check doesn't apply to C or Objective-C implementation files.
+    if file_state.is_c_or_objective_c():
+        return
+
+    line = clean_lines.elided[line_number] # Get rid of comments and strings.
+
+    using_std_match = match(r'\s*using\s+std::(?P<method_name>\S+)\s*;\s*$', line)
+    if not using_std_match:
+        return
+
+    method_name = using_std_match.group('method_name')
+    error(line_number, 'build/using_std', 4,
+          "Use 'using namespace std;' instead of 'using std::%s;'." % method_name)
+
+
+def check_max_min_macros(clean_lines, line_number, file_state, error):
+    """Looks use of MAX() and MIN() macros that should be replaced with std::max() and std::min().
+
+    Args:
+      clean_lines: A CleansedLines instance containing the file.
+      line_number: The number of the line to check.
+      file_state: A _FileState instance which maintains information about
+                  the state of things in the file.
+      error: The function to call with any errors found.
+    """
+
+    # This check doesn't apply to C or Objective-C implementation files.
+    if file_state.is_c_or_objective_c():
+        return
+
+    line = clean_lines.elided[line_number] # Get rid of comments and strings.
+
+    max_min_macros_search = search(r'\b(?P<max_min_macro>(MAX|MIN))\s*\(', line)
+    if not max_min_macros_search:
+        return
+
+    max_min_macro = max_min_macros_search.group('max_min_macro')
+    max_min_macro_lower = max_min_macro.lower()
+    error(line_number, 'runtime/max_min_macros', 4,
+          'Use std::%s() or std::%s<type>() instead of the %s() macro.'
+          % (max_min_macro_lower, max_min_macro_lower, max_min_macro))
+
+
+def check_ctype_functions(clean_lines, line_number, file_state, error):
+    """Looks for use of the standard functions in ctype.h and suggest they be replaced
+       by use of equivilent ones in <wtf/ASCIICType.h>?.
+
+    Args:
+      clean_lines: A CleansedLines instance containing the file.
+      line_number: The number of the line to check.
+      file_state: A _FileState instance which maintains information about
+                  the state of things in the file.
+      error: The function to call with any errors found.
+    """
+
+    line = clean_lines.elided[line_number]  # Get rid of comments and strings.
+
+    ctype_function_search = search(r'\b(?P<ctype_function>(isalnum|isalpha|isascii|isblank|iscntrl|isdigit|isgraph|islower|isprint|ispunct|isspace|isupper|isxdigit|toascii|tolower|toupper))\s*\(', line)
+    if not ctype_function_search:
+        return
+
+    ctype_function = ctype_function_search.group('ctype_function')
+    error(line_number, 'runtime/ctype_function', 4,
+          'Use the equivalent function in <wtf/ASCIICType.h> instead of the %s() function.'
+          % (ctype_function))
+
+
+def check_switch_indentation(clean_lines, line_number, error):
+    """Looks for indentation errors inside of switch statements.
+
+    Args:
+      clean_lines: A CleansedLines instance containing the file.
+      line_number: The number of the line to check.
+      error: The function to call with any errors found.
+    """
+
+    line = clean_lines.elided[line_number] # Get rid of comments and strings.
+
+    switch_match = match(r'(?P<switch_indentation>\s*)switch\s*\(.+\)\s*{\s*$', line)
+    if not switch_match:
+        return
+
+    switch_indentation = switch_match.group('switch_indentation')
+    inner_indentation = switch_indentation + ' ' * 4
+    line_offset = 0
+    encountered_nested_switch = False
+
+    for current_line in clean_lines.elided[line_number + 1:]:
+        line_offset += 1
+
+        # Skip not only empty lines but also those with preprocessor directives.
+        if current_line.strip() == '' or current_line.startswith('#'):
+            continue
+
+        if match(r'\s*switch\s*\(.+\)\s*{\s*$', current_line):
+            # Complexity alarm - another switch statement nested inside the one
+            # that we're currently testing. We'll need to track the extent of
+            # that inner switch if the upcoming label tests are still supposed
+            # to work correctly. Let's not do that; instead, we'll finish
+            # checking this line, and then leave it like that. Assuming the
+            # indentation is done consistently (even if incorrectly), this will
+            # still catch all indentation issues in practice.
+            encountered_nested_switch = True
+
+        current_indentation_match = match(r'(?P<indentation>\s*)(?P<remaining_line>.*)$', current_line)
+        current_indentation = current_indentation_match.group('indentation')
+        remaining_line = current_indentation_match.group('remaining_line')
+
+        # End the check at the end of the switch statement.
+        if remaining_line.startswith('}') and current_indentation == switch_indentation:
+            break
+        # Case and default branches should not be indented. The regexp also
+        # catches single-line cases like "default: break;" but does not trigger
+        # on stuff like "Document::Foo();".
+        elif match(r'(default|case\s+.*)\s*:([^:].*)?$', remaining_line):
+            if current_indentation != switch_indentation:
+                error(line_number + line_offset, 'whitespace/indent', 4,
+                      'A case label should not be indented, but should line up with its switch statement.')
+                # Don't throw an error for multiple badly indented labels,
+                # one should be enough to figure out the problem.
+                break
+        # We ignore goto labels at the very beginning of a line.
+        elif match(r'\w+\s*:\s*$', remaining_line):
+            continue
+        # It's not a goto label, so check if it's indented at least as far as
+        # the switch statement plus one more level of indentation.
+        elif not current_indentation.startswith(inner_indentation):
+            error(line_number + line_offset, 'whitespace/indent', 4,
+                  'Non-label code inside switch statements should be indented.')
+            # Don't throw an error for multiple badly indented statements,
+            # one should be enough to figure out the problem.
+            break
+
+        if encountered_nested_switch:
+            break
+
+
+def check_braces(clean_lines, line_number, error):
+    """Looks for misplaced braces (e.g. at the end of line).
+
+    Args:
+      clean_lines: A CleansedLines instance containing the file.
+      line_number: The number of the line to check.
+      error: The function to call with any errors found.
+    """
+
+    line = clean_lines.elided[line_number] # Get rid of comments and strings.
+
+    if match(r'\s*{\s*$', line):
+        # We allow an open brace to start a line in the case where someone
+        # is using braces for function definition or in a block to
+        # explicitly create a new scope, which is commonly used to control
+        # the lifetime of stack-allocated variables.  We don't detect this
+        # perfectly: we just don't complain if the last non-whitespace
+        # character on the previous non-blank line is ';', ':', '{', '}',
+        # ')', or ') const' and doesn't begin with 'if|for|while|switch|else'.
+        # We also allow '#' for #endif and '=' for array initialization.
+        previous_line = get_previous_non_blank_line(clean_lines, line_number)[0]
+        if ((not search(r'[;:}{)=]\s*$|\)\s*((const|OVERRIDE)\s*)*\s*$', previous_line)
+             or search(r'\b(if|for|foreach|while|switch|else)\b', previous_line))
+            and previous_line.find('#') < 0):
+            error(line_number, 'whitespace/braces', 4,
+                  'This { should be at the end of the previous line')
+    elif (search(r'\)\s*(((const|OVERRIDE)\s*)*\s*)?{\s*$', line)
+          and line.count('(') == line.count(')')
+          and not search(r'\b(if|for|foreach|while|switch)\b', line)
+          and not match(r'\s+[A-Z_][A-Z_0-9]+\b', line)):
+        error(line_number, 'whitespace/braces', 4,
+              'Place brace on its own line for function definitions.')
+
+    if (match(r'\s*}\s*(else\s*({\s*)?)?$', line) and line_number > 1):
+        # A closing brace at the start of a line may mean that the previous
+        # line was a one-line control statement that used braces.
+        previous_line = clean_lines.elided[line_number - 2]
+        last_open_brace = previous_line.rfind('{')
+        if (last_open_brace != -1 and previous_line.find('}', last_open_brace) == -1
+            and search(r'\b(if|for|foreach|while|else)\b', previous_line)):
+            error(line_number, 'whitespace/braces', 4,
+                  'One line control clauses should not use braces.')
+
+    # An else clause should be on the same line as the preceding closing brace.
+    if match(r'\s*else\s*', line):
+        previous_line = get_previous_non_blank_line(clean_lines, line_number)[0]
+        if match(r'\s*}\s*$', previous_line):
+            error(line_number, 'whitespace/newline', 4,
+                  'An else should appear on the same line as the preceding }')
+
+    # Likewise, an else should never have the else clause on the same line
+    if search(r'\belse [^\s{]', line) and not search(r'\belse if\b', line):
+        error(line_number, 'whitespace/newline', 4,
+              'Else clause should never be on same line as else (use 2 lines)')
+
+    # In the same way, a do/while should never be on one line
+    if match(r'\s*do [^\s{]', line):
+        error(line_number, 'whitespace/newline', 4,
+              'do/while clauses should not be on a single line')
+
+    # Braces shouldn't be followed by a ; unless they're defining a struct
+    # or initializing an array.
+    # We can't tell in general, but we can for some common cases.
+    previous_line_number = line_number
+    while True:
+        (previous_line, previous_line_number) = get_previous_non_blank_line(clean_lines, previous_line_number)
+        if match(r'\s+{.*}\s*;', line) and not previous_line.count(';'):
+            line = previous_line + line
+        else:
+            break
+    if (search(r'{.*}\s*;', line)
+        and line.count('{') == line.count('}')
+        and not search(r'struct|class|enum|\s*=\s*{', line)):
+        error(line_number, 'readability/braces', 4,
+              "You don't need a ; after a }")
+
+
+def check_exit_statement_simplifications(clean_lines, line_number, error):
+    """Looks for else or else-if statements that should be written as an
+    if statement when the prior if concludes with a return, break, continue or
+    goto statement.
+
+    Args:
+      clean_lines: A CleansedLines instance containing the file.
+      line_number: The number of the line to check.
+      error: The function to call with any errors found.
+    """
+
+    line = clean_lines.elided[line_number] # Get rid of comments and strings.
+
+    else_match = match(r'(?P<else_indentation>\s*)(\}\s*)?else(\s+if\s*\(|(?P<else>\s*(\{\s*)?\Z))', line)
+    if not else_match:
+        return
+
+    else_indentation = else_match.group('else_indentation')
+    inner_indentation = else_indentation + ' ' * 4
+
+    previous_lines = clean_lines.elided[:line_number]
+    previous_lines.reverse()
+    line_offset = 0
+    encountered_exit_statement = False
+
+    for current_line in previous_lines:
+        line_offset -= 1
+
+        # Skip not only empty lines but also those with preprocessor directives
+        # and goto labels.
+        if current_line.strip() == '' or current_line.startswith('#') or match(r'\w+\s*:\s*$', current_line):
+            continue
+
+        # Skip lines with closing braces on the original indentation level.
+        # Even though the styleguide says they should be on the same line as
+        # the "else if" statement, we also want to check for instances where
+        # the current code does not comply with the coding style. Thus, ignore
+        # these lines and proceed to the line before that.
+        if current_line == else_indentation + '}':
+            continue
+
+        current_indentation_match = match(r'(?P<indentation>\s*)(?P<remaining_line>.*)$', current_line)
+        current_indentation = current_indentation_match.group('indentation')
+        remaining_line = current_indentation_match.group('remaining_line')
+
+        # As we're going up the lines, the first real statement to encounter
+        # has to be an exit statement (return, break, continue or goto) -
+        # otherwise, this check doesn't apply.
+        if not encountered_exit_statement:
+            # We only want to find exit statements if they are on exactly
+            # the same level of indentation as expected from the code inside
+            # the block. If the indentation doesn't strictly match then we
+            # might have a nested if or something, which must be ignored.
+            if current_indentation != inner_indentation:
+                break
+            if match(r'(return(\W+.*)|(break|continue)\s*;|goto\s*\w+;)$', remaining_line):
+                encountered_exit_statement = True
+                continue
+            break
+
+        # When code execution reaches this point, we've found an exit statement
+        # as last statement of the previous block. Now we only need to make
+        # sure that the block belongs to an "if", then we can throw an error.
+
+        # Skip lines with opening braces on the original indentation level,
+        # similar to the closing braces check above. ("if (condition)\n{")
+        if current_line == else_indentation + '{':
+            continue
+
+        # Skip everything that's further indented than our "else" or "else if".
+        if current_indentation.startswith(else_indentation) and current_indentation != else_indentation:
+            continue
+
+        # So we've got a line with same (or less) indentation. Is it an "if"?
+        # If yes: throw an error. If no: don't throw an error.
+        # Whatever the outcome, this is the end of our loop.
+        if match(r'if\s*\(', remaining_line):
+            if else_match.start('else') != -1:
+                error(line_number + line_offset, 'readability/control_flow', 4,
+                      'An else statement can be removed when the prior "if" '
+                      'concludes with a return, break, continue or goto statement.')
+            else:
+                error(line_number + line_offset, 'readability/control_flow', 4,
+                      'An else if statement should be written as an if statement '
+                      'when the prior "if" concludes with a return, break, '
+                      'continue or goto statement.')
+        break
+
+
+def replaceable_check(operator, macro, line):
+    """Determine whether a basic CHECK can be replaced with a more specific one.
+
+    For example suggest using CHECK_EQ instead of CHECK(a == b) and
+    similarly for CHECK_GE, CHECK_GT, CHECK_LE, CHECK_LT, CHECK_NE.
+
+    Args:
+      operator: The C++ operator used in the CHECK.
+      macro: The CHECK or EXPECT macro being called.
+      line: The current source line.
+
+    Returns:
+      True if the CHECK can be replaced with a more specific one.
+    """
+
+    # This matches decimal and hex integers, strings, and chars (in that order).
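+    # e.g. it matches "42", "0x1fU", '"foo"' and "'a'".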
+    match_constant = r'([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')'
+
+    # Expression to match two sides of the operator with something that
+    # looks like a literal, since CHECK(x == iterator) won't compile.
+    # This means we can't catch all the cases where a more specific
+    # CHECK is possible, but it's less annoying than dealing with
+    # extraneous warnings.
+    match_this = (r'\s*' + macro + r'\((\s*' +
+                  match_constant + r'\s*' + operator + r'[^<>].*|'
+                  r'.*[^<>]' + operator + r'\s*' + match_constant +
+                  r'\s*\))')
+
+    # Don't complain about CHECK(x == NULL) or similar because
+    # CHECK_EQ(x, NULL) won't compile (requires a cast).
+    # Also, don't complain about more complex boolean expressions
+    # involving && or || such as CHECK(a == b || c == d).
+    return match(match_this, line) and not search(r'NULL|&&|\|\|', line)
+
+
+def check_check(clean_lines, line_number, error):
+    """Checks the use of CHECK and EXPECT macros.
+
+    Args:
+      clean_lines: A CleansedLines instance containing the file.
+      line_number: The number of the line to check.
+      error: The function to call with any errors found.
+    """
+
+    # Decide the set of replacement macros that should be suggested
+    raw_lines = clean_lines.raw_lines
+    current_macro = ''
+    for macro in _CHECK_MACROS:
+        if raw_lines[line_number].find(macro) >= 0:
+            current_macro = macro
+            break
+    if not current_macro:
+        # Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT'
+        return
+
+    line = clean_lines.elided[line_number]        # get rid of comments and strings
+
+    # Encourage replacing plain CHECKs with CHECK_EQ/CHECK_NE/etc.
+    for operator in ['==', '!=', '>=', '>', '<=', '<']:
+        if replaceable_check(operator, current_macro, line):
+            error(line_number, 'readability/check', 2,
+                  'Consider using %s instead of %s(a %s b)' % (
+                      _CHECK_REPLACEMENT[current_macro][operator],
+                      current_macro, operator))
+            break
+
+
+def check_for_comparisons_to_zero(clean_lines, line_number, error):
+    # Get the line without comments and strings.
+    line = clean_lines.elided[line_number]
+
+    # Include NULL here so that users don't have to convert NULL to 0 first and then get this error.
+    if search(r'[=!]=\s*(NULL|0|true|false)[^\w.]', line) or search(r'[^\w.](NULL|0|true|false)\s*[=!]=', line):
+        if not search('LIKELY', line) and not search('UNLIKELY', line):
+            error(line_number, 'readability/comparison_to_zero', 5,
+                  'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons.')
+
+
+def check_for_null(clean_lines, line_number, file_state, error):
+    # This check doesn't apply to C or Objective-C implementation files.
+    if file_state.is_c_or_objective_c():
+        return
+
+    line = clean_lines.elided[line_number]
+
+    # Don't warn about NULL usage in g_*(). See Bug 32858 and 39372.
+    if search(r'\bg(_[a-z]+)+\b', line):
+        return
+
+    # Don't warn about NULL usage in gst_*(). See Bug 70498.
+    if search(r'\bgst(_[a-z]+)+\b', line):
+        return
+
+    # Don't warn about NULL usage in gdk_pixbuf_save_to_*{join,concat}(). See Bug 43090.
+    if search(r'\bgdk_pixbuf_save_to\w+\b', line):
+        return
+
+    # Don't warn about NULL usage in gtk_widget_style_get() or gtk_style_context_get_style(). See Bug 51758.
+    if search(r'\bgtk_widget_style_get\(\w+\b', line) or search(r'\bgtk_style_context_get_style\(\w+\b', line):
+        return
+
+    # Don't warn about NULL usage in soup_server_new(). See Bug 77890.
+    if search(r'\bsoup_server_new\(\w+\b', line):
+        return
+
+    if search(r'\bNULL\b', line):
+        error(line_number, 'readability/null', 5, 'Use 0 instead of NULL.')
+        return
+
+    line = clean_lines.raw_lines[line_number]
+    # See if NULL occurs in any comments in the line. If the search for NULL using the raw line
+    # matches, then do the check with strings collapsed to avoid giving errors for
+    # NULLs occurring in strings.
+    if search(r'\bNULL\b', line) and search(r'\bNULL\b', CleansedLines.collapse_strings(line)):
+        error(line_number, 'readability/null', 4, 'Use 0 or null instead of NULL (even in *comments*).')
+
+
+def get_line_width(line):
+    """Determines the width of the line in column positions.
+
+    Args:
+      line: A string, which may be a Unicode string.
+
+    Returns:
+      The width of the line in column positions, accounting for Unicode
+      combining characters and wide characters.
+    """
+    if isinstance(line, unicode):
+        width = 0
+        for c in unicodedata.normalize('NFC', line):
+            if unicodedata.east_asian_width(c) in ('W', 'F'):
+                width += 2
+            elif not unicodedata.combining(c):
+                width += 1
+        return width
+    return len(line)
+
+
+def check_style(clean_lines, line_number, file_extension, class_state, file_state, error):
+    """Checks rules from the 'C++ style rules' section of cppguide.html.
+
+    Most of these rules are hard to test (naming, comment style), but we
+    do what we can.  In particular we check for 4-space indents, line lengths,
+    tab usage, spaces inside code, etc.
+
+    Args:
+      clean_lines: A CleansedLines instance containing the file.
+      line_number: The number of the line to check.
+      file_extension: The extension (without the dot) of the filename.
+      class_state: A _ClassState instance which maintains information about
+                   the current stack of nested class declarations being parsed.
+      file_state: A _FileState instance which maintains information about
+                  the state of things in the file.
+      error: The function to call with any errors found.
+    """
+
+    raw_lines = clean_lines.raw_lines
+    line = raw_lines[line_number]
+
+    if line.find('\t') != -1:
+        error(line_number, 'whitespace/tab', 1,
+              'Tab found; better to use spaces')
+
+    cleansed_line = clean_lines.elided[line_number]
+    if line and line[-1].isspace():
+        error(line_number, 'whitespace/end_of_line', 4,
+              'Line ends in whitespace.  Consider deleting these extra spaces.')
+
+    if (cleansed_line.count(';') > 1
+        # for loops are allowed two ;'s (and may run over two lines).
+        and cleansed_line.find('for') == -1
+        and (get_previous_non_blank_line(clean_lines, line_number)[0].find('for') == -1
+             or get_previous_non_blank_line(clean_lines, line_number)[0].find(';') != -1)
+        # It's ok to have many commands in a switch case that fits in 1 line
+        and not ((cleansed_line.find('case ') != -1
+                  or cleansed_line.find('default:') != -1)
+                 and cleansed_line.find('break;') != -1)
+        # Also it's ok to have many commands in trivial single-line accessors in class definitions.
+        and not (match(r'.*\(.*\).*{.*.}', line)
+                 and class_state.classinfo_stack
+                 and line.count('{') == line.count('}'))
+        and not cleansed_line.startswith('#define ')
+        # It's ok to use the WTF_MAKE_NONCOPYABLE and WTF_MAKE_FAST_ALLOCATED macros in 1 line
+        and not (cleansed_line.find("WTF_MAKE_NONCOPYABLE") != -1
+                 and cleansed_line.find("WTF_MAKE_FAST_ALLOCATED") != -1)):
+        error(line_number, 'whitespace/newline', 4,
+              'More than one command on the same line')
+
+    if cleansed_line.strip().endswith('||') or cleansed_line.strip().endswith('&&'):
+        error(line_number, 'whitespace/operators', 4,
+              'Boolean expressions that span multiple lines should have their '
+              'operators on the left side of the line instead of the right side.')
+
+    # Some more style checks
+    check_namespace_indentation(clean_lines, line_number, file_extension, file_state, error)
+    check_directive_indentation(clean_lines, line_number, file_state, error)
+    check_using_std(clean_lines, line_number, file_state, error)
+    check_max_min_macros(clean_lines, line_number, file_state, error)
+    check_ctype_functions(clean_lines, line_number, file_state, error)
+    check_switch_indentation(clean_lines, line_number, error)
+    check_braces(clean_lines, line_number, error)
+    check_exit_statement_simplifications(clean_lines, line_number, error)
+    check_spacing(file_extension, clean_lines, line_number, error)
+    check_check(clean_lines, line_number, error)
+    check_for_comparisons_to_zero(clean_lines, line_number, error)
+    check_for_null(clean_lines, line_number, file_state, error)
+    check_indentation_amount(clean_lines, line_number, error)
+
+
+_RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"')
+_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
+# Matches the first component of a filename delimited by -s and _s. That is:
+#  _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
+#  _RE_FIRST_COMPONENT.match('foo.cpp').group(0) == 'foo'
+#  _RE_FIRST_COMPONENT.match('foo-bar_baz.cpp').group(0) == 'foo'
+#  _RE_FIRST_COMPONENT.match('foo_bar-baz.cpp').group(0) == 'foo'
+_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
+
+
+def _drop_common_suffixes(filename):
+    """Drops common suffixes like _test.cpp or -inl.h from filename.
+
+    For example:
+      >>> _drop_common_suffixes('foo/foo-inl.h')
+      'foo/foo'
+      >>> _drop_common_suffixes('foo/bar/foo.cpp')
+      'foo/bar/foo'
+      >>> _drop_common_suffixes('foo/foo_internal.h')
+      'foo/foo'
+      >>> _drop_common_suffixes('foo/foo_unusualinternal.h')
+      'foo/foo_unusualinternal'
+
+    Args:
+      filename: The input filename.
+
+    Returns:
+      The filename with the common suffix removed.
+    """
+    for suffix in ('test.cpp', 'regtest.cpp', 'unittest.cpp',
+                   'inl.h', 'impl.h', 'internal.h'):
+        if (filename.endswith(suffix) and len(filename) > len(suffix)
+            and filename[-len(suffix) - 1] in ('-', '_')):
+            return filename[:-len(suffix) - 1]
+    return os.path.splitext(filename)[0]
+
+
+def _classify_include(filename, include, is_system, include_state):
+    """Figures out what kind of header 'include' is.
+
+    Args:
+      filename: The current file cpp_style is running over.
+      include: The path to a #included file.
+      is_system: True if the #include used <> rather than "".
+      include_state: An _IncludeState instance in which the headers are inserted.
+
+    Returns:
+      One of the _XXX_HEADER constants.
+
+    For example:
+      >>> _classify_include('foo.cpp', 'config.h', False)
+      _CONFIG_HEADER
+      >>> _classify_include('foo.cpp', 'foo.h', False)
+      _PRIMARY_HEADER
+      >>> _classify_include('foo.cpp', 'bar.h', False)
+      _OTHER_HEADER
+    """
+
+    # If it is a system header we know it is classified as _OTHER_HEADER.
+    if is_system and not include.startswith('public/'):
+        return _OTHER_HEADER
+
+    # If the include is named config.h then this is WebCore/config.h.
+    if include == "config.h":
+        return _CONFIG_HEADER
+
+    # There cannot be primary includes in header files themselves. Only an
+    # include that exactly matches the header filename will be flagged as
+    # primary, so that it triggers the "don't include yourself" check.
+    if filename.endswith('.h') and filename != include:
+        return _OTHER_HEADER
+
+    # Qt's moc files do not follow the naming and ordering rules, so they should be skipped
+    if include.startswith('moc_') and include.endswith('.cpp'):
+        return _MOC_HEADER
+
+    if include.endswith('.moc'):
+        return _MOC_HEADER
+
+    # If the target file basename starts with the include we're checking
+    # then we consider it the primary header.
+    target_base = FileInfo(filename).base_name()
+    include_base = FileInfo(include).base_name()
+
+    # If we haven't encountered a primary header, then be lenient in checking.
+    if not include_state.visited_primary_section():
+        if target_base.find(include_base) != -1:
+            return _PRIMARY_HEADER
+        # Qt private APIs use _p.h suffix.
+        if include_base.find(target_base) != -1 and include_base.endswith('_p'):
+            return _PRIMARY_HEADER
+
+    # If we already encountered a primary header, perform a strict comparison.
+    # If the two filename bases are the same, then the above lenient check
+    # was probably a false positive.
+    elif include_state.visited_primary_section() and target_base == include_base:
+        if include == "ResourceHandleWin.h":
+            # FIXME: Thus far, we've only seen one example of these, but if we
+            # start to see more, please consider generalizing this check
+            # somehow.
+            return _OTHER_HEADER
+        return _PRIMARY_HEADER
+
+    return _OTHER_HEADER
+
+
+def _does_primary_header_exist(filename):
+    """Return a primary header file name for a file, or empty string
+    if the file is not source file or primary header does not exist.
+    """
+    fileinfo = FileInfo(filename)
+    if not fileinfo.is_source():
+        return False
+    primary_header = fileinfo.no_extension() + ".h"
+    return os.path.isfile(primary_header)
+
+
+def check_include_line(filename, file_extension, clean_lines, line_number, include_state, error):
+    """Check rules that are applicable to #include lines.
+
+    Strings on #include lines are NOT removed from elided line, to make
+    certain tasks easier. However, to prevent false positives, checks
+    applicable to #include lines in CheckLanguage must be put here.
+
+    Args:
+      filename: The name of the current file.
+      file_extension: The current file extension, without the leading dot.
+      clean_lines: A CleansedLines instance containing the file.
+      line_number: The number of the line to check.
+      include_state: An _IncludeState instance in which the headers are inserted.
+      error: The function to call with any errors found.
+    """
+    # FIXME: For readability or as a possible optimization, consider
+    #        exiting early here by checking whether the "build/include"
+    #        category should be checked for the given filename.  This
+    #        may involve having the error handler classes expose a
+    #        should_check() method, in addition to the usual __call__
+    #        method.
+    line = clean_lines.lines[line_number]
+
+    matched = _RE_PATTERN_INCLUDE.search(line)
+    if not matched:
+        return
+
+    include = matched.group(2)
+    is_system = (matched.group(1) == '<')
+
+    # Look for any of the stream classes that are part of standard C++.
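+    # e.g. #include <iostream> or <fstream> triggers this warning.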
+    if match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
+        error(line_number, 'readability/streams', 3,
+              'Streams are highly discouraged.')
+
+    # Look for specific includes to fix.
+    if include.startswith('wtf/') and not is_system:
+        error(line_number, 'build/include', 4,
+              'wtf includes should be <wtf/file.h> instead of "wtf/file.h".')
+
+    if filename.find('/chromium/') != -1 and include.startswith('cc/CC'):
+        error(line_number, 'build/include', 4,
+              'cc includes should be "CCFoo.h" instead of "cc/CCFoo.h".')
+
+    duplicate_header = include in include_state
+    if duplicate_header:
+        error(line_number, 'build/include', 4,
+              '"%s" already included at %s:%s' %
+              (include, filename, include_state[include]))
+    else:
+        include_state[include] = line_number
+
+    header_type = _classify_include(filename, include, is_system, include_state)
+    primary_header_exists = _does_primary_header_exist(filename)
+    include_state.header_types[line_number] = header_type
+
+    # Only proceed if this isn't a duplicate header.
+    if duplicate_header:
+        return
+
+    # We want to ensure that headers appear in the right order:
+    # 1) for implementation files: config.h, primary header, blank line, alphabetically sorted
+    # 2) for header files: alphabetically sorted
+    # The include_state object keeps track of the last type seen
+    # and complains if the header types are out of order or missing.
+    error_message = include_state.check_next_include_order(header_type,
+                                                           file_extension == "h",
+                                                           primary_header_exists)
+
+    # Check to make sure we have a blank line after primary header.
+    if not error_message and header_type == _PRIMARY_HEADER:
+        next_line = clean_lines.raw_lines[line_number + 1]
+        if not is_blank_line(next_line):
+            error(line_number, 'build/include_order', 4,
+                  'You should add a blank line after implementation file\'s own header.')
+
+    # Check to make sure all headers besides config.h and the primary header are
+    # alphabetically sorted. Skip Qt's moc files.
+    if not error_message and header_type == _OTHER_HEADER:
+        previous_line_number = line_number - 1
+        previous_line = clean_lines.lines[previous_line_number]
+        previous_match = _RE_PATTERN_INCLUDE.search(previous_line)
+        while (not previous_match and previous_line_number > 0
+               and not search(r'\A(#if|#ifdef|#ifndef|#else|#elif|#endif)', previous_line)):
+            previous_line_number -= 1
+            previous_line = clean_lines.lines[previous_line_number]
+            previous_match = _RE_PATTERN_INCLUDE.search(previous_line)
+        if previous_match:
+            previous_header_type = include_state.header_types[previous_line_number]
+            if previous_header_type == _OTHER_HEADER and previous_line.strip() > line.strip():
+                # This type of error is potentially a problem with this line or the previous one,
+                # so if the error is filtered for one line, report it for the next. This is so that
+                # we properly handle patches, for which only modified lines produce errors.
+                if not error(line_number - 1, 'build/include_order', 4, 'Alphabetical sorting problem.'):
+                    error(line_number, 'build/include_order', 4, 'Alphabetical sorting problem.')
+
+    if error_message:
+        if file_extension == 'h':
+            error(line_number, 'build/include_order', 4,
+                  '%s Should be: alphabetically sorted.' %
+                  error_message)
+        else:
+            error(line_number, 'build/include_order', 4,
+                  '%s Should be: config.h, primary header, blank line, and then alphabetically sorted.' %
+                  error_message)
+
+
+def check_language(filename, clean_lines, line_number, file_extension, include_state,
+                   file_state, error):
+    """Checks rules from the 'C++ language rules' section of cppguide.html.
+
+    Some of these rules are hard to test (function overloading, using
+    uint32 inappropriately), but we do the best we can.
+
+    Args:
+      filename: The name of the current file.
+      clean_lines: A CleansedLines instance containing the file.
+      line_number: The number of the line to check.
+      file_extension: The extension (without the dot) of the filename.
+      include_state: An _IncludeState instance in which the headers are inserted.
+      file_state: A _FileState instance which maintains information about
+                  the state of things in the file.
+      error: The function to call with any errors found.
+    """
+    # If the line is empty or consists of entirely a comment, no need to
+    # check it.
+    line = clean_lines.elided[line_number]
+    if not line:
+        return
+
+    matched = _RE_PATTERN_INCLUDE.search(line)
+    if matched:
+        check_include_line(filename, file_extension, clean_lines, line_number, include_state, error)
+        return
+
+    # FIXME: figure out if they're using default arguments in fn proto.
+
+    # Check to see if they're using a conversion function cast.
+    # I just try to capture the most common basic types, though there are more.
+    # Parameterless conversion functions, such as bool(), are allowed as they are
+    # probably a member operator declaration or default constructor.
+    matched = search(
+        r'\b(int|float|double|bool|char|int32|uint32|int64|uint64)\([^)]', line)
+    if matched:
+        # gMock methods are defined using some variant of MOCK_METHODx(name, type)
+        # where type may be float(), int(string), etc.  Without context they are
+        # virtually indistinguishable from int(x) casts.
+        if not match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line):
+            error(line_number, 'readability/casting', 4,
+                  'Using deprecated casting style.  '
+                  'Use static_cast<%s>(...) instead' %
+                  matched.group(1))
+
+    check_c_style_cast(line_number, line, clean_lines.raw_lines[line_number],
+                       'static_cast',
+                       r'\((int|float|double|bool|char|u?int(16|32|64))\)',
+                       error)
+    # This doesn't catch all cases.  Consider (const char * const)"hello".
+    check_c_style_cast(line_number, line, clean_lines.raw_lines[line_number],
+                       'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error)
+
+    # In addition, we look for people taking the address of a cast.  This
+    # is dangerous -- casts can assign to temporaries, so the pointer doesn't
+    # point where you think.
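+    # e.g. "&static_cast<Foo*>(bar)" is flagged.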
+    if search(
+        r'(&\([^)]+\)[\w(])|(&(static|dynamic|reinterpret)_cast\b)', line):
+        error(line_number, 'runtime/casting', 4,
+              ('Are you taking an address of a cast?  '
+               'This is dangerous: could be a temp var.  '
+               'Take the address before doing the cast, rather than after'))
+
+    # Check for people declaring static/global STL strings at the top level.
+    # This is dangerous because the C++ language does not guarantee that
+    # globals with constructors are initialized before the first access.
+    matched = match(
+        r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
+        line)
+    # Make sure it's not a function.
+    # Function template specialization looks like: "string foo<Type>(...".
+    # Class template definitions look like: "string Foo<Type>::Method(...".
+    if matched and not match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)',
+                             matched.group(3)):
+        error(line_number, 'runtime/string', 4,
+              'For a static/global string constant, use a C style string instead: '
+              '"%schar %s[]".' %
+              (matched.group(1), matched.group(2)))
+
+    # Check that we're not using RTTI outside of testing code.
+    if search(r'\bdynamic_cast<', line):
+        error(line_number, 'runtime/rtti', 5,
+              'Do not use dynamic_cast<>.  If you need to cast within a class '
+              "hierarchy, use static_cast<> to upcast.  Google doesn't support "
+              'RTTI.')
+
+    if search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
+        error(line_number, 'runtime/init', 4,
+              'You seem to be initializing a member variable with itself.')
+
+    if file_extension == 'h':
+        # FIXME: check that 1-arg constructors are explicit.
+        #        How to tell it's a constructor?
+        #        (handled in check_for_non_standard_constructs for now)
+        pass
+
+    # Check if people are using the verboten C basic types.  The only exception
+    # we regularly allow is "unsigned short port" for port.
+    if search(r'\bshort port\b', line):
+        if not search(r'\bunsigned short port\b', line):
+            error(line_number, 'runtime/int', 4,
+                  'Use "unsigned short" for ports, not "short"')
+
+    # When snprintf is used, the second argument shouldn't be a literal.
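+    # e.g. "snprintf(buffer, 10, ...)" is reported; sizeof(buffer) is preferred for the size.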
+    matched = search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
+    if matched:
+        error(line_number, 'runtime/printf', 3,
+              'If you can, use sizeof(%s) instead of %s as the 2nd arg '
+              'to snprintf.' % (matched.group(1), matched.group(2)))
+
+    # Check if some verboten C functions are being used.
+    if search(r'\bsprintf\b', line):
+        error(line_number, 'runtime/printf', 5,
+              'Never use sprintf.  Use snprintf instead.')
+    matched = search(r'\b(strcpy|strcat)\b', line)
+    if matched:
+        error(line_number, 'runtime/printf', 4,
+              'Almost always, snprintf is better than %s' % matched.group(1))
+
+    if search(r'\bsscanf\b', line):
+        error(line_number, 'runtime/printf', 1,
+              'sscanf can be ok, but is slow and can overflow buffers.')
+
+    # Check for suspicious usage of "if" like
+    # } if (a == b) {
+    if search(r'\}\s*if\s*\(', line):
+        error(line_number, 'readability/braces', 4,
+              'Did you mean "else if"? If not, start a new line for "if".')
+
+    # Check for potential format string bugs like printf(foo).
+    # We constrain the pattern not to pick things like DocidForPrintf(foo).
+    # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
+    matched = re.search(r'\b((?:string)?printf)\s*\(([\w.\->()]+)\)', line, re.I)
+    if matched:
+        error(line_number, 'runtime/printf', 4,
+              'Potential format string bug. Do %s("%%s", %s) instead.'
+              % (matched.group(1), matched.group(2)))
+
+    # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
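+    # e.g. "memset(buffer, sizeof(buffer), 0)" is flagged; "memset(buffer, 0, sizeof(buffer))" is not.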
+    matched = search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
+    if matched and not match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", matched.group(2)):
+        error(line_number, 'runtime/memset', 4,
+              'Did you mean "memset(%s, 0, %s)"?'
+              % (matched.group(1), matched.group(2)))
+
+    # Detect variable-length arrays.
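+    # e.g. "char buffer[length];" is flagged, while "char buffer[kMaxLength];"
+    # and "char buffer[256];" are not.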
+    matched = match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
+    if (matched and matched.group(2) != 'return' and matched.group(2) != 'delete' and
+        matched.group(3).find(']') == -1):
+        # Split the size using space and arithmetic operators as delimiters.
+        # If any of the resulting tokens are not compile time constants then
+        # report the error.
+        tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', matched.group(3))
+        is_const = True
+        skip_next = False
+        for tok in tokens:
+            if skip_next:
+                skip_next = False
+                continue
+
+            if search(r'sizeof\(.+\)', tok):
+                continue
+            if search(r'arraysize\(\w+\)', tok):
+                continue
+
+            tok = tok.lstrip('(')
+            tok = tok.rstrip(')')
+            if not tok:
+                continue
+            if match(r'\d+', tok):
+                continue
+            if match(r'0[xX][0-9a-fA-F]+', tok):
+                continue
+            if match(r'k[A-Z0-9]\w*', tok):
+                continue
+            if match(r'(.+::)?k[A-Z0-9]\w*', tok):
+                continue
+            if match(r'(.+::)?[A-Z][A-Z0-9_]*', tok):
+                continue
+            # A catch-all for tricky sizeof cases, including 'sizeof expression',
+            # 'sizeof(*type)', 'sizeof(const type)', and 'sizeof(struct StructName)';
+            # these require skipping the next token because we split on ' ' and '*'.
+            if tok.startswith('sizeof'):
+                skip_next = True
+                continue
+            is_const = False
+            break
+        if not is_const:
+            error(line_number, 'runtime/arrays', 1,
+                  'Do not use variable-length arrays.  Use an appropriately named '
+                  "('k' followed by CamelCase) compile-time constant for the size.")
+
+    # Check for use of unnamed namespaces in header files.  Registration
+    # macros are typically OK, so we allow use of "namespace {" on lines
+    # that end with backslashes.
+    if (file_extension == 'h'
+        and search(r'\bnamespace\s*{', line)
+        and line[-1] != '\\'):
+        error(line_number, 'build/namespaces', 4,
+              'Do not use unnamed namespaces in header files.  See '
+              'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
+              ' for more information.')
+
+    # Check for plain bitfields declared without either "signed" or "unsigned".
+    # Most compilers treat such bitfields as signed, but there are still compilers like
+    # RVCT 4.0 that use unsigned by default.
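+    # e.g. "int m_loading : 1;" is flagged, while "unsigned m_loading : 1;" is not.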
+    matched = re.match(r'\s*((const|mutable)\s+)?(char|(short(\s+int)?)|int|long(\s+(long|int))?)\s+[a-zA-Z_][a-zA-Z0-9_]*\s*:\s*\d+\s*;', line)
+    if matched:
+        error(line_number, 'runtime/bitfields', 5,
+              'Please declare integral type bitfields with either signed or unsigned.')
+
+    check_identifier_name_in_declaration(filename, line_number, line, file_state, error)
+
+    # Check for unsigned int (should be just 'unsigned')
+    if search(r'\bunsigned int\b', line):
+        error(line_number, 'runtime/unsigned', 1,
+              'Omit int when using unsigned')
+
+    # Check that we're not using static_cast<Text*>.
+    if search(r'\bstatic_cast<Text\*>', line):
+        error(line_number, 'readability/check', 4,
+              'Consider using toText helper function in WebCore/dom/Text.h '
+              'instead of static_cast<Text*>')
+
+def check_identifier_name_in_declaration(filename, line_number, line, file_state, error):
+    """Checks if identifier names contain any underscores.
+
+    Because identifiers in the libraries we use contain many underscores,
+    we only warn about declarations of identifiers and do not check uses
+    of identifiers.
+
+    Args:
+      filename: The name of the current file.
+      line_number: The number of the line to check.
+      line: The line of code to check.
+      file_state: A _FileState instance which maintains information about
+                  the state of things in the file.
+      error: The function to call with any errors found.
+    """
+    # We don't check a return statement.
+    if match(r'\s*(return|delete)\b', line):
+        return
+
+    # Basically, a declaration is a type name followed by whitespace
+    # followed by an identifier. The type name can be complicated
+    # due to type adjectives and templates. We remove them first to
+    # simplify the process of finding declarations of identifiers.
+
+    # Convert "long long", "long double", and "long long int" to
+    # simple types, but don't remove simple "long".
+    line = sub(r'long (long )?(?=long|double|int)', '', line)
+    # Convert unsigned/signed types to simple types, too.
+    line = sub(r'(unsigned|signed) (?=char|short|int|long)', '', line)
+    line = sub(r'\b(inline|using|static|const|volatile|auto|register|extern|typedef|restrict|struct|class|virtual)(?=\W)', '', line)
+
+    # Remove "new" and "new (expr)" to simplify, too.
+    line = sub(r'new\s*(\([^)]*\))?', '', line)
+
+    # Remove all template parameters by removing matching < and >.
+    # Loop until no templates are removed to remove nested templates.
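+    # For example, "Vector<RefPtr<Node> > nodes;" becomes "Vector<RefPtr > nodes;"
+    # after the first pass and "Vector nodes;" after the second.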
+    while True:
+        line, number_of_replacements = subn(r'<([\w\s:]|::)+\s*[*&]*\s*>', '', line)
+        if not number_of_replacements:
+            break
+
+    # Declarations of local variables can be in condition expressions
+    # of control flow statements (e.g., "if (RenderObject* p = o->parent())").
+    # We remove the keywords and the first parenthesis.
+    #
+    # Declarations in "while", "if", and "switch" are different from
+    # other declarations in two aspects:
+    #
+    # - There can be only one declaration between the parentheses.
+    #   (i.e., you cannot write "if (int i = 0, j = 1) {}")
+    # - The variable must be initialized.
+    #   (i.e., you cannot write "if (int i) {}")
+    #
+    # and we will need different treatments for them.
+    line = sub(r'^\s*for\s*\(', '', line)
+    line, control_statement = subn(r'^\s*(while|else if|if|switch)\s*\(', '', line)
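+    # control_statement is the number of substitutions made above (0 or 1);
+    # it is used below as a boolean flag.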
+
+    # Detect variable and functions.
+    type_regexp = r'\w([\w]|\s*[*&]\s*|::)+'
+    identifier_regexp = r'(?P<identifier>[\w:]+)'
+    maybe_bitfield_regexp = r'(:\s*\d+\s*)?'
+    character_after_identifier_regexp = r'(?P<character_after_identifier>[[;()=,])(?!=)'
+    declaration_without_type_regexp = r'\s*' + identifier_regexp + r'\s*' + maybe_bitfield_regexp + character_after_identifier_regexp
+    declaration_with_type_regexp = r'\s*' + type_regexp + r'\s' + declaration_without_type_regexp
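+    # For example, in "int width, height;" the first pass of the loop below matches
+    # declaration_with_type_regexp (identifier "width", followed by ","), and the
+    # second pass matches declaration_without_type_regexp (identifier "height",
+    # followed by ";").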
+    is_function_arguments = False
+    number_of_identifiers = 0
+    while True:
+        # If we are seeing the first identifier or arguments of a
+        # function, there should be a type name before an identifier.
+        if not number_of_identifiers or is_function_arguments:
+            declaration_regexp = declaration_with_type_regexp
+        else:
+            declaration_regexp = declaration_without_type_regexp
+
+        matched = match(declaration_regexp, line)
+        if not matched:
+            return
+        identifier = matched.group('identifier')
+        character_after_identifier = matched.group('character_after_identifier')
+
+        # If we removed a non-for-control statement, the character after
+        # the identifier should be '='. With this rule, we can avoid
+        # warning for cases like "if (val & INT_MAX) {".
+        if control_statement and character_after_identifier != '=':
+            return
+
+        is_function_arguments = is_function_arguments or character_after_identifier == '('
+
+        # Remove "m_" and "s_" to allow them.
+        modified_identifier = sub(r'(^|(?<=::))[ms]_', '', identifier)
+        if not file_state.is_objective_c() and modified_identifier.find('_') >= 0:
+            # Various exceptions to the rule: JavaScript opcode functions, const_iterator.
+            if (not (filename.find('JavaScriptCore') >= 0 and modified_identifier.find('op_') >= 0)
+                and not (filename.find('gtk') >= 0 and modified_identifier.startswith('webkit_') >= 0)
+                and not modified_identifier.startswith('tst_')
+                and not modified_identifier.startswith('webkit_dom_object_')
+                and not modified_identifier.startswith('webkit_soup')
+                and not modified_identifier.startswith('NPN_')
+                and not modified_identifier.startswith('NPP_')
+                and not modified_identifier.startswith('NP_')
+                and not modified_identifier.startswith('qt_')
+                and not modified_identifier.startswith('_q_')
+                and not modified_identifier.startswith('cairo_')
+                and not modified_identifier.startswith('Ecore_')
+                and not modified_identifier.startswith('Eina_')
+                and not modified_identifier.startswith('Evas_')
+                and not modified_identifier.startswith('Ewk_')
+                and not modified_identifier.startswith('cti_')
+                and not modified_identifier.find('::qt_') >= 0
+                and not modified_identifier.find('::_q_') >= 0
+                and not modified_identifier == "const_iterator"
+                and not modified_identifier == "vm_throw"
+                and not modified_identifier == "DFG_OPERATION"):
+                error(line_number, 'readability/naming/underscores', 4, identifier + " is incorrectly named. Don't use underscores in your identifier names.")
+
+        # Check for variables named 'l'; these are too easy to confuse with '1' in some fonts.
+        if modified_identifier == 'l':
+            error(line_number, 'readability/naming', 4, identifier + " is incorrectly named. Don't use the single letter 'l' as an identifier name.")
+
+        # There can be only one declaration in non-for-control statements.
+        if control_statement:
+            return
+        # We should continue checking if this is a function
+        # declaration because we need to check its arguments.
+        # Also, we need to check multiple declarations.
+        if character_after_identifier != '(' and character_after_identifier != ',':
+            return
+
+        number_of_identifiers += 1
+        line = line[matched.end():]
+
+def check_c_style_cast(line_number, line, raw_line, cast_type, pattern,
+                       error):
+    """Checks for a C-style cast by looking for the pattern.
+
+    This also handles sizeof(type) warnings, due to similarity of content.
+
+    Args:
+      line_number: The number of the line to check.
+      line: The line of code to check.
+      raw_line: The raw line of code to check, with comments.
+      cast_type: The string for the C++ cast to recommend.  This is either
+                 reinterpret_cast or static_cast, depending.
+      pattern: The regular expression used to find C-style casts.
+      error: The function to call with any errors found.
+    """
+    matched = search(pattern, line)
+    if not matched:
+        return
+
+    # e.g., sizeof(int)
+    sizeof_match = match(r'.*sizeof\s*$', line[0:matched.start(1) - 1])
+    if sizeof_match:
+        error(line_number, 'runtime/sizeof', 1,
+              'Using sizeof(type).  Use sizeof(varname) instead if possible')
+        return
+
+    remainder = line[matched.end(0):]
+
+    # The close paren is for function pointers as arguments to a function.
+    # e.g., void foo(void (*bar)(int));
+    # The semicolon check is a more basic function check; also possibly a
+    # function pointer typedef.
+    # e.g., void foo(int); or void foo(int) const;
+    # The equals check is for function pointer assignment.
+    # e.g., void *(*foo)(int) = ...
+    #
+    # Right now, this will only catch cases where there's a single argument, and
+    # it's unnamed.  It should probably be expanded to check for multiple
+    # arguments with some unnamed.
+    function_match = match(r'\s*(\)|=|(const)?\s*(;|\{|throw\(\)))', remainder)
+    if function_match:
+        if (not function_match.group(3)
+            or function_match.group(3) == ';'
+            or raw_line.find('/*') < 0):
+            error(line_number, 'readability/function', 3,
+                  'All parameters should be named in a function')
+        return
+
+    # At this point, all that should be left is actual casts.
+    error(line_number, 'readability/casting', 4,
+          'Using C-style cast.  Use %s<%s>(...) instead' %
+          (cast_type, matched.group(1)))
+
+
+_HEADERS_CONTAINING_TEMPLATES = (
+    ('<deque>', ('deque',)),
+    ('<functional>', ('unary_function', 'binary_function',
+                      'plus', 'minus', 'multiplies', 'divides', 'modulus',
+                      'negate',
+                      'equal_to', 'not_equal_to', 'greater', 'less',
+                      'greater_equal', 'less_equal',
+                      'logical_and', 'logical_or', 'logical_not',
+                      'unary_negate', 'not1', 'binary_negate', 'not2',
+                      'bind1st', 'bind2nd',
+                      'pointer_to_unary_function',
+                      'pointer_to_binary_function',
+                      'ptr_fun',
+                      'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
+                      'mem_fun_ref_t',
+                      'const_mem_fun_t', 'const_mem_fun1_t',
+                      'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
+                      'mem_fun_ref',
+                     )),
+    ('<limits>', ('numeric_limits',)),
+    ('<list>', ('list',)),
+    ('<map>', ('map', 'multimap',)),
+    ('<memory>', ('allocator',)),
+    ('<queue>', ('queue', 'priority_queue',)),
+    ('<set>', ('set', 'multiset',)),
+    ('<stack>', ('stack',)),
+    ('<string>', ('char_traits', 'basic_string',)),
+    ('<utility>', ('pair',)),
+    ('<vector>', ('vector',)),
+
+    # gcc extensions.
+    # Note: std::hash is their hash, ::hash is our hash
+    ('<hash_map>', ('hash_map', 'hash_multimap',)),
+    ('<hash_set>', ('hash_set', 'hash_multiset',)),
+    ('<slist>', ('slist',)),
+    )
+
+_HEADERS_ACCEPTED_BUT_NOT_PROMOTED = {
+    # We can trust with reasonable confidence that map gives us pair<>, too.
+    'pair<>': ('map', 'multimap', 'hash_map', 'hash_multimap')
+}
+
+_RE_PATTERN_STRING = re.compile(r'\bstring\b')
+
+_re_pattern_algorithm_header = []
+for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
+                  'transform'):
+    # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
+    # type::max().
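+    # For example, "std::max(a, b)" is treated as a use that requires <algorithm>,
+    # while "foo.max()" and "foo->max(0)" are not.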
+    _re_pattern_algorithm_header.append(
+        (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
+         _template,
+         '<algorithm>'))
+
+_re_pattern_templates = []
+for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
+    for _template in _templates:
+        _re_pattern_templates.append(
+            (re.compile(r'(\<|\b)' + _template + r'\s*\<'),
+             _template + '<>',
+             _header))
+
+
+def files_belong_to_same_module(filename_cpp, filename_h):
+    """Check if these two filenames belong to the same module.
+
+    The concept of a 'module' here is as follows:
+    foo.h, foo-inl.h, foo.cpp, foo_test.cpp and foo_unittest.cpp belong to the
+    same 'module' if they are in the same directory.
+    some/path/public/xyzzy and some/path/internal/xyzzy are also considered
+    to belong to the same module here.
+
+    If the filename_cpp contains a longer path than the filename_h, for example,
+    '/absolute/path/to/base/sysinfo.cpp', and this file would include
+    'base/sysinfo.h', this function also produces the prefix needed to open the
+    header. This is used by the caller of this function to more robustly open the
+    header file. We don't have access to the real include paths in this context,
+    so we need this guesswork here.
+
+    Known bugs: tools/base/bar.cpp and base/bar.h belong to the same module
+    according to this implementation. Because of this, this function gives
+    some false positives. This should be sufficiently rare in practice.
+
+    Args:
+      filename_cpp: is the path for the .cpp file
+      filename_h: is the path for the header file
+
+    Returns:
+      Tuple with a bool and a string:
+      bool: True if filename_cpp and filename_h belong to the same module.
+      string: the additional prefix needed to open the header file.
+    """
+
+    if not filename_cpp.endswith('.cpp'):
+        return (False, '')
+    filename_cpp = filename_cpp[:-len('.cpp')]
+    if filename_cpp.endswith('_unittest'):
+        filename_cpp = filename_cpp[:-len('_unittest')]
+    elif filename_cpp.endswith('_test'):
+        filename_cpp = filename_cpp[:-len('_test')]
+    filename_cpp = filename_cpp.replace('/public/', '/')
+    filename_cpp = filename_cpp.replace('/internal/', '/')
+
+    if not filename_h.endswith('.h'):
+        return (False, '')
+    filename_h = filename_h[:-len('.h')]
+    if filename_h.endswith('-inl'):
+        filename_h = filename_h[:-len('-inl')]
+    filename_h = filename_h.replace('/public/', '/')
+    filename_h = filename_h.replace('/internal/', '/')
+
+    files_belong_to_same_module = filename_cpp.endswith(filename_h)
+    common_path = ''
+    if files_belong_to_same_module:
+        common_path = filename_cpp[:-len(filename_h)]
+    return files_belong_to_same_module, common_path
+
+
+def update_include_state(filename, include_state, io=codecs):
+    """Fill up the include_state with new includes found from the file.
+
+    Args:
+      filename: the name of the header to read.
+      include_state: an _IncludeState instance in which the headers are inserted.
+      io: The io factory to use to read the file. Provided for testability.
+
+    Returns:
+      True if a header was successfully added. False otherwise.
+    """
+    io = _unit_test_config.get(INCLUDE_IO_INJECTION_KEY, codecs)
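+    # Note: an io object injected through _unit_test_config takes precedence over
+    # the io argument above; when nothing is injected, codecs is used.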
+    header_file = None
+    try:
+        header_file = io.open(filename, 'r', 'utf8', 'replace')
+    except IOError:
+        return False
+    line_number = 0
+    for line in header_file:
+        line_number += 1
+        clean_line = cleanse_comments(line)
+        matched = _RE_PATTERN_INCLUDE.search(clean_line)
+        if matched:
+            include = matched.group(2)
+            # The value formatting is cute, but not really used right now.
+            # What matters here is that the key is in include_state.
+            include_state.setdefault(include, '%s:%d' % (filename, line_number))
+    return True
+
+
+def check_for_include_what_you_use(filename, clean_lines, include_state, error):
+    """Reports for missing stl includes.
+
+    This function will output warnings to make sure you are including the headers
+    necessary for the stl containers and functions that you use. We only give one
+    reason to include a header. For example, if you use both equal_to<> and
+    less<> in a .h file, only one (the latter in the file) of these will be
+    reported as a reason to include the <functional>.
+
+    Args:
+      filename: The name of the current file.
+      clean_lines: A CleansedLines instance containing the file.
+      include_state: An _IncludeState instance.
+      error: The function to call with any errors found.
+    """
+    required = {}  # A map of header name to line_number and the template entity.
+        # Example of required: { '<functional>': (1219, 'less<>') }
+
+    for line_number in xrange(clean_lines.num_lines()):
+        line = clean_lines.elided[line_number]
+        if not line or line[0] == '#':
+            continue
+
+        # String is special -- it is a non-templatized type in STL.
+        if _RE_PATTERN_STRING.search(line):
+            required['<string>'] = (line_number, 'string')
+
+        for pattern, template, header in _re_pattern_algorithm_header:
+            if pattern.search(line):
+                required[header] = (line_number, template)
+
+        # The following check is just a speed-up; no semantics are changed.
+        if '<' not in line:  # Reduces CPU time by skipping lines that cannot use templates.
+            continue
+
+        for pattern, template, header in _re_pattern_templates:
+            if pattern.search(line):
+                required[header] = (line_number, template)
+
+    # The policy is that if you #include something in foo.h you don't need to
+    # include it again in foo.cpp. Here, we will look at possible includes.
+    # Let's copy the include_state so it is only messed up within this function.
+    include_state = include_state.copy()
+
+    # Did we find the header for this file (if any) and successfully load it?
+    header_found = False
+
+    # Use the absolute path so that matching works properly.
+    abs_filename = os.path.abspath(filename)
+
+    # For Emacs's flymake.
+    # If cpp_style is invoked from Emacs's flymake, a temporary file is generated
+    # by flymake and that file name might end with '_flymake.cpp'. In that case,
+    # restore original file name here so that the corresponding header file can be
+    # found.
+    # e.g. If the file name is 'foo_flymake.cpp', we should search for 'foo.h'
+    # instead of 'foo_flymake.h'
+    abs_filename = re.sub(r'_flymake\.cpp$', '.cpp', abs_filename)
+
+    # include_state is modified during iteration, so we iterate over a copy of
+    # the keys.
+    for header in include_state.keys():  #NOLINT
+        (same_module, common_path) = files_belong_to_same_module(abs_filename, header)
+        fullpath = common_path + header
+        if same_module and update_include_state(fullpath, include_state):
+            header_found = True
+
+    # If we can't find the header file for a .cpp, assume it's because we don't
+    # know where to look. In that case we'll give up as we're not sure they
+    # didn't include it in the .h file.
+    # FIXME: Do a better job of finding .h files so we are confident that
+    #        not having the .h file means there isn't one.
+    if filename.endswith('.cpp') and not header_found:
+        return
+
+    # All the lines have been processed, report the errors found.
+    for required_header_unstripped in required:
+        template = required[required_header_unstripped][1]
+        if template in _HEADERS_ACCEPTED_BUT_NOT_PROMOTED:
+            headers = _HEADERS_ACCEPTED_BUT_NOT_PROMOTED[template]
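+            # A non-empty list here means that at least one accepted header is
+            # already included, which satisfies the requirement.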
+            if [True for header in headers if header in include_state]:
+                continue
+        if required_header_unstripped.strip('<>"') not in include_state:
+            error(required[required_header_unstripped][0],
+                  'build/include_what_you_use', 4,
+                  'Add #include ' + required_header_unstripped + ' for ' + template)
+
+
+def process_line(filename, file_extension,
+                 clean_lines, line, include_state, function_state,
+                 class_state, file_state, error):
+    """Processes a single line in the file.
+
+    Args:
+      filename: Filename of the file that is being processed.
+      file_extension: The extension (dot not included) of the file.
+      clean_lines: A CleansedLines instance containing the file, with comments
+                   stripped.
+      line: The number of the line being processed.
+      include_state: An _IncludeState instance in which the headers are inserted.
+      function_state: A _FunctionState instance which counts function lines, etc.
+      class_state: A _ClassState instance which maintains information about
+                   the current stack of nested class declarations being parsed.
+      file_state: A _FileState instance which maintains information about
+                  the state of things in the file.
+      error: A callable to which errors are reported, which takes 4 arguments:
+             line number, error category, confidence, and message.
+
+    """
+    raw_lines = clean_lines.raw_lines
+    detect_functions(clean_lines, line, function_state, error)
+    check_for_function_lengths(clean_lines, line, function_state, error)
+    if search(r'\bNOLINT\b', raw_lines[line]):  # ignore nolint lines
+        return
+    if match(r'\s*\b__asm\b', raw_lines[line]):  # Ignore asm lines as they format differently.
+        return
+    check_function_definition(filename, file_extension, clean_lines, line, function_state, error)
+    check_pass_ptr_usage(clean_lines, line, function_state, error)
+    check_for_leaky_patterns(clean_lines, line, function_state, error)
+    check_for_multiline_comments_and_strings(clean_lines, line, error)
+    check_style(clean_lines, line, file_extension, class_state, file_state, error)
+    check_language(filename, clean_lines, line, file_extension, include_state,
+                   file_state, error)
+    check_for_non_standard_constructs(clean_lines, line, class_state, error)
+    check_posix_threading(clean_lines, line, error)
+    check_invalid_increment(clean_lines, line, error)
+
+
+def _process_lines(filename, file_extension, lines, error, min_confidence):
+    """Performs lint checks and reports any errors to the given error function.
+
+    Args:
+      filename: Filename of the file that is being processed.
+      file_extension: The extension (dot not included) of the file.
+      lines: An array of strings, each representing a line of the file, with the
+             last element being empty if the file is terminated with a newline.
+      error: A callable to which errors are reported, which takes 4 arguments:
+             line number, error category, confidence, and message.
+    """
+    lines = (['// marker so line numbers and indices both start at 1'] + lines +
+             ['// marker so line numbers end in a known way'])
+
+    include_state = _IncludeState()
+    function_state = _FunctionState(min_confidence)
+    class_state = _ClassState()
+
+    check_for_copyright(lines, error)
+
+    if file_extension == 'h':
+        check_for_header_guard(filename, lines, error)
+
+    remove_multi_line_comments(lines, error)
+    clean_lines = CleansedLines(lines)
+    file_state = _FileState(clean_lines, file_extension)
+    for line in xrange(clean_lines.num_lines()):
+        process_line(filename, file_extension, clean_lines, line,
+                     include_state, function_state, class_state, file_state, error)
+    class_state.check_finished(error)
+
+    check_for_include_what_you_use(filename, clean_lines, include_state, error)
+
+    # We check here rather than inside process_line so that we see raw
+    # lines rather than "cleaned" lines.
+    check_for_unicode_replacement_characters(lines, error)
+
+    check_for_new_line_at_eof(lines, error)
+
+
+class CppChecker(object):
+
+    """Processes C++ lines for checking style."""
+
+    # This list is used to--
+    #
+    # (1) generate an explicit list of all possible categories,
+    # (2) unit test that all checked categories have valid names, and
+    # (3) unit test that all categories are getting unit tested.
+    #
+    categories = set([
+        'build/class',
+        'build/deprecated',
+        'build/endif_comment',
+        'build/forward_decl',
+        'build/header_guard',
+        'build/include',
+        'build/include_order',
+        'build/include_what_you_use',
+        'build/namespaces',
+        'build/printf_format',
+        'build/storage_class',
+        'build/using_std',
+        'legal/copyright',
+        'readability/braces',
+        'readability/casting',
+        'readability/check',
+        'readability/comparison_to_zero',
+        'readability/constructors',
+        'readability/control_flow',
+        'readability/fn_size',
+        'readability/function',
+        'readability/multiline_comment',
+        'readability/multiline_string',
+        'readability/parameter_name',
+        'readability/naming',
+        'readability/naming/underscores',
+        'readability/null',
+        'readability/pass_ptr',
+        'readability/streams',
+        'readability/todo',
+        'readability/utf8',
+        'readability/webkit_export',
+        'runtime/arrays',
+        'runtime/bitfields',
+        'runtime/casting',
+        'runtime/ctype_function',
+        'runtime/explicit',
+        'runtime/init',
+        'runtime/int',
+        'runtime/invalid_increment',
+        'runtime/leaky_pattern',
+        'runtime/max_min_macros',
+        'runtime/memset',
+        'runtime/printf',
+        'runtime/printf_format',
+        'runtime/references',
+        'runtime/rtti',
+        'runtime/sizeof',
+        'runtime/string',
+        'runtime/threadsafe_fn',
+        'runtime/unsigned',
+        'runtime/virtual',
+        'whitespace/blank_line',
+        'whitespace/braces',
+        'whitespace/comma',
+        'whitespace/comments',
+        'whitespace/declaration',
+        'whitespace/end_of_line',
+        'whitespace/ending_newline',
+        'whitespace/indent',
+        'whitespace/line_length',
+        'whitespace/newline',
+        'whitespace/operators',
+        'whitespace/parens',
+        'whitespace/semicolon',
+        'whitespace/tab',
+        'whitespace/todo',
+        ])
+
+    def __init__(self, file_path, file_extension, handle_style_error,
+                 min_confidence):
+        """Create a CppChecker instance.
+
+        Args:
+          file_extension: A string that is the file extension, without
+                          the leading dot.
+
+        """
+        self.file_extension = file_extension
+        self.file_path = file_path
+        self.handle_style_error = handle_style_error
+        self.min_confidence = min_confidence
+
+    # Useful for unit testing.
+    def __eq__(self, other):
+        """Return whether this CppChecker instance is equal to another."""
+        if self.file_extension != other.file_extension:
+            return False
+        if self.file_path != other.file_path:
+            return False
+        if self.handle_style_error != other.handle_style_error:
+            return False
+        if self.min_confidence != other.min_confidence:
+            return False
+
+        return True
+
+    # Useful for unit testing.
+    def __ne__(self, other):
+        # Python does not automatically deduce __ne__() from __eq__().
+        return not self.__eq__(other)
+
+    def check(self, lines):
+        _process_lines(self.file_path, self.file_extension, lines,
+                       self.handle_style_error, self.min_confidence)
+
+
+# FIXME: Remove this function (requires refactoring unit tests).
+def process_file_data(filename, file_extension, lines, error, min_confidence, unit_test_config):
+    global _unit_test_config
+    _unit_test_config = unit_test_config
+    checker = CppChecker(filename, file_extension, error, min_confidence)
+    checker.check(lines)
+    _unit_test_config = {}
diff --git a/Tools/Scripts/webkitpy/style/checkers/cpp_unittest.py b/Tools/Scripts/webkitpy/style/checkers/cpp_unittest.py
new file mode 100644
index 0000000..5522201
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checkers/cpp_unittest.py
@@ -0,0 +1,4866 @@
+#!/usr/bin/python
+# -*- coding: utf-8; -*-
+#
+# Copyright (C) 2011 Google Inc. All rights reserved.
+# Copyright (C) 2009 Torch Mobile Inc.
+# Copyright (C) 2009 Apple Inc. All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit test for cpp_style.py."""
+
+# FIXME: Add a good test that tests UpdateIncludeState.
+
+import codecs
+import os
+import random
+import re
+import unittest
+import cpp as cpp_style
+from cpp import CppChecker
+from ..filter import FilterConfiguration
+
+# This class works as an error collector and replaces cpp_style.Error
+# function for the unit tests.  We also verify each category we see
+# is in STYLE_CATEGORIES, to help keep that list up to date.
+class ErrorCollector:
+    _all_style_categories = CppChecker.categories
+    # This is a dict recording all categories seen in any unit test.
+    _seen_style_categories = {}
+
+    def __init__(self, assert_fn, filter=None, lines_to_check=None):
+        """assert_fn: a function to call when we notice a problem.
+           filter: filters the errors that we are concerned about."""
+        self._assert_fn = assert_fn
+        self._errors = []
+        self._lines_to_check = lines_to_check
+        if not filter:
+            filter = FilterConfiguration()
+        self._filter = filter
+
+    def __call__(self, line_number, category, confidence, message):
+        self._assert_fn(category in self._all_style_categories,
+                        'Message "%s" has category "%s",'
+                        ' which is not in STYLE_CATEGORIES' % (message, category))
+
+        if self._lines_to_check and not line_number in self._lines_to_check:
+            return False
+
+        if self._filter.should_check(category, ""):
+            self._seen_style_categories[category] = 1
+            self._errors.append('%s  [%s] [%d]' % (message, category, confidence))
+        return True
+
+    def results(self):
+        if len(self._errors) < 2:
+            return ''.join(self._errors)  # Most tests expect to have a string.
+        else:
+            return self._errors  # Let's give a list if there is more than one.
+
+    def result_list(self):
+        return self._errors
+
+    def verify_all_categories_are_seen(self):
+        """Fails if there's a category in _all_style_categories - _seen_style_categories.
+
+        This should only be called after all tests are run, so
+        _seen_style_categories has had a chance to fully populate.  Since
+        this isn't called from within the normal unittest framework, we
+        can't use the normal unittest assert macros.  Instead we just exit
+        when we see an error.  Good thing this test is always run last!
+        """
+        for category in self._all_style_categories:
+            if category not in self._seen_style_categories:
+                import sys
+                sys.exit('FATAL ERROR: There are no tests for category "%s"' % category)
+
+
+# This class is a lame mock of codecs. We do not verify filename, mode, or
+# encoding, but for the current use case it is not needed.
+class MockIo:
+    def __init__(self, mock_file):
+        self.mock_file = mock_file
+
+    def open(self, unused_filename, unused_mode, unused_encoding, _):  # NOLINT
+        # (lint doesn't like open as a method name)
+        return self.mock_file
+
+
+class CppFunctionsTest(unittest.TestCase):
+
+    """Supports testing functions that do not need CppStyleTestBase."""
+
+    def test_convert_to_lower_with_underscores(self):
+        self.assertEquals(cpp_style._convert_to_lower_with_underscores('ABC'), 'abc')
+        self.assertEquals(cpp_style._convert_to_lower_with_underscores('aB'), 'a_b')
+        self.assertEquals(cpp_style._convert_to_lower_with_underscores('isAName'), 'is_a_name')
+        self.assertEquals(cpp_style._convert_to_lower_with_underscores('AnotherTest'), 'another_test')
+        self.assertEquals(cpp_style._convert_to_lower_with_underscores('PassRefPtr<MyClass>'), 'pass_ref_ptr<my_class>')
+        self.assertEquals(cpp_style._convert_to_lower_with_underscores('_ABC'), '_abc')
+
+    def test_create_acronym(self):
+        self.assertEquals(cpp_style._create_acronym('ABC'), 'ABC')
+        self.assertEquals(cpp_style._create_acronym('IsAName'), 'IAN')
+        self.assertEquals(cpp_style._create_acronym('PassRefPtr<MyClass>'), 'PRP<MC>')
+
+    def test_is_c_or_objective_c(self):
+        clean_lines = cpp_style.CleansedLines([''])
+        clean_objc_lines = cpp_style.CleansedLines(['#import "header.h"'])
+        self.assertTrue(cpp_style._FileState(clean_lines, 'c').is_c_or_objective_c())
+        self.assertTrue(cpp_style._FileState(clean_lines, 'm').is_c_or_objective_c())
+        self.assertFalse(cpp_style._FileState(clean_lines, 'cpp').is_c_or_objective_c())
+        self.assertFalse(cpp_style._FileState(clean_lines, 'cc').is_c_or_objective_c())
+        self.assertFalse(cpp_style._FileState(clean_lines, 'h').is_c_or_objective_c())
+        self.assertTrue(cpp_style._FileState(clean_objc_lines, 'h').is_c_or_objective_c())
+
+    def test_parameter(self):
+        # Test type.
+        parameter = cpp_style.Parameter('ExceptionCode', 13, 1)
+        self.assertEquals(parameter.type, 'ExceptionCode')
+        self.assertEquals(parameter.name, '')
+        self.assertEquals(parameter.row, 1)
+
+        # Test type and name.
+        parameter = cpp_style.Parameter('PassRefPtr<MyClass> parent', 19, 1)
+        self.assertEquals(parameter.type, 'PassRefPtr<MyClass>')
+        self.assertEquals(parameter.name, 'parent')
+        self.assertEquals(parameter.row, 1)
+
+        # Test type, no name, with default value.
+        parameter = cpp_style.Parameter('MyClass = 0', 7, 0)
+        self.assertEquals(parameter.type, 'MyClass')
+        self.assertEquals(parameter.name, '')
+        self.assertEquals(parameter.row, 0)
+
+        # Test type, name, and default value.
+        parameter = cpp_style.Parameter('MyClass a = 0', 7, 0)
+        self.assertEquals(parameter.type, 'MyClass')
+        self.assertEquals(parameter.name, 'a')
+        self.assertEquals(parameter.row, 0)
+
+    def test_single_line_view(self):
+        start_position = cpp_style.Position(row=1, column=1)
+        end_position = cpp_style.Position(row=3, column=1)
+        single_line_view = cpp_style.SingleLineView(['0', 'abcde', 'fgh', 'i'], start_position, end_position)
+        self.assertEquals(single_line_view.single_line, 'bcde fgh i')
+        self.assertEquals(single_line_view.convert_column_to_row(0), 1)
+        self.assertEquals(single_line_view.convert_column_to_row(4), 1)
+        self.assertEquals(single_line_view.convert_column_to_row(5), 2)
+        self.assertEquals(single_line_view.convert_column_to_row(8), 2)
+        self.assertEquals(single_line_view.convert_column_to_row(9), 3)
+        self.assertEquals(single_line_view.convert_column_to_row(100), 3)
+
+        start_position = cpp_style.Position(row=0, column=3)
+        end_position = cpp_style.Position(row=0, column=4)
+        single_line_view = cpp_style.SingleLineView(['abcdef'], start_position, end_position)
+        self.assertEquals(single_line_view.single_line, 'd')
+
+    def test_create_skeleton_parameters(self):
+        self.assertEquals(cpp_style.create_skeleton_parameters(''), '')
+        self.assertEquals(cpp_style.create_skeleton_parameters(' '), ' ')
+        self.assertEquals(cpp_style.create_skeleton_parameters('long'), 'long,')
+        self.assertEquals(cpp_style.create_skeleton_parameters('const unsigned long int'), '                    int,')
+        self.assertEquals(cpp_style.create_skeleton_parameters('long int*'), '     int ,')
+        self.assertEquals(cpp_style.create_skeleton_parameters('PassRefPtr<Foo> a'), 'PassRefPtr      a,')
+        self.assertEquals(cpp_style.create_skeleton_parameters(
+                'ComplexTemplate<NestedTemplate1<MyClass1, MyClass2>, NestedTemplate1<MyClass1, MyClass2> > param, int second'),
+                          'ComplexTemplate                                                                            param, int second,')
+        self.assertEquals(cpp_style.create_skeleton_parameters('int = 0, Namespace::Type& a'), 'int    ,            Type  a,')
+        # create_skeleton_parameters is a bit too aggressive with function variables, but
+        # it allows the other parameters to be parsed, and declarations like this are rare.
+        self.assertEquals(cpp_style.create_skeleton_parameters('void (*fn)(int a, int b), Namespace::Type& a'),
+                          'void                    ,            Type  a,')
+
+        # This doesn't look like a function declaration, but the simplifications help to eliminate false positives.
+        self.assertEquals(cpp_style.create_skeleton_parameters('b{d}'), 'b   ,')
+
+    def test_find_parameter_name_index(self):
+        self.assertEquals(cpp_style.find_parameter_name_index(' int a '), 5)
+        self.assertEquals(cpp_style.find_parameter_name_index(' PassRefPtr     '), 16)
+        self.assertEquals(cpp_style.find_parameter_name_index('double'), 6)
+
+    def test_parameter_list(self):
+        elided_lines = ['int blah(PassRefPtr<MyClass> paramName,',
+                        'const Other1Class& foo,',
+                        'const ComplexTemplate<Class1, NestedTemplate<P1, P2> >* const * param = new ComplexTemplate<Class1, NestedTemplate<P1, P2> >(34, 42),',
+                        'int* myCount = 0);']
+        start_position = cpp_style.Position(row=0, column=8)
+        end_position = cpp_style.Position(row=3, column=16)
+
+        expected_parameters = ({'type': 'PassRefPtr<MyClass>', 'name': 'paramName', 'row': 0},
+                               {'type': 'const Other1Class&', 'name': 'foo', 'row': 1},
+                               {'type': 'const ComplexTemplate<Class1, NestedTemplate<P1, P2> >* const *', 'name': 'param', 'row': 2},
+                               {'type': 'int*', 'name': 'myCount', 'row': 3})
+        index = 0
+        for parameter in cpp_style.parameter_list(elided_lines, start_position, end_position):
+            expected_parameter = expected_parameters[index]
+            self.assertEquals(parameter.type, expected_parameter['type'])
+            self.assertEquals(parameter.name, expected_parameter['name'])
+            self.assertEquals(parameter.row, expected_parameter['row'])
+            index += 1
+        self.assertEquals(index, len(expected_parameters))
+
+    def test_check_parameter_against_text(self):
+        error_collector = ErrorCollector(self.assert_)
+        parameter = cpp_style.Parameter('FooF ooF', 4, 1)
+        self.assertFalse(cpp_style._check_parameter_name_against_text(parameter, 'FooF', error_collector))
+        self.assertEquals(error_collector.results(),
+                          'The parameter name "ooF" adds no information, so it should be removed.  [readability/parameter_name] [5]')
+
+class CppStyleTestBase(unittest.TestCase):
+    """Provides some useful helper functions for cpp_style tests.
+
+    Attributes:
+      min_confidence: An integer that is the current minimum confidence
+                      level for the tests.
+
+    """
+
+    # FIXME: Refactor the unit tests so the confidence level is passed
+    #        explicitly, just like it is in the real code.
+    min_confidence = 1
+
+    # Helper function to avoid needing to explicitly pass confidence
+    # in all the unit test calls to cpp_style.process_file_data().
+    def process_file_data(self, filename, file_extension, lines, error, unit_test_config={}):
+        """Call cpp_style.process_file_data() with the min_confidence."""
+        return cpp_style.process_file_data(filename, file_extension, lines,
+                                           error, self.min_confidence, unit_test_config)
+
+    def perform_lint(self, code, filename, basic_error_rules, unit_test_config={}, lines_to_check=None):
+        error_collector = ErrorCollector(self.assert_, FilterConfiguration(basic_error_rules), lines_to_check)
+        lines = code.split('\n')
+        extension = filename.split('.')[1]
+        self.process_file_data(filename, extension, lines, error_collector, unit_test_config)
+        return error_collector.results()
+
+    # Perform lint on single line of input and return the error message.
+    def perform_single_line_lint(self, code, filename):
+        basic_error_rules = ('-build/header_guard',
+                             '-legal/copyright',
+                             '-readability/fn_size',
+                             '-readability/parameter_name',
+                             '-readability/pass_ptr',
+                             '-whitespace/ending_newline')
+        return self.perform_lint(code, filename, basic_error_rules)
+
+    # Perform lint over multiple lines and return the error message.
+    def perform_multi_line_lint(self, code, file_extension):
+        basic_error_rules = ('-build/header_guard',
+                             '-legal/copyright',
+                             '-readability/parameter_name',
+                             '-whitespace/ending_newline')
+        return self.perform_lint(code, 'test.' + file_extension, basic_error_rules)
+
+    # Only keep some errors related to includes, namespaces and rtti.
+    def perform_language_rules_check(self, filename, code, lines_to_check=None):
+        basic_error_rules = ('-',
+                             '+build/include',
+                             '+build/include_order',
+                             '+build/namespaces',
+                             '+runtime/rtti')
+        return self.perform_lint(code, filename, basic_error_rules, lines_to_check=lines_to_check)
+
+    # Only keep function length errors.
+    def perform_function_lengths_check(self, code):
+        basic_error_rules = ('-',
+                             '+readability/fn_size')
+        return self.perform_lint(code, 'test.cpp', basic_error_rules)
+
+    # Only keep pass ptr errors.
+    def perform_pass_ptr_check(self, code):
+        basic_error_rules = ('-',
+                             '+readability/pass_ptr')
+        return self.perform_lint(code, 'test.cpp', basic_error_rules)
+
+    # Only keep leaky pattern errors.
+    def perform_leaky_pattern_check(self, code):
+        basic_error_rules = ('-',
+                             '+runtime/leaky_pattern')
+        return self.perform_lint(code, 'test.cpp', basic_error_rules)
+
+    # Only keep include-what-you-use errors.
+    def perform_include_what_you_use(self, code, filename='foo.h', io=codecs):
+        basic_error_rules = ('-',
+                             '+build/include_what_you_use')
+        unit_test_config = {cpp_style.INCLUDE_IO_INJECTION_KEY: io}
+        return self.perform_lint(code, filename, basic_error_rules, unit_test_config)
+
+    # Perform lint and compare the error message with "expected_message".
+    def assert_lint(self, code, expected_message, file_name='foo.cpp'):
+        self.assertEquals(expected_message, self.perform_single_line_lint(code, file_name))
+
+    def assert_lint_one_of_many_errors_re(self, code, expected_message_re, file_name='foo.cpp'):
+        messages = self.perform_single_line_lint(code, file_name)
+        for message in messages:
+            if re.search(expected_message_re, message):
+                return
+
+        self.assertEquals(expected_message_re, messages)
+
+    def assert_multi_line_lint(self, code, expected_message, file_name='foo.h'):
+        file_extension = file_name[file_name.rfind('.') + 1:]
+        self.assertEquals(expected_message, self.perform_multi_line_lint(code, file_extension))
+
+    def assert_multi_line_lint_re(self, code, expected_message_re, file_name='foo.h'):
+        file_extension = file_name[file_name.rfind('.') + 1:]
+        message = self.perform_multi_line_lint(code, file_extension)
+        if not re.search(expected_message_re, message):
+            self.fail('Message was:\n' + message + 'Expected match to "' + expected_message_re + '"')
+
+    def assert_language_rules_check(self, file_name, code, expected_message, lines_to_check=None):
+        self.assertEquals(expected_message,
+                          self.perform_language_rules_check(file_name, code, lines_to_check))
+
+    def assert_include_what_you_use(self, code, expected_message):
+        self.assertEquals(expected_message,
+                          self.perform_include_what_you_use(code))
+
+    def assert_blank_lines_check(self, lines, start_errors, end_errors):
+        error_collector = ErrorCollector(self.assert_)
+        self.process_file_data('foo.cpp', 'cpp', lines, error_collector)
+        self.assertEquals(
+            start_errors,
+            error_collector.results().count(
+                'Blank line at the start of a code block.  Is this needed?'
+                '  [whitespace/blank_line] [2]'))
+        self.assertEquals(
+            end_errors,
+            error_collector.results().count(
+                'Blank line at the end of a code block.  Is this needed?'
+                '  [whitespace/blank_line] [3]'))
+
+    def assert_positions_equal(self, position, tuple_position):
+        """Checks if the two positions are equal.
+
+        position: a cpp_style.Position object.
+        tuple_position: a tuple (row, column) to compare against."""
+        self.assertEquals(position, cpp_style.Position(tuple_position[0], tuple_position[1]),
+                          'position %s, tuple_position %s' % (position, tuple_position))
+
+
+class FunctionDetectionTest(CppStyleTestBase):
+    def perform_function_detection(self, lines, function_information, detection_line=0):
+        clean_lines = cpp_style.CleansedLines(lines)
+        function_state = cpp_style._FunctionState(5)
+        error_collector = ErrorCollector(self.assert_)
+        cpp_style.detect_functions(clean_lines, detection_line, function_state, error_collector)
+        if not function_information:
+            self.assertEquals(function_state.in_a_function, False)
+            return
+        self.assertEquals(function_state.in_a_function, True)
+        self.assertEquals(function_state.current_function, function_information['name'] + '()')
+        self.assertEquals(function_state.modifiers_and_return_type(), function_information['modifiers_and_return_type'])
+        self.assertEquals(function_state.is_pure, function_information['is_pure'])
+        self.assertEquals(function_state.is_declaration, function_information['is_declaration'])
+        self.assert_positions_equal(function_state.function_name_start_position, function_information['function_name_start_position'])
+        self.assert_positions_equal(function_state.parameter_start_position, function_information['parameter_start_position'])
+        self.assert_positions_equal(function_state.parameter_end_position, function_information['parameter_end_position'])
+        self.assert_positions_equal(function_state.body_start_position, function_information['body_start_position'])
+        self.assert_positions_equal(function_state.end_position, function_information['end_position'])
+        expected_parameters = function_information.get('parameter_list')
+        if expected_parameters:
+            actual_parameters = function_state.parameter_list()
+            self.assertEquals(len(actual_parameters), len(expected_parameters))
+            for index in range(len(expected_parameters)):
+                actual_parameter = actual_parameters[index]
+                expected_parameter = expected_parameters[index]
+                self.assertEquals(actual_parameter.type, expected_parameter['type'])
+                self.assertEquals(actual_parameter.name, expected_parameter['name'])
+                self.assertEquals(actual_parameter.row, expected_parameter['row'])
+
+    def test_basic_function_detection(self):
+        self.perform_function_detection(
+            ['void theTestFunctionName(int) {',
+             '}'],
+            {'name': 'theTestFunctionName',
+             'modifiers_and_return_type': 'void',
+             'function_name_start_position': (0, 5),
+             'parameter_start_position': (0, 24),
+             'parameter_end_position': (0, 29),
+             'body_start_position': (0, 30),
+             'end_position': (1, 1),
+             'is_pure': False,
+             'is_declaration': False})
+
+    def test_function_declaration_detection(self):
+        self.perform_function_detection(
+            ['void aFunctionName(int);'],
+            {'name': 'aFunctionName',
+             'modifiers_and_return_type': 'void',
+             'function_name_start_position': (0, 5),
+             'parameter_start_position': (0, 18),
+             'parameter_end_position': (0, 23),
+             'body_start_position': (0, 23),
+             'end_position': (0, 24),
+             'is_pure': False,
+             'is_declaration': True})
+
+        self.perform_function_detection(
+            ['CheckedInt<T> operator /(const CheckedInt<T> &lhs, const CheckedInt<T> &rhs);'],
+            {'name': 'operator /',
+             'modifiers_and_return_type': 'CheckedInt<T>',
+             'function_name_start_position': (0, 14),
+             'parameter_start_position': (0, 24),
+             'parameter_end_position': (0, 76),
+             'body_start_position': (0, 76),
+             'end_position': (0, 77),
+             'is_pure': False,
+             'is_declaration': True})
+
+        self.perform_function_detection(
+            ['CheckedInt<T> operator -(const CheckedInt<T> &lhs, const CheckedInt<T> &rhs);'],
+            {'name': 'operator -',
+             'modifiers_and_return_type': 'CheckedInt<T>',
+             'function_name_start_position': (0, 14),
+             'parameter_start_position': (0, 24),
+             'parameter_end_position': (0, 76),
+             'body_start_position': (0, 76),
+             'end_position': (0, 77),
+             'is_pure': False,
+             'is_declaration': True})
+
+        self.perform_function_detection(
+            ['CheckedInt<T> operator !=(const CheckedInt<T> &lhs, const CheckedInt<T> &rhs);'],
+            {'name': 'operator !=',
+             'modifiers_and_return_type': 'CheckedInt<T>',
+             'function_name_start_position': (0, 14),
+             'parameter_start_position': (0, 25),
+             'parameter_end_position': (0, 77),
+             'body_start_position': (0, 77),
+             'end_position': (0, 78),
+             'is_pure': False,
+             'is_declaration': True})
+
+        self.perform_function_detection(
+            ['CheckedInt<T> operator +(const CheckedInt<T> &lhs, const CheckedInt<T> &rhs);'],
+            {'name': 'operator +',
+             'modifiers_and_return_type': 'CheckedInt<T>',
+             'function_name_start_position': (0, 14),
+             'parameter_start_position': (0, 24),
+             'parameter_end_position': (0, 76),
+             'body_start_position': (0, 76),
+             'end_position': (0, 77),
+             'is_pure': False,
+             'is_declaration': True})
+
+    def test_pure_function_detection(self):
+        self.perform_function_detection(
+            ['virtual void theTestFunctionName(int = 0);'],
+            {'name': 'theTestFunctionName',
+             'modifiers_and_return_type': 'virtual void',
+             'function_name_start_position': (0, 13),
+             'parameter_start_position': (0, 32),
+             'parameter_end_position': (0, 41),
+             'body_start_position': (0, 41),
+             'end_position': (0, 42),
+             'is_pure': False,
+             'is_declaration': True})
+
+        self.perform_function_detection(
+            ['virtual void theTestFunctionName(int) = 0;'],
+            {'name': 'theTestFunctionName',
+             'modifiers_and_return_type': 'virtual void',
+             'function_name_start_position': (0, 13),
+             'parameter_start_position': (0, 32),
+             'parameter_end_position': (0, 37),
+             'body_start_position': (0, 41),
+             'end_position': (0, 42),
+             'is_pure': True,
+             'is_declaration': True})
+
+        # Hopefully, no one writes code like this but it is a tricky case.
+        self.perform_function_detection(
+            ['virtual void theTestFunctionName(int)',
+             ' = ',
+             ' 0 ;'],
+            {'name': 'theTestFunctionName',
+             'modifiers_and_return_type': 'virtual void',
+             'function_name_start_position': (0, 13),
+             'parameter_start_position': (0, 32),
+             'parameter_end_position': (0, 37),
+             'body_start_position': (2, 3),
+             'end_position': (2, 4),
+             'is_pure': True,
+             'is_declaration': True})
+
+    def test_ignore_macros(self):
+        self.perform_function_detection(['void aFunctionName(int); \\'], None)
+
+    def test_non_functions(self):
+        # This case exposed an error because the open brace was in quotes.
+        self.perform_function_detection(
+            ['asm(',
+             '    "stmdb sp!, {r1-r3}" "\n"',
+             ');'],
+            # This isn't a function but it looks like one to our simple
+            # algorithm and that is ok.
+            {'name': 'asm',
+             'modifiers_and_return_type': '',
+             'function_name_start_position': (0, 0),
+             'parameter_start_position': (0, 3),
+             'parameter_end_position': (2, 1),
+             'body_start_position': (2, 1),
+             'end_position': (2, 2),
+             'is_pure': False,
+             'is_declaration': True})
+
+        # Simple test case with something that is not a function.
+        self.perform_function_detection(['class Stuff;'], None)
+
+    def test_parameter_list(self):
+        # A function with no arguments.
+        function_state = self.perform_function_detection(
+            ['void functionName();'],
+            {'name': 'functionName',
+             'modifiers_and_return_type': 'void',
+             'function_name_start_position': (0, 5),
+             'parameter_start_position': (0, 17),
+             'parameter_end_position': (0, 19),
+             'body_start_position': (0, 19),
+             'end_position': (0, 20),
+             'is_pure': False,
+             'is_declaration': True,
+             'parameter_list': ()})
+
+        # A function with one argument.
+        function_state = self.perform_function_detection(
+            ['void functionName(int);'],
+            {'name': 'functionName',
+             'modifiers_and_return_type': 'void',
+             'function_name_start_position': (0, 5),
+             'parameter_start_position': (0, 17),
+             'parameter_end_position': (0, 22),
+             'body_start_position': (0, 22),
+             'end_position': (0, 23),
+             'is_pure': False,
+             'is_declaration': True,
+             'parameter_list':
+                 ({'type': 'int', 'name': '', 'row': 0},)})
+
+        # A function with unsigned and short arguments.
+        function_state = self.perform_function_detection(
+            ['void functionName(unsigned a, short b, long c, long long short unsigned int);'],
+            {'name': 'functionName',
+             'modifiers_and_return_type': 'void',
+             'function_name_start_position': (0, 5),
+             'parameter_start_position': (0, 17),
+             'parameter_end_position': (0, 76),
+             'body_start_position': (0, 76),
+             'end_position': (0, 77),
+             'is_pure': False,
+             'is_declaration': True,
+             'parameter_list':
+                 ({'type': 'unsigned', 'name': 'a', 'row': 0},
+                  {'type': 'short', 'name': 'b', 'row': 0},
+                  {'type': 'long', 'name': 'c', 'row': 0},
+                  {'type': 'long long short unsigned int', 'name': '', 'row': 0})})
+
+        # Parameter types with modifiers and no parameter names.
+        function_state = self.perform_function_detection(
+            ['virtual void determineARIADropEffects(Vector<String>*&, const unsigned long int*&, const MediaPlayer::Preload, Other<Other2, Other3<P1, P2> >, int);'],
+            {'name': 'determineARIADropEffects',
+             'modifiers_and_return_type': 'virtual void',
+             'parameter_start_position': (0, 37),
+             'function_name_start_position': (0, 13),
+             'parameter_end_position': (0, 147),
+             'body_start_position': (0, 147),
+             'end_position': (0, 148),
+             'is_pure': False,
+             'is_declaration': True,
+             'parameter_list':
+                 ({'type': 'Vector<String>*&', 'name': '', 'row': 0},
+                  {'type': 'const unsigned long int*&', 'name': '', 'row': 0},
+                  {'type': 'const MediaPlayer::Preload', 'name': '', 'row': 0},
+                  {'type': 'Other<Other2, Other3<P1, P2> >', 'name': '', 'row': 0},
+                  {'type': 'int', 'name': '', 'row': 0})})
+
+        # Try parsing a function with a very complex definition.
+        function_state = self.perform_function_detection(
+            ['#define MyMacro(a) a',
+             'virtual',
+             'AnotherTemplate<Class1, Class2> aFunctionName(PassRefPtr<MyClass> paramName,',
+             'const Other1Class& foo,',
+             'const ComplexTemplate<Class1, NestedTemplate<P1, P2> >* const * param = new ComplexTemplate<Class1, NestedTemplate<P1, P2> >(34, 42),',
+             'int* myCount = 0);'],
+            {'name': 'aFunctionName',
+             'modifiers_and_return_type': 'virtual AnotherTemplate<Class1, Class2>',
+             'function_name_start_position': (2, 32),
+             'parameter_start_position': (2, 45),
+             'parameter_end_position': (5, 17),
+             'body_start_position': (5, 17),
+             'end_position': (5, 18),
+             'is_pure': False,
+             'is_declaration': True,
+             'parameter_list':
+                 ({'type': 'PassRefPtr<MyClass>', 'name': 'paramName', 'row': 2},
+                  {'type': 'const Other1Class&', 'name': 'foo', 'row': 3},
+                  {'type': 'const ComplexTemplate<Class1, NestedTemplate<P1, P2> >* const *', 'name': 'param', 'row': 4},
+                  {'type': 'int*', 'name': 'myCount', 'row': 5})},
+            detection_line=2)
+
+
+class CppStyleTest(CppStyleTestBase):
+
+    def test_asm_lines_ignored(self):
+        self.assert_lint(
+            '__asm mov [registration], eax',
+            '')
+
+    # Test get line width.
+    def test_get_line_width(self):
+        self.assertEquals(0, cpp_style.get_line_width(''))
+        self.assertEquals(10, cpp_style.get_line_width(u'x' * 10))
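+        # Full-width characters should count as two columns: six wide
+        # characters plus four '|' separators gives a width of 16.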
+        self.assertEquals(16, cpp_style.get_line_width(u'都|道|府|県|支庁'))
+
+    def test_find_next_multi_line_comment_start(self):
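+        # The helper should return the index of the line that opens a /*
+        # comment, or len(lines) when none is found; a /* inside a string
+        # literal does not count.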
+        self.assertEquals(1, cpp_style.find_next_multi_line_comment_start([''], 0))
+
+        lines = ['a', 'b', '/* c']
+        self.assertEquals(2, cpp_style.find_next_multi_line_comment_start(lines, 0))
+
+        lines = ['char a[] = "/*";']  # not recognized as a comment.
+        self.assertEquals(1, cpp_style.find_next_multi_line_comment_start(lines, 0))
+
+    def test_find_next_multi_line_comment_end(self):
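+        # Symmetrically, this should return the index of the line that closes
+        # a multi-line comment, or len(lines) when no end is found.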
+        self.assertEquals(1, cpp_style.find_next_multi_line_comment_end([''], 0))
+        lines = ['a', 'b', ' c */']
+        self.assertEquals(2, cpp_style.find_next_multi_line_comment_end(lines, 0))
+
+    def test_remove_multi_line_comments_from_range(self):
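+        # The commented lines should be replaced in place with '// dummy'
+        # placeholders so that the total line count stays the same.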
+        lines = ['a', '  /* comment ', ' * still comment', ' comment */   ', 'b']
+        cpp_style.remove_multi_line_comments_from_range(lines, 1, 4)
+        self.assertEquals(['a', '// dummy', '// dummy', '// dummy', 'b'], lines)
+
+    def test_position(self):
+        position = cpp_style.Position(3, 4)
+        self.assert_positions_equal(position, (3, 4))
+        self.assertEquals(position.row, 3)
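+        # Positions should order by row first, then by column.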
+        self.assertTrue(position > cpp_style.Position(position.row - 1, position.column + 1))
+        self.assertTrue(position > cpp_style.Position(position.row, position.column - 1))
+        self.assertTrue(position < cpp_style.Position(position.row, position.column + 1))
+        self.assertTrue(position < cpp_style.Position(position.row + 1, position.column - 1))
+        self.assertEquals(position.__str__(), '(3, 4)')
+
+    def test_rfind_in_lines(self):
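+        # _rfind_in_lines scans backwards from start_position and should
+        # return the position just past the end of the last match of the
+        # pattern, falling back to not_found_position when there is no match.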
+        not_found_position = cpp_style.Position(10, 11)
+        start_position = cpp_style.Position(2, 2)
+        lines = ['ab', 'ace', 'test']
+        self.assertEquals(not_found_position, cpp_style._rfind_in_lines('st', lines, start_position, not_found_position))
+        self.assertTrue(cpp_style.Position(1, 1) == cpp_style._rfind_in_lines('a', lines, start_position, not_found_position))
+        self.assertEquals(cpp_style.Position(2, 2), cpp_style._rfind_in_lines('(te|a)', lines, start_position, not_found_position))
+
+    def test_close_expression(self):
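+        # Given the position of an opening bracket, close_expression should
+        # return the position just past the matching closing bracket; when the
+        # position is not an opener or no match exists, the column is -1 on
+        # the row after the last line.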
+        self.assertEquals(cpp_style.Position(1, -1), cpp_style.close_expression([')('], cpp_style.Position(0, 1)))
+        self.assertEquals(cpp_style.Position(1, -1), cpp_style.close_expression([') ()'], cpp_style.Position(0, 1)))
+        self.assertEquals(cpp_style.Position(0, 4), cpp_style.close_expression([')[)]'], cpp_style.Position(0, 1)))
+        self.assertEquals(cpp_style.Position(0, 5), cpp_style.close_expression(['}{}{}'], cpp_style.Position(0, 3)))
+        self.assertEquals(cpp_style.Position(1, 1), cpp_style.close_expression(['}{}{', '}'], cpp_style.Position(0, 3)))
+        self.assertEquals(cpp_style.Position(2, -1), cpp_style.close_expression(['][][', ' '], cpp_style.Position(0, 3)))
+
+    def test_spaces_at_end_of_line(self):
+        self.assert_lint(
+            '// Hello there ',
+            'Line ends in whitespace.  Consider deleting these extra spaces.'
+            '  [whitespace/end_of_line] [4]')
+
+    # Test C-style cast cases.
+    def test_cstyle_cast(self):
+        self.assert_lint(
+            'int a = (int)1.0;',
+            'Using C-style cast.  Use static_cast<int>(...) instead'
+            '  [readability/casting] [4]')
+        self.assert_lint(
+            'int *a = (int *)DEFINED_VALUE;',
+            'Using C-style cast.  Use reinterpret_cast<int *>(...) instead'
+            '  [readability/casting] [4]', 'foo.c')
+        self.assert_lint(
+            'uint16 a = (uint16)1.0;',
+            'Using C-style cast.  Use static_cast<uint16>(...) instead'
+            '  [readability/casting] [4]')
+        self.assert_lint(
+            'int32 a = (int32)1.0;',
+            'Using C-style cast.  Use static_cast<int32>(...) instead'
+            '  [readability/casting] [4]')
+        self.assert_lint(
+            'uint64 a = (uint64)1.0;',
+            'Using C-style cast.  Use static_cast<uint64>(...) instead'
+            '  [readability/casting] [4]')
+
+    # Test taking address of casts (runtime/casting)
+    def test_runtime_casting(self):
+        self.assert_lint(
+            'int* x = &static_cast<int*>(foo);',
+            'Are you taking an address of a cast?  '
+            'This is dangerous: could be a temp var.  '
+            'Take the address before doing the cast, rather than after'
+            '  [runtime/casting] [4]')
+
+        self.assert_lint(
+            'int* x = &dynamic_cast<int *>(foo);',
+            ['Are you taking an address of a cast?  '
+             'This is dangerous: could be a temp var.  '
+             'Take the address before doing the cast, rather than after'
+             '  [runtime/casting] [4]',
+             'Do not use dynamic_cast<>.  If you need to cast within a class '
+             'hierarchy, use static_cast<> to upcast.  Google doesn\'t support '
+             'RTTI.  [runtime/rtti] [5]'])
+
+        self.assert_lint(
+            'int* x = &reinterpret_cast<int *>(foo);',
+            'Are you taking an address of a cast?  '
+            'This is dangerous: could be a temp var.  '
+            'Take the address before doing the cast, rather than after'
+            '  [runtime/casting] [4]')
+
+        # It's OK to cast an address.
+        self.assert_lint(
+            'int* x = reinterpret_cast<int *>(&foo);',
+            '')
+
+    def test_runtime_selfinit(self):
+        self.assert_lint(
+            'Foo::Foo(Bar r, Bel l) : r_(r_), l_(l_) { }',
+            'You seem to be initializing a member variable with itself.'
+            '  [runtime/init] [4]')
+        self.assert_lint(
+            'Foo::Foo(Bar r, Bel l) : r_(r), l_(l) { }',
+            '')
+        self.assert_lint(
+            'Foo::Foo(Bar r) : r_(r), l_(r_), ll_(l_) { }',
+            '')
+
+    def test_runtime_rtti(self):
+        statement = 'int* x = dynamic_cast<int*>(&foo);'
+        error_message = (
+            'Do not use dynamic_cast<>.  If you need to cast within a class '
+            'hierarchy, use static_cast<> to upcast.  Google doesn\'t support '
+            'RTTI.  [runtime/rtti] [5]')
+        # dynamic_cast is disallowed in most files.
+        self.assert_language_rules_check('foo.cpp', statement, error_message)
+        self.assert_language_rules_check('foo.h', statement, error_message)
+
+    # Test for static_cast readability.
+    def test_static_cast_readability(self):
+        self.assert_lint(
+            'Text* x = static_cast<Text*>(foo);',
+            'Consider using toText helper function in WebCore/dom/Text.h '
+            'instead of static_cast<Text*>'
+            '  [readability/check] [4]')
+
+    # We cannot test this functionality because of differences in
+    # function definitions.  In any case, we may never enable this.
+    #
+    # # Test for unnamed arguments in a method.
+    # def test_check_for_unnamed_params(self):
+    #   message = ('All parameters should be named in a function'
+    #              '  [readability/function] [3]')
+    #   self.assert_lint('virtual void A(int*) const;', message)
+    #   self.assert_lint('virtual void B(void (*fn)(int*));', message)
+    #   self.assert_lint('virtual void C(int*);', message)
+    #   self.assert_lint('void *(*f)(void *) = x;', message)
+    #   self.assert_lint('void Method(char*) {', message)
+    #   self.assert_lint('void Method(char*);', message)
+    #   self.assert_lint('void Method(char* /*x*/);', message)
+    #   self.assert_lint('typedef void (*Method)(int32);', message)
+    #   self.assert_lint('static void operator delete[](void*) throw();', message)
+    #
+    #   self.assert_lint('virtual void D(int* p);', '')
+    #   self.assert_lint('void operator delete(void* x) throw();', '')
+    #   self.assert_lint('void Method(char* x)\n{', '')
+    #   self.assert_lint('void Method(char* /*x*/)\n{', '')
+    #   self.assert_lint('void Method(char* x);', '')
+    #   self.assert_lint('typedef void (*Method)(int32 x);', '')
+    #   self.assert_lint('static void operator delete[](void* x) throw();', '')
+    #   self.assert_lint('static void operator delete[](void* /*x*/) throw();', '')
+    #
+    #   # This one should technically warn, but doesn't because the function
+    #   # pointer is confusing.
+    #   self.assert_lint('virtual void E(void (*fn)(int* p));', '')
+
+    # Test deprecated casts such as int(d)
+    def test_deprecated_cast(self):
+        self.assert_lint(
+            'int a = int(2.2);',
+            'Using deprecated casting style.  '
+            'Use static_cast<int>(...) instead'
+            '  [readability/casting] [4]')
+        # Checks for false positives...
+        self.assert_lint(
+            'int a = int(); // Constructor, o.k.',
+            '')
+        self.assert_lint(
+            'X::X() : a(int()) { } // default Constructor, o.k.',
+            '')
+        self.assert_lint(
+            'operator bool(); // Conversion operator, o.k.',
+            '')
+
+    # The second parameter to a gMock method definition is a function signature
+    # that often looks like a bad cast but should not be picked up by lint.
+    def test_mock_method(self):
+        self.assert_lint(
+            'MOCK_METHOD0(method, int());',
+            '')
+        self.assert_lint(
+            'MOCK_CONST_METHOD1(method, float(string));',
+            '')
+        self.assert_lint(
+            'MOCK_CONST_METHOD2_T(method, double(float, float));',
+            '')
+
+    # Test sizeof(type) cases.
+    def test_sizeof_type(self):
+        self.assert_lint(
+            'sizeof(int);',
+            'Using sizeof(type).  Use sizeof(varname) instead if possible'
+            '  [runtime/sizeof] [1]')
+        self.assert_lint(
+            'sizeof(int *);',
+            'Using sizeof(type).  Use sizeof(varname) instead if possible'
+            '  [runtime/sizeof] [1]')
+
+    # Test typedef cases.  There was a bug where cpp_style misidentified
+    # a typedef for a pointer to a function as a C-style cast and produced
+    # false-positive error messages.
+    def test_typedef_for_pointer_to_function(self):
+        self.assert_lint(
+            'typedef void (*Func)(int x);',
+            '')
+        self.assert_lint(
+            'typedef void (*Func)(int *x);',
+            '')
+        self.assert_lint(
+            'typedef void Func(int x);',
+            '')
+        self.assert_lint(
+            'typedef void Func(int *x);',
+            '')
+
+    def test_include_what_you_use_no_implementation_files(self):
+        code = 'std::vector<int> foo;'
+        self.assertEquals('Add #include <vector> for vector<>'
+                          '  [build/include_what_you_use] [4]',
+                          self.perform_include_what_you_use(code, 'foo.h'))
+        self.assertEquals('',
+                          self.perform_include_what_you_use(code, 'foo.cpp'))
+
+    def test_include_what_you_use(self):
+        self.assert_include_what_you_use(
+            '''#include <vector>
+               std::vector<int> foo;
+            ''',
+            '')
+        self.assert_include_what_you_use(
+            '''#include <map>
+               std::pair<int,int> foo;
+            ''',
+            '')
+        self.assert_include_what_you_use(
+            '''#include <multimap>
+               std::pair<int,int> foo;
+            ''',
+            '')
+        self.assert_include_what_you_use(
+            '''#include <hash_map>
+               std::pair<int,int> foo;
+            ''',
+            '')
+        self.assert_include_what_you_use(
+            '''#include <utility>
+               std::pair<int,int> foo;
+            ''',
+            '')
+        self.assert_include_what_you_use(
+            '''#include <vector>
+               DECLARE_string(foobar);
+            ''',
+            '')
+        self.assert_include_what_you_use(
+            '''#include <vector>
+               DEFINE_string(foobar, "", "");
+            ''',
+            '')
+        self.assert_include_what_you_use(
+            '''#include <vector>
+               std::pair<int,int> foo;
+            ''',
+            'Add #include <utility> for pair<>'
+            '  [build/include_what_you_use] [4]')
+        self.assert_include_what_you_use(
+            '''#include "base/foobar.h"
+               std::vector<int> foo;
+            ''',
+            'Add #include <vector> for vector<>'
+            '  [build/include_what_you_use] [4]')
+        self.assert_include_what_you_use(
+            '''#include <vector>
+               std::set<int> foo;
+            ''',
+            'Add #include <set> for set<>'
+            '  [build/include_what_you_use] [4]')
+        self.assert_include_what_you_use(
+            '''#include "base/foobar.h"
+              hash_map<int, int> foobar;
+            ''',
+            'Add #include <hash_map> for hash_map<>'
+            '  [build/include_what_you_use] [4]')
+        self.assert_include_what_you_use(
+            '''#include "base/foobar.h"
+               bool foobar = std::less<int>(0,1);
+            ''',
+            'Add #include <functional> for less<>'
+            '  [build/include_what_you_use] [4]')
+        self.assert_include_what_you_use(
+            '''#include "base/foobar.h"
+               bool foobar = min<int>(0,1);
+            ''',
+            'Add #include <algorithm> for min  [build/include_what_you_use] [4]')
+        self.assert_include_what_you_use(
+            'void a(const string &foobar);',
+            'Add #include <string> for string  [build/include_what_you_use] [4]')
+        self.assert_include_what_you_use(
+            '''#include "base/foobar.h"
+               bool foobar = swap(0,1);
+            ''',
+            'Add #include <algorithm> for swap  [build/include_what_you_use] [4]')
+        self.assert_include_what_you_use(
+            '''#include "base/foobar.h"
+               bool foobar = transform(a.begin(), a.end(), b.start(), Foo);
+            ''',
+            'Add #include <algorithm> for transform  '
+            '[build/include_what_you_use] [4]')
+        self.assert_include_what_you_use(
+            '''#include "base/foobar.h"
+               bool foobar = min_element(a.begin(), a.end());
+            ''',
+            'Add #include <algorithm> for min_element  '
+            '[build/include_what_you_use] [4]')
+        self.assert_include_what_you_use(
+            '''foo->swap(0,1);
+               foo.swap(0,1);
+            ''',
+            '')
+        self.assert_include_what_you_use(
+            '''#include <string>
+               void a(const std::multimap<int,string> &foobar);
+            ''',
+            'Add #include <map> for multimap<>'
+            '  [build/include_what_you_use] [4]')
+        self.assert_include_what_you_use(
+            '''#include <queue>
+               void a(const std::priority_queue<int> &foobar);
+            ''',
+            '')
+        self.assert_include_what_you_use(
+             '''#include "base/basictypes.h"
+                #include "base/port.h"
+                #include <assert.h>
+                #include <string>
+                #include <vector>
+                vector<string> hajoa;''', '')
+        self.assert_include_what_you_use(
+            '''#include <string>
+               int i = numeric_limits<int>::max()
+            ''',
+            'Add #include <limits> for numeric_limits<>'
+            '  [build/include_what_you_use] [4]')
+        self.assert_include_what_you_use(
+            '''#include <limits>
+               int i = numeric_limits<int>::max()
+            ''',
+            '')
+
+        # Test the UpdateIncludeState code path.
+        mock_header_contents = ['#include "blah/foo.h"', '#include "blah/bar.h"']
+        message = self.perform_include_what_you_use(
+            '#include "config.h"\n'
+            '#include "blah/a.h"\n',
+            filename='blah/a.cpp',
+            io=MockIo(mock_header_contents))
+        self.assertEquals(message, '')
+
+        mock_header_contents = ['#include <set>']
+        message = self.perform_include_what_you_use(
+            '''#include "config.h"
+               #include "blah/a.h"
+
+               std::set<int> foo;''',
+            filename='blah/a.cpp',
+            io=MockIo(mock_header_contents))
+        self.assertEquals(message, '')
+
+        # If there's just a .cpp file and the header can't be found, then it's ok.
+        message = self.perform_include_what_you_use(
+            '''#include "config.h"
+               #include "blah/a.h"
+
+               std::set<int> foo;''',
+            filename='blah/a.cpp')
+        self.assertEquals(message, '')
+
+        # Make sure we find the headers with relative paths.
+        mock_header_contents = ['']
+        message = self.perform_include_what_you_use(
+            '''#include "config.h"
+               #include "%s%sa.h"
+
+               std::set<int> foo;''' % (os.path.basename(os.getcwd()), os.path.sep),
+            filename='a.cpp',
+            io=MockIo(mock_header_contents))
+        self.assertEquals(message, 'Add #include <set> for set<>  '
+                                   '[build/include_what_you_use] [4]')
+
+    def test_files_belong_to_same_module(self):
+        f = cpp_style.files_belong_to_same_module
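+        # The helper should return a (same_module, common_path) pair;
+        # prepending common_path to the header name gives its full path.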
+        self.assertEquals((True, ''), f('a.cpp', 'a.h'))
+        self.assertEquals((True, ''), f('base/google.cpp', 'base/google.h'))
+        self.assertEquals((True, ''), f('base/google_test.cpp', 'base/google.h'))
+        self.assertEquals((True, ''),
+                          f('base/google_unittest.cpp', 'base/google.h'))
+        self.assertEquals((True, ''),
+                          f('base/internal/google_unittest.cpp',
+                            'base/public/google.h'))
+        self.assertEquals((True, 'xxx/yyy/'),
+                          f('xxx/yyy/base/internal/google_unittest.cpp',
+                            'base/public/google.h'))
+        self.assertEquals((True, 'xxx/yyy/'),
+                          f('xxx/yyy/base/google_unittest.cpp',
+                            'base/public/google.h'))
+        self.assertEquals((True, ''),
+                          f('base/google_unittest.cpp', 'base/google-inl.h'))
+        self.assertEquals((True, '/home/build/google3/'),
+                          f('/home/build/google3/base/google.cpp', 'base/google.h'))
+
+        self.assertEquals((False, ''),
+                          f('/home/build/google3/base/google.cpp', 'basu/google.h'))
+        self.assertEquals((False, ''), f('a.cpp', 'b.h'))
+
+    def test_cleanse_line(self):
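+        # cleanse_comments should strip both //- and /* */-style comment text
+        # from a line of code.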
+        self.assertEquals('int foo = 0;  ',
+                          cpp_style.cleanse_comments('int foo = 0;  // danger!'))
+        self.assertEquals('int o = 0;',
+                          cpp_style.cleanse_comments('int /* foo */ o = 0;'))
+        self.assertEquals('foo(int a, int b);',
+                          cpp_style.cleanse_comments('foo(int a /* abc */, int b);'))
+        self.assertEqual('f(a, b);',
+                         cpp_style.cleanse_comments('f(a, /* name */ b);'))
+        self.assertEqual('f(a, b);',
+                         cpp_style.cleanse_comments('f(a /* name */, b);'))
+        self.assertEqual('f(a, b);',
+                         cpp_style.cleanse_comments('f(a, /* name */b);'))
+
+    def test_multi_line_comments(self):
+        # missing explicit is bad
+        self.assert_multi_line_lint(
+            r'''int a = 0;
+                /* multi-liner
+                class Foo {
+                Foo(int f);  // should cause a lint warning in code
+                }
+            */ ''',
+        '')
+        self.assert_multi_line_lint(
+            '''\
+            /* int a = 0; multi-liner
+            static const int b = 0;''',
+            ['Could not find end of multi-line comment'
+             '  [readability/multiline_comment] [5]',
+             'Complex multi-line /*...*/-style comment found. '
+             'Lint may give bogus warnings.  Consider replacing these with '
+             '//-style comments, with #if 0...#endif, or with more clearly '
+             'structured multi-line comments.  [readability/multiline_comment] [5]'])
+        self.assert_multi_line_lint(r'''    /* multi-line comment''',
+                                    ['Could not find end of multi-line comment'
+                                     '  [readability/multiline_comment] [5]',
+                                     'Complex multi-line /*...*/-style comment found. '
+                                     'Lint may give bogus warnings.  Consider replacing these with '
+                                     '//-style comments, with #if 0...#endif, or with more clearly '
+                                     'structured multi-line comments.  [readability/multiline_comment] [5]'])
+        self.assert_multi_line_lint(r'''    // /* comment, but not multi-line''', '')
+
+    def test_multiline_strings(self):
+        multiline_string_error_message = (
+            'Multi-line string ("...") found.  This lint script doesn\'t '
+            'do well with such strings, and may give bogus warnings.  They\'re '
+            'ugly and unnecessary, and you should use concatenation instead".'
+            '  [readability/multiline_string] [5]')
+
+        file_path = 'mydir/foo.cpp'
+
+        error_collector = ErrorCollector(self.assert_)
+        self.process_file_data(file_path, 'cpp',
+                               ['const char* str = "This is a\\',
+                                ' multiline string.";'],
+                               error_collector)
+        self.assertEquals(
+            2,  # One per line.
+            error_collector.result_list().count(multiline_string_error_message))
+
+    # Test non-explicit single-argument constructors
+    def test_explicit_single_argument_constructors(self):
+        # missing explicit is bad
+        self.assert_multi_line_lint(
+            '''\
+            class Foo {
+                Foo(int f);
+            };''',
+            'Single-argument constructors should be marked explicit.'
+            '  [runtime/explicit] [5]')
+        # missing explicit is bad, even with whitespace
+        self.assert_multi_line_lint(
+            '''\
+            class Foo {
+                Foo (int f);
+            };''',
+            ['Extra space before ( in function call  [whitespace/parens] [4]',
+             'Single-argument constructors should be marked explicit.'
+             '  [runtime/explicit] [5]'])
+        # missing explicit, with distracting comment, is still bad
+        self.assert_multi_line_lint(
+            '''\
+            class Foo {
+                Foo(int f); // simpler than Foo(blargh, blarg)
+            };''',
+            'Single-argument constructors should be marked explicit.'
+            '  [runtime/explicit] [5]')
+        # missing explicit, with qualified classname
+        self.assert_multi_line_lint(
+            '''\
+            class Qualifier::AnotherOne::Foo {
+                Foo(int f);
+            };''',
+            'Single-argument constructors should be marked explicit.'
+            '  [runtime/explicit] [5]')
+        # structs are caught as well.
+        self.assert_multi_line_lint(
+            '''\
+            struct Foo {
+                Foo(int f);
+            };''',
+            'Single-argument constructors should be marked explicit.'
+            '  [runtime/explicit] [5]')
+        # Templatized classes are caught as well.
+        self.assert_multi_line_lint(
+            '''\
+            template<typename T> class Foo {
+                Foo(int f);
+            };''',
+            'Single-argument constructors should be marked explicit.'
+            '  [runtime/explicit] [5]')
+        # proper style is okay
+        self.assert_multi_line_lint(
+            '''\
+            class Foo {
+                explicit Foo(int f);
+            };''',
+            '')
+        # two argument constructor is okay
+        self.assert_multi_line_lint(
+            '''\
+            class Foo {
+                Foo(int f, int b);
+            };''',
+            '')
+        # two argument constructor, across two lines, is okay
+        self.assert_multi_line_lint(
+            '''\
+            class Foo {
+                Foo(int f,
+                    int b);
+            };''',
+            '')
+        # non-constructor (but similar name), is okay
+        self.assert_multi_line_lint(
+            '''\
+            class Foo {
+                aFoo(int f);
+            };''',
+            '')
+        # constructor with void argument is okay
+        self.assert_multi_line_lint(
+            '''\
+            class Foo {
+                Foo(void);
+            };''',
+            '')
+        # single argument method is okay
+        self.assert_multi_line_lint(
+            '''\
+            class Foo {
+                Bar(int b);
+            };''',
+            '')
+        # comments should be ignored
+        self.assert_multi_line_lint(
+            '''\
+            class Foo {
+            // Foo(int f);
+            };''',
+            '')
+        # single argument function following class definition is okay
+        # (okay, it's not actually valid, but we don't want a false positive)
+        self.assert_multi_line_lint(
+            '''\
+            class Foo {
+                Foo(int f, int b);
+            };
+            Foo(int f);''',
+            '')
+        # single argument function is okay
+        self.assert_multi_line_lint(
+            '''static Foo(int f);''',
+            '')
+        # single argument copy constructor is okay.
+        self.assert_multi_line_lint(
+            '''\
+            class Foo {
+                Foo(const Foo&);
+            };''',
+            '')
+        self.assert_multi_line_lint(
+            '''\
+            class Foo {
+                Foo(Foo&);
+            };''',
+            '')
+
+    def test_slash_star_comment_on_single_line(self):
+        self.assert_multi_line_lint(
+            '''/* static */ Foo(int f);''',
+            '')
+        self.assert_multi_line_lint(
+            '''/*/ static */  Foo(int f);''',
+            '')
+        self.assert_multi_line_lint(
+            '''/*/ static Foo(int f);''',
+            'Could not find end of multi-line comment'
+            '  [readability/multiline_comment] [5]')
+        self.assert_multi_line_lint(
+            '''    /*/ static Foo(int f);''',
+            'Could not find end of multi-line comment'
+            '  [readability/multiline_comment] [5]')
+
+    # Test suspicious usage of "if" like this:
+    # if (a == b) {
+    #   DoSomething();
+    # } if (a == c) {   // Should be "else if".
+    #   DoSomething();  // This gets called twice if a == b && a == c.
+    # }
+    def test_suspicious_usage_of_if(self):
+        self.assert_lint(
+            '    if (a == b) {',
+            '')
+        self.assert_lint(
+            '    } if (a == b) {',
+            'Did you mean "else if"? If not, start a new line for "if".'
+            '  [readability/braces] [4]')
+
+    # Test suspicious usage of memset. Specifically, a 0
+    # as the final argument is almost certainly an error.
+    def test_suspicious_usage_of_memset(self):
+        # Normal use is okay.
+        self.assert_lint(
+            '    memset(buf, 0, sizeof(buf))',
+            '')
+
+        # A 0 as the final argument is almost certainly an error.
+        self.assert_lint(
+            '    memset(buf, sizeof(buf), 0)',
+            'Did you mean "memset(buf, 0, sizeof(buf))"?'
+            '  [runtime/memset] [4]')
+        self.assert_lint(
+            '    memset(buf, xsize * ysize, 0)',
+            'Did you mean "memset(buf, 0, xsize * ysize)"?'
+            '  [runtime/memset] [4]')
+
+        # There is legitimate test code that uses this form.
+        # This is okay since the second argument is a literal.
+        self.assert_lint(
+            "    memset(buf, 'y', 0)",
+            '')
+        self.assert_lint(
+            '    memset(buf, 4, 0)',
+            '')
+        self.assert_lint(
+            '    memset(buf, -1, 0)',
+            '')
+        self.assert_lint(
+            '    memset(buf, 0xF1, 0)',
+            '')
+        self.assert_lint(
+            '    memset(buf, 0xcd, 0)',
+            '')
+
+    def test_check_posix_threading(self):
+        self.assert_lint('sctime_r()', '')
+        self.assert_lint('strtok_r()', '')
+        self.assert_lint('    strtok_r(foo, ba, r)', '')
+        self.assert_lint('brand()', '')
+        self.assert_lint('_rand()', '')
+        self.assert_lint('.rand()', '')
+        self.assert_lint('>rand()', '')
+        self.assert_lint('rand()',
+                         'Consider using rand_r(...) instead of rand(...)'
+                         ' for improved thread safety.'
+                         '  [runtime/threadsafe_fn] [2]')
+        self.assert_lint('strtok()',
+                         'Consider using strtok_r(...) '
+                         'instead of strtok(...)'
+                         ' for improved thread safety.'
+                         '  [runtime/threadsafe_fn] [2]')
+
+    # Test potential format string bugs like printf(foo).
+    def test_format_strings(self):
+        self.assert_lint('printf("foo")', '')
+        self.assert_lint('printf("foo: %s", foo)', '')
+        self.assert_lint('DocidForPrintf(docid)', '')  # Should not trigger.
+        self.assert_lint(
+            'printf(foo)',
+            'Potential format string bug. Do printf("%s", foo) instead.'
+            '  [runtime/printf] [4]')
+        self.assert_lint(
+            'printf(foo.c_str())',
+            'Potential format string bug. '
+            'Do printf("%s", foo.c_str()) instead.'
+            '  [runtime/printf] [4]')
+        self.assert_lint(
+            'printf(foo->c_str())',
+            'Potential format string bug. '
+            'Do printf("%s", foo->c_str()) instead.'
+            '  [runtime/printf] [4]')
+        self.assert_lint(
+            'StringPrintf(foo)',
+            'Potential format string bug. Do StringPrintf("%s", foo) instead.'
+            '  [runtime/printf] [4]')
+
+    # Variable-length arrays are not permitted.
+    def test_variable_length_array_detection(self):
+        errmsg = ('Do not use variable-length arrays.  Use an appropriately named '
+                  "('k' followed by CamelCase) compile-time constant for the size."
+                  '  [runtime/arrays] [1]')
+
+        self.assert_lint('int a[any_old_variable];', errmsg)
+        self.assert_lint('int doublesize[some_var * 2];', errmsg)
+        self.assert_lint('int a[afunction()];', errmsg)
+        self.assert_lint('int a[function(kMaxFooBars)];', errmsg)
+        self.assert_lint('bool aList[items_->size()];', errmsg)
+        self.assert_lint('namespace::Type buffer[len+1];', errmsg)
+
+        self.assert_lint('int a[64];', '')
+        self.assert_lint('int a[0xFF];', '')
+        self.assert_lint('int first[256], second[256];', '')
+        self.assert_lint('int arrayName[kCompileTimeConstant];', '')
+        self.assert_lint('char buf[somenamespace::kBufSize];', '')
+        self.assert_lint('int arrayName[ALL_CAPS];', '')
+        self.assert_lint('AClass array1[foo::bar::ALL_CAPS];', '')
+        self.assert_lint('int a[kMaxStrLen + 1];', '')
+        self.assert_lint('int a[sizeof(foo)];', '')
+        self.assert_lint('int a[sizeof(*foo)];', '')
+        self.assert_lint('int a[sizeof foo];', '')
+        self.assert_lint('int a[sizeof(struct Foo)];', '')
+        self.assert_lint('int a[128 - sizeof(const bar)];', '')
+        self.assert_lint('int a[(sizeof(foo) * 4)];', '')
+        self.assert_lint('int a[(arraysize(fixed_size_array)/2) << 1];', 'Missing spaces around /  [whitespace/operators] [3]')
+        self.assert_lint('delete a[some_var];', '')
+        self.assert_lint('return a[some_var];', '')
+
+    # Brace usage
+    def test_braces(self):
+        # Braces shouldn't be followed by a ';' unless they're defining a struct
+        # or initializing an array.
+        self.assert_lint('int a[3] = { 1, 2, 3 };', '')
+        self.assert_lint(
+            '''\
+            const int foo[] =
+                {1, 2, 3 };''',
+            '')
+        # For a single line, an unmatched '}' with a ';' is ignored (not enough context).
+        self.assert_multi_line_lint(
+            '''\
+            int a[3] = { 1,
+                2,
+                3 };''',
+            '')
+        self.assert_multi_line_lint(
+            '''\
+            int a[2][3] = { { 1, 2 },
+                { 3, 4 } };''',
+            '')
+        self.assert_multi_line_lint(
+            '''\
+            int a[2][3] =
+                { { 1, 2 },
+                { 3, 4 } };''',
+            '')
+
+    # CHECK/EXPECT_TRUE/EXPECT_FALSE replacements
+    def test_check_check(self):
+        self.assert_lint('CHECK(x == 42)',
+                         'Consider using CHECK_EQ instead of CHECK(a == b)'
+                         '  [readability/check] [2]')
+        self.assert_lint('CHECK(x != 42)',
+                         'Consider using CHECK_NE instead of CHECK(a != b)'
+                         '  [readability/check] [2]')
+        self.assert_lint('CHECK(x >= 42)',
+                         'Consider using CHECK_GE instead of CHECK(a >= b)'
+                         '  [readability/check] [2]')
+        self.assert_lint('CHECK(x > 42)',
+                         'Consider using CHECK_GT instead of CHECK(a > b)'
+                         '  [readability/check] [2]')
+        self.assert_lint('CHECK(x <= 42)',
+                         'Consider using CHECK_LE instead of CHECK(a <= b)'
+                         '  [readability/check] [2]')
+        self.assert_lint('CHECK(x < 42)',
+                         'Consider using CHECK_LT instead of CHECK(a < b)'
+                         '  [readability/check] [2]')
+
+        self.assert_lint('DCHECK(x == 42)',
+                         'Consider using DCHECK_EQ instead of DCHECK(a == b)'
+                         '  [readability/check] [2]')
+        self.assert_lint('DCHECK(x != 42)',
+                         'Consider using DCHECK_NE instead of DCHECK(a != b)'
+                         '  [readability/check] [2]')
+        self.assert_lint('DCHECK(x >= 42)',
+                         'Consider using DCHECK_GE instead of DCHECK(a >= b)'
+                         '  [readability/check] [2]')
+        self.assert_lint('DCHECK(x > 42)',
+                         'Consider using DCHECK_GT instead of DCHECK(a > b)'
+                         '  [readability/check] [2]')
+        self.assert_lint('DCHECK(x <= 42)',
+                         'Consider using DCHECK_LE instead of DCHECK(a <= b)'
+                         '  [readability/check] [2]')
+        self.assert_lint('DCHECK(x < 42)',
+                         'Consider using DCHECK_LT instead of DCHECK(a < b)'
+                         '  [readability/check] [2]')
+
+        self.assert_lint(
+            'EXPECT_TRUE("42" == x)',
+            'Consider using EXPECT_EQ instead of EXPECT_TRUE(a == b)'
+            '  [readability/check] [2]')
+        self.assert_lint(
+            'EXPECT_TRUE("42" != x)',
+            'Consider using EXPECT_NE instead of EXPECT_TRUE(a != b)'
+            '  [readability/check] [2]')
+        self.assert_lint(
+            'EXPECT_TRUE(+42 >= x)',
+            'Consider using EXPECT_GE instead of EXPECT_TRUE(a >= b)'
+            '  [readability/check] [2]')
+        self.assert_lint(
+            'EXPECT_TRUE_M(-42 > x)',
+            'Consider using EXPECT_GT_M instead of EXPECT_TRUE_M(a > b)'
+            '  [readability/check] [2]')
+        self.assert_lint(
+            'EXPECT_TRUE_M(42U <= x)',
+            'Consider using EXPECT_LE_M instead of EXPECT_TRUE_M(a <= b)'
+            '  [readability/check] [2]')
+        self.assert_lint(
+            'EXPECT_TRUE_M(42L < x)',
+            'Consider using EXPECT_LT_M instead of EXPECT_TRUE_M(a < b)'
+            '  [readability/check] [2]')
+
+        self.assert_lint(
+            'EXPECT_FALSE(x == 42)',
+            'Consider using EXPECT_NE instead of EXPECT_FALSE(a == b)'
+            '  [readability/check] [2]')
+        self.assert_lint(
+            'EXPECT_FALSE(x != 42)',
+            'Consider using EXPECT_EQ instead of EXPECT_FALSE(a != b)'
+            '  [readability/check] [2]')
+        self.assert_lint(
+            'EXPECT_FALSE(x >= 42)',
+            'Consider using EXPECT_LT instead of EXPECT_FALSE(a >= b)'
+            '  [readability/check] [2]')
+        self.assert_lint(
+            'ASSERT_FALSE(x > 42)',
+            'Consider using ASSERT_LE instead of ASSERT_FALSE(a > b)'
+            '  [readability/check] [2]')
+        self.assert_lint(
+            'ASSERT_FALSE(x <= 42)',
+            'Consider using ASSERT_GT instead of ASSERT_FALSE(a <= b)'
+            '  [readability/check] [2]')
+        self.assert_lint(
+            'ASSERT_FALSE_M(x < 42)',
+            'Consider using ASSERT_GE_M instead of ASSERT_FALSE_M(a < b)'
+            '  [readability/check] [2]')
+
+        self.assert_lint('CHECK(some_iterator == obj.end())', '')
+        self.assert_lint('EXPECT_TRUE(some_iterator == obj.end())', '')
+        self.assert_lint('EXPECT_FALSE(some_iterator == obj.end())', '')
+
+        self.assert_lint('CHECK(CreateTestFile(dir, (1 << 20)));', '')
+        self.assert_lint('CHECK(CreateTestFile(dir, (1 >> 20)));', '')
+
+        self.assert_lint('CHECK(x<42)',
+                         ['Missing spaces around <'
+                          '  [whitespace/operators] [3]',
+                          'Consider using CHECK_LT instead of CHECK(a < b)'
+                          '  [readability/check] [2]'])
+        self.assert_lint('CHECK(x>42)',
+                         'Consider using CHECK_GT instead of CHECK(a > b)'
+                         '  [readability/check] [2]')
+
+        self.assert_lint(
+            '    EXPECT_TRUE(42 < x) // Random comment.',
+            'Consider using EXPECT_LT instead of EXPECT_TRUE(a < b)'
+            '  [readability/check] [2]')
+        self.assert_lint(
+            'EXPECT_TRUE( 42 < x )',
+            ['Extra space after ( in function call'
+             '  [whitespace/parens] [4]',
+             'Consider using EXPECT_LT instead of EXPECT_TRUE(a < b)'
+             '  [readability/check] [2]'])
+        self.assert_lint(
+            'CHECK("foo" == "foo")',
+            'Consider using CHECK_EQ instead of CHECK(a == b)'
+            '  [readability/check] [2]')
+
+        self.assert_lint('CHECK_EQ("foo", "foo")', '')
+
+    def test_brace_at_begin_of_line(self):
+        self.assert_lint('{',
+                         'This { should be at the end of the previous line'
+                         '  [whitespace/braces] [4]')
+        self.assert_multi_line_lint(
+            '#endif\n'
+            '{\n'
+            '}\n',
+            '')
+        self.assert_multi_line_lint(
+            'if (condition) {',
+            '')
+        self.assert_multi_line_lint(
+            '    MACRO1(macroArg) {',
+            '')
+        self.assert_multi_line_lint(
+            'ACCESSOR_GETTER(MessageEventPorts) {',
+            'Place brace on its own line for function definitions.  [whitespace/braces] [4]')
+        self.assert_multi_line_lint(
+            'int foo() {',
+            'Place brace on its own line for function definitions.  [whitespace/braces] [4]')
+        self.assert_multi_line_lint(
+            'int foo() const {',
+            'Place brace on its own line for function definitions.  [whitespace/braces] [4]')
+        self.assert_multi_line_lint(
+            'int foo() const OVERRIDE {',
+            'Place brace on its own line for function definitions.  [whitespace/braces] [4]')
+        self.assert_multi_line_lint(
+            'int foo() OVERRIDE {',
+            'Place brace on its own line for function definitions.  [whitespace/braces] [4]')
+        self.assert_multi_line_lint(
+            'int foo() const\n'
+            '{\n'
+            '}\n',
+            '')
+        self.assert_multi_line_lint(
+            'int foo() OVERRIDE\n'
+            '{\n'
+            '}\n',
+            '')
+        self.assert_multi_line_lint(
+            'if (condition\n'
+            '    && condition2\n'
+            '    && condition3) {\n'
+            '}\n',
+            '')
+
+    def test_mismatching_spaces_in_parens(self):
+        self.assert_lint('if (foo ) {', 'Extra space before ) in if'
+                         '  [whitespace/parens] [5]')
+        self.assert_lint('switch ( foo) {', 'Extra space after ( in switch'
+                         '  [whitespace/parens] [5]')
+        self.assert_lint('for (foo; ba; bar ) {', 'Extra space before ) in for'
+                         '  [whitespace/parens] [5]')
+        self.assert_lint('for ((foo); (ba); (bar) ) {', 'Extra space before ) in for'
+                         '  [whitespace/parens] [5]')
+        self.assert_lint('for (; foo; bar) {', '')
+        self.assert_lint('for (; (foo); (bar)) {', '')
+        self.assert_lint('for ( ; foo; bar) {', '')
+        self.assert_lint('for ( ; (foo); (bar)) {', '')
+        self.assert_lint('for ( ; foo; bar ) {', 'Extra space before ) in for'
+                         '  [whitespace/parens] [5]')
+        self.assert_lint('for ( ; (foo); (bar) ) {', 'Extra space before ) in for'
+                         '  [whitespace/parens] [5]')
+        self.assert_lint('for (foo; bar; ) {', '')
+        self.assert_lint('for ((foo); (bar); ) {', '')
+        self.assert_lint('foreach (foo, foos ) {', 'Extra space before ) in foreach'
+                         '  [whitespace/parens] [5]')
+        self.assert_lint('foreach ( foo, foos) {', 'Extra space after ( in foreach'
+                         '  [whitespace/parens] [5]')
+        self.assert_lint('while (  foo) {', 'Extra space after ( in while'
+                         '  [whitespace/parens] [5]')
+
+    def test_spacing_for_fncall(self):
+        self.assert_lint('if (foo) {', '')
+        self.assert_lint('for (foo;bar;baz) {', '')
+        self.assert_lint('foreach (foo, foos) {', '')
+        self.assert_lint('while (foo) {', '')
+        self.assert_lint('switch (foo) {', '')
+        self.assert_lint('new (RenderArena()) RenderInline(document())', '')
+        self.assert_lint('foo( bar)', 'Extra space after ( in function call'
+                         '  [whitespace/parens] [4]')
+        self.assert_lint('foobar( \\', '')
+        self.assert_lint('foobar(     \\', '')
+        self.assert_lint('( a + b)', 'Extra space after ('
+                         '  [whitespace/parens] [2]')
+        self.assert_lint('((a+b))', '')
+        self.assert_lint('foo (foo)', 'Extra space before ( in function call'
+                         '  [whitespace/parens] [4]')
+        self.assert_lint('#elif (foo(bar))', '')
+        self.assert_lint('#elif (foo(bar) && foo(baz))', '')
+        self.assert_lint('typedef foo (*foo)(foo)', '')
+        self.assert_lint('typedef foo (*foo12bar_)(foo)', '')
+        self.assert_lint('typedef foo (Foo::*bar)(foo)', '')
+        self.assert_lint('foo (Foo::*bar)(',
+                         'Extra space before ( in function call'
+                         '  [whitespace/parens] [4]')
+        self.assert_lint('typedef foo (Foo::*bar)(', '')
+        self.assert_lint('(foo)(bar)', '')
+        self.assert_lint('Foo (*foo)(bar)', '')
+        self.assert_lint('Foo (*foo)(Bar bar,', '')
+        self.assert_lint('char (*p)[sizeof(foo)] = &foo', '')
+        self.assert_lint('char (&ref)[sizeof(foo)] = &foo', '')
+        self.assert_lint('const char32 (*table[])[6];', '')
+
+    def test_spacing_before_braces(self):
+        self.assert_lint('if (foo){', 'Missing space before {'
+                         '  [whitespace/braces] [5]')
+        self.assert_lint('for{', 'Missing space before {'
+                         '  [whitespace/braces] [5]')
+        self.assert_lint('for {', '')
+        self.assert_lint('EXPECT_DEBUG_DEATH({', '')
+
+    def test_spacing_between_braces(self):
+        self.assert_lint('    { }', '')
+        self.assert_lint('    {}', 'Missing space inside { }.  [whitespace/braces] [5]')
+        self.assert_lint('    {   }', 'Too many spaces inside { }.  [whitespace/braces] [5]')
+
+    def test_spacing_around_else(self):
+        self.assert_lint('}else {', 'Missing space before else'
+                         '  [whitespace/braces] [5]')
+        self.assert_lint('} else{', 'Missing space before {'
+                         '  [whitespace/braces] [5]')
+        self.assert_lint('} else {', '')
+        self.assert_lint('} else if', '')
+
+    def test_spacing_for_binary_ops(self):
+        self.assert_lint('if (foo<=bar) {', 'Missing spaces around <='
+                         '  [whitespace/operators] [3]')
+        self.assert_lint('if (foo<bar) {', 'Missing spaces around <'
+                         '  [whitespace/operators] [3]')
+        self.assert_lint('if (foo<bar->baz) {', 'Missing spaces around <'
+                         '  [whitespace/operators] [3]')
+        self.assert_lint('if (foo<bar->bar) {', 'Missing spaces around <'
+                         '  [whitespace/operators] [3]')
+        self.assert_lint('typedef hash_map<Foo, Bar', 'Missing spaces around <'
+                         '  [whitespace/operators] [3]')
+        self.assert_lint('typedef hash_map<FoooooType, BaaaaarType,', '')
+        self.assert_lint('a<Foo> t+=b;', 'Missing spaces around +='
+                         '  [whitespace/operators] [3]')
+        self.assert_lint('a<Foo> t-=b;', 'Missing spaces around -='
+                         '  [whitespace/operators] [3]')
+        self.assert_lint('a<Foo*> t*=b;', 'Missing spaces around *='
+                         '  [whitespace/operators] [3]')
+        self.assert_lint('a<Foo*> t/=b;', 'Missing spaces around /='
+                         '  [whitespace/operators] [3]')
+        self.assert_lint('a<Foo*> t|=b;', 'Missing spaces around |='
+                         '  [whitespace/operators] [3]')
+        self.assert_lint('a<Foo*> t&=b;', 'Missing spaces around &='
+                         '  [whitespace/operators] [3]')
+        self.assert_lint('a<Foo*> t<<=b;', 'Missing spaces around <<='
+                         '  [whitespace/operators] [3]')
+        self.assert_lint('a<Foo*> t>>=b;', 'Missing spaces around >>='
+                         '  [whitespace/operators] [3]')
+        self.assert_lint('a<Foo*> t>>=&b|c;', 'Missing spaces around >>='
+                         '  [whitespace/operators] [3]')
+        self.assert_lint('a<Foo*> t<<=*b/c;', 'Missing spaces around <<='
+                         '  [whitespace/operators] [3]')
+        self.assert_lint('a<Foo> t -= b;', '')
+        self.assert_lint('a<Foo> t += b;', '')
+        self.assert_lint('a<Foo*> t *= b;', '')
+        self.assert_lint('a<Foo*> t /= b;', '')
+        self.assert_lint('a<Foo*> t |= b;', '')
+        self.assert_lint('a<Foo*> t &= b;', '')
+        self.assert_lint('a<Foo*> t <<= b;', '')
+        self.assert_lint('a<Foo*> t >>= b;', '')
+        self.assert_lint('a<Foo*> t >>= &b|c;', 'Missing spaces around |'
+                         '  [whitespace/operators] [3]')
+        self.assert_lint('a<Foo*> t <<= *b/c;', 'Missing spaces around /'
+                         '  [whitespace/operators] [3]')
+        self.assert_lint('a<Foo*> t <<= b/c; //Test', [
+                         'Should have a space between // and comment  '
+                         '[whitespace/comments] [4]', 'Missing'
+                         ' spaces around /  [whitespace/operators] [3]'])
+        self.assert_lint('a<Foo*> t <<= b||c;  //Test', ['One space before end'
+                         ' of line comments  [whitespace/comments] [5]',
+                         'Should have a space between // and comment  '
+                         '[whitespace/comments] [4]',
+                         'Missing spaces around ||  [whitespace/operators] [3]'])
+        self.assert_lint('a<Foo*> t <<= b&&c; // Test', 'Missing spaces around'
+                         ' &&  [whitespace/operators] [3]')
+        self.assert_lint('a<Foo*> t <<= b&&&c; // Test', 'Missing spaces around'
+                         ' &&  [whitespace/operators] [3]')
+        self.assert_lint('a<Foo*> t <<= b&&*c; // Test', 'Missing spaces around'
+                         ' &&  [whitespace/operators] [3]')
+        self.assert_lint('a<Foo*> t <<= b && *c; // Test', '')
+        self.assert_lint('a<Foo*> t <<= b && &c; // Test', '')
+        self.assert_lint('a<Foo*> t <<= b || &c;  /*Test', 'Complex multi-line '
+                         '/*...*/-style comment found. Lint may give bogus '
+                         'warnings.  Consider replacing these with //-style'
+                         ' comments, with #if 0...#endif, or with more clearly'
+                         ' structured multi-line comments.  [readability/multiline_comment] [5]')
+        self.assert_lint('a<Foo&> t <<= &b | &c;', '')
+        self.assert_lint('a<Foo*> t <<= &b & &c; // Test', '')
+        self.assert_lint('a<Foo*> t <<= *b / &c; // Test', '')
+        self.assert_lint('if (a=b == 1)', 'Missing spaces around =  [whitespace/operators] [4]')
+        self.assert_lint('a = 1<<20', 'Missing spaces around <<  [whitespace/operators] [3]')
+        self.assert_lint('if (a = b == 1)', '')
+        self.assert_lint('a = 1 << 20', '')
+        self.assert_multi_line_lint('#include <sys/io.h>\n', '')
+        self.assert_multi_line_lint('#import <foo/bar.h>\n', '')
+
+    def test_operator_methods(self):
+        self.assert_lint('String operator+(const String&, const String&);', '')
+        self.assert_lint('String operator/(const String&, const String&);', '')
+        self.assert_lint('bool operator==(const String&, const String&);', '')
+        self.assert_lint('String& operator-=(const String&, const String&);', '')
+        self.assert_lint('String& operator+=(const String&, const String&);', '')
+        self.assert_lint('String& operator*=(const String&, const String&);', '')
+        self.assert_lint('String& operator%=(const String&, const String&);', '')
+        self.assert_lint('String& operator&=(const String&, const String&);', '')
+        self.assert_lint('String& operator<<=(const String&, const String&);', '')
+        self.assert_lint('String& operator>>=(const String&, const String&);', '')
+        self.assert_lint('String& operator|=(const String&, const String&);', '')
+        self.assert_lint('String& operator^=(const String&, const String&);', '')
+
+    def test_spacing_before_last_semicolon(self):
+        self.assert_lint('call_function() ;',
+                         'Extra space before last semicolon. If this should be an '
+                         'empty statement, use { } instead.'
+                         '  [whitespace/semicolon] [5]')
+        self.assert_lint('while (true) ;',
+                         'Extra space before last semicolon. If this should be an '
+                         'empty statement, use { } instead.'
+                         '  [whitespace/semicolon] [5]')
+        self.assert_lint('default:;',
+                         'Semicolon defining empty statement. Use { } instead.'
+                         '  [whitespace/semicolon] [5]')
+        self.assert_lint('        ;',
+                         'Line contains only semicolon. If this should be an empty '
+                         'statement, use { } instead.'
+                         '  [whitespace/semicolon] [5]')
+        self.assert_lint('for (int i = 0; ;', '')
+
+    # Static or global STL strings.
+    def test_static_or_global_stlstrings(self):
+        self.assert_lint('string foo;',
+                         'For a static/global string constant, use a C style '
+                         'string instead: "char foo[]".'
+                         '  [runtime/string] [4]')
+        self.assert_lint('string kFoo = "hello"; // English',
+                         'For a static/global string constant, use a C style '
+                         'string instead: "char kFoo[]".'
+                         '  [runtime/string] [4]')
+        self.assert_lint('static string foo;',
+                         'For a static/global string constant, use a C style '
+                         'string instead: "static char foo[]".'
+                         '  [runtime/string] [4]')
+        self.assert_lint('static const string foo;',
+                         'For a static/global string constant, use a C style '
+                         'string instead: "static const char foo[]".'
+                         '  [runtime/string] [4]')
+        self.assert_lint('string Foo::bar;',
+                         'For a static/global string constant, use a C style '
+                         'string instead: "char Foo::bar[]".'
+                         '  [runtime/string] [4]')
+        # Rare case.
+        self.assert_lint('string foo("foobar");',
+                         'For a static/global string constant, use a C style '
+                         'string instead: "char foo[]".'
+                         '  [runtime/string] [4]')
+        # Should not catch local or member variables.
+        self.assert_lint('    string foo', '')
+        # Should not catch functions.
+        self.assert_lint('string EmptyString() { return ""; }', '')
+        self.assert_lint('string EmptyString () { return ""; }', '')
+        self.assert_lint('string VeryLongNameFunctionSometimesEndsWith(\n'
+                         '    VeryLongNameType veryLongNameVariable) { }', '')
+        self.assert_lint('template<>\n'
+                         'string FunctionTemplateSpecialization<SomeType>(\n'
+                         '    int x) { return ""; }', '')
+        self.assert_lint('template<>\n'
+                         'string FunctionTemplateSpecialization<vector<A::B>* >(\n'
+                         '    int x) { return ""; }', '')
+
+        # Should not catch methods of template classes.
+        self.assert_lint('string Class<Type>::Method() const\n'
+                         '{\n'
+                         '    return "";\n'
+                         '}\n', '')
+        self.assert_lint('string Class<Type>::Method(\n'
+                         '    int arg) const\n'
+                         '{\n'
+                         '    return "";\n'
+                         '}\n', '')
+
+    def test_no_spaces_in_function_calls(self):
+        self.assert_lint('TellStory(1, 3);',
+                         '')
+        self.assert_lint('TellStory(1, 3 );',
+                         'Extra space before )'
+                         '  [whitespace/parens] [2]')
+        self.assert_lint('TellStory(1 /* wolf */, 3 /* pigs */);',
+                         '')
+        self.assert_multi_line_lint('#endif\n    );',
+                                    '')
+
+    def test_one_spaces_between_code_and_comments(self):
+        self.assert_lint('} // namespace foo',
+                         '')
+        self.assert_lint('}// namespace foo',
+                         'One space before end of line comments'
+                         '  [whitespace/comments] [5]')
+        self.assert_lint('printf("foo"); // Outside quotes.',
+                         '')
+        self.assert_lint('int i = 0; // Having one space is fine.', '')
+        self.assert_lint('int i = 0;  // Having two spaces is bad.',
+                         'One space before end of line comments'
+                         '  [whitespace/comments] [5]')
+        self.assert_lint('int i = 0;   // Having three spaces is bad.',
+                         'One space before end of line comments'
+                         '  [whitespace/comments] [5]')
+        self.assert_lint('// Top level comment', '')
+        self.assert_lint('    // Line starts with four spaces.', '')
+        self.assert_lint('foo();\n'
+                         '{ // A scope is opening.', '')
+        self.assert_lint('    foo();\n'
+                         '    { // An indented scope is opening.', '')
+        self.assert_lint('if (foo) { // not a pure scope',
+                         '')
+        self.assert_lint('printf("// In quotes.")', '')
+        self.assert_lint('printf("\\"%s // In quotes.")', '')
+        self.assert_lint('printf("%s", "// In quotes.")', '')
+
+    def test_one_spaces_after_punctuation_in_comments(self):
+        self.assert_lint('int a; // This is a sentence.',
+                         '')
+        self.assert_lint('int a; // This is a sentence.  ',
+                         'Line ends in whitespace.  Consider deleting these extra spaces.  [whitespace/end_of_line] [4]')
+        self.assert_lint('int a; // This is a sentence. This is a another sentence.',
+                         '')
+        self.assert_lint('int a; // This is a sentence.  This is a another sentence.',
+                         'Should have only a single space after a punctuation in a comment.  [whitespace/comments] [5]')
+        self.assert_lint('int a; // This is a sentence!  This is a another sentence.',
+                         'Should have only a single space after a punctuation in a comment.  [whitespace/comments] [5]')
+        self.assert_lint('int a; // Why did I write this?  This is a another sentence.',
+                         'Should have only a single space after a punctuation in a comment.  [whitespace/comments] [5]')
+        self.assert_lint('int a; // Elementary,  my dear.',
+                         'Should have only a single space after a punctuation in a comment.  [whitespace/comments] [5]')
+        self.assert_lint('int a; // The following should be clear:  Is it?',
+                         'Should have only a single space after a punctuation in a comment.  [whitespace/comments] [5]')
+        self.assert_lint('int a; // Look at the follow semicolon;  I hope this gives an error.',
+                         'Should have only a single space after a punctuation in a comment.  [whitespace/comments] [5]')
+
+    def test_space_after_comment_marker(self):
+        self.assert_lint('//', '')
+        self.assert_lint('//x', 'Should have a space between // and comment'
+                         '  [whitespace/comments] [4]')
+        self.assert_lint('// x', '')
+        self.assert_lint('//----', '')
+        self.assert_lint('//====', '')
+        self.assert_lint('//////', '')
+        self.assert_lint('////// x', '')
+        self.assert_lint('/// x', '')
+        self.assert_lint('////x', 'Should have a space between // and comment'
+                         '  [whitespace/comments] [4]')
+
+    def test_newline_at_eof(self):
+        def do_test(self, data, is_missing_eof):
+            error_collector = ErrorCollector(self.assert_)
+            self.process_file_data('foo.cpp', 'cpp', data.split('\n'),
+                                   error_collector)
+            # The warning appears only once.
+            self.assertEquals(
+                int(is_missing_eof),
+                error_collector.results().count(
+                    'Could not find a newline character at the end of the file.'
+                    '  [whitespace/ending_newline] [5]'))
+
+        do_test(self, '// Newline\n// at EOF\n', False)
+        do_test(self, '// No newline\n// at EOF', True)
+
+    def test_invalid_utf8(self):
+        def do_test(self, raw_bytes, has_invalid_utf8):
+            error_collector = ErrorCollector(self.assert_)
+            self.process_file_data('foo.cpp', 'cpp',
+                                   unicode(raw_bytes, 'utf8', 'replace').split('\n'),
+                                   error_collector)
+            # The warning appears only once.
+            self.assertEquals(
+                int(has_invalid_utf8),
+                error_collector.results().count(
+                    'Line contains invalid UTF-8'
+                    ' (or Unicode replacement character).'
+                    '  [readability/utf8] [5]'))
+
+        do_test(self, 'Hello world\n', False)
+        do_test(self, '\xe9\x8e\xbd\n', False)
+        do_test(self, '\xe9x\x8e\xbd\n', True)
+        # This is the encoding of the replacement character itself (which
+        # you can see by evaluating codecs.getencoder('utf8')(u'\ufffd')).
+        do_test(self, '\xef\xbf\xbd\n', True)
+
+    def test_is_blank_line(self):
+        self.assert_(cpp_style.is_blank_line(''))
+        self.assert_(cpp_style.is_blank_line(' '))
+        self.assert_(cpp_style.is_blank_line(' \t\r\n'))
+        self.assert_(not cpp_style.is_blank_line('int a;'))
+        self.assert_(not cpp_style.is_blank_line('{'))
+
+    def test_blank_lines_check(self):
+        self.assert_blank_lines_check(['{\n', '\n', '\n', '}\n'], 1, 1)
+        self.assert_blank_lines_check(['  if (foo) {\n', '\n', '  }\n'], 1, 1)
+        self.assert_blank_lines_check(
+            ['\n', '// {\n', '\n', '\n', '// Comment\n', '{\n', '}\n'], 0, 0)
+        self.assert_blank_lines_check(['\n', 'run("{");\n', '\n'], 0, 0)
+        self.assert_blank_lines_check(['\n', '  if (foo) { return 0; }\n', '\n'], 0, 0)
+
+    def test_allow_blank_line_before_closing_namespace(self):
+        error_collector = ErrorCollector(self.assert_)
+        self.process_file_data('foo.cpp', 'cpp',
+                               ['namespace {', '', '}  // namespace'],
+                               error_collector)
+        self.assertEquals(0, error_collector.results().count(
+            'Blank line at the end of a code block.  Is this needed?'
+            '  [whitespace/blank_line] [3]'))
+
+    def test_allow_blank_line_before_if_else_chain(self):
+        error_collector = ErrorCollector(self.assert_)
+        self.process_file_data('foo.cpp', 'cpp',
+                               ['if (hoge) {',
+                                '',  # No warning
+                                '} else if (piyo) {',
+                                '',  # No warning
+                                '} else if (piyopiyo) {',
+                                '  hoge = true;',  # No warning
+                                '} else {',
+                                '',  # Warning on this line
+                                '}'],
+                               error_collector)
+        self.assertEquals(1, error_collector.results().count(
+            'Blank line at the end of a code block.  Is this needed?'
+            '  [whitespace/blank_line] [3]'))
+
+    def test_else_on_same_line_as_closing_braces(self):
+        error_collector = ErrorCollector(self.assert_)
+        self.process_file_data('foo.cpp', 'cpp',
+                               ['if (hoge) {',
+                                '',
+                                '}',
+                                ' else {',  # Warning on this line
+                                '',
+                                '}'],
+                               error_collector)
+        self.assertEquals(1, error_collector.results().count(
+            'An else should appear on the same line as the preceding }'
+            '  [whitespace/newline] [4]'))
+
+    def test_else_clause_not_on_same_line_as_else(self):
+        self.assert_lint('    else DoSomethingElse();',
+                         'Else clause should never be on same line as else '
+                         '(use 2 lines)  [whitespace/newline] [4]')
+        self.assert_lint('    else ifDoSomethingElse();',
+                         'Else clause should never be on same line as else '
+                         '(use 2 lines)  [whitespace/newline] [4]')
+        self.assert_lint('    else if (blah) {', '')
+        self.assert_lint('    variable_ends_in_else = true;', '')
+
+    def test_comma(self):
+        self.assert_lint('a = f(1,2);',
+                         'Missing space after ,  [whitespace/comma] [3]')
+        self.assert_lint('int tmp=a,a=b,b=tmp;',
+                         ['Missing spaces around =  [whitespace/operators] [4]',
+                          'Missing space after ,  [whitespace/comma] [3]'])
+        self.assert_lint('f(a, /* name */ b);', '')
+        self.assert_lint('f(a, /* name */b);', '')
+
+    def test_declaration(self):
+        self.assert_lint('int a;', '')
+        self.assert_lint('int   a;', 'Extra space between int and a  [whitespace/declaration] [3]')
+        self.assert_lint('int*  a;', 'Extra space between int* and a  [whitespace/declaration] [3]')
+        self.assert_lint('else if { }', '')
+        self.assert_lint('else   if { }', 'Extra space between else and if  [whitespace/declaration] [3]')
+
+    def test_pointer_reference_marker_location(self):
+        self.assert_lint('int* b;', '', 'foo.cpp')
+        self.assert_lint('int *b;',
+                         'Declaration has space between type name and * in int *b  [whitespace/declaration] [3]',
+                         'foo.cpp')
+        self.assert_lint('return *b;', '', 'foo.cpp')
+        self.assert_lint('delete *b;', '', 'foo.cpp')
+        self.assert_lint('int *b;', '', 'foo.c')
+        self.assert_lint('int* b;',
+                         'Declaration has space between * and variable name in int* b  [whitespace/declaration] [3]',
+                         'foo.c')
+        self.assert_lint('int& b;', '', 'foo.cpp')
+        self.assert_lint('int &b;',
+                         'Declaration has space between type name and & in int &b  [whitespace/declaration] [3]',
+                         'foo.cpp')
+        self.assert_lint('return &b;', '', 'foo.cpp')
+
+    def test_indent(self):
+        self.assert_lint('static int noindent;', '')
+        self.assert_lint('    int fourSpaceIndent;', '')
+        self.assert_lint(' int oneSpaceIndent;',
+                         'Weird number of spaces at line-start.  '
+                         'Are you using a 4-space indent?  [whitespace/indent] [3]')
+        self.assert_lint('   int threeSpaceIndent;',
+                         'Weird number of spaces at line-start.  '
+                         'Are you using a 4-space indent?  [whitespace/indent] [3]')
+        self.assert_lint(' char* oneSpaceIndent = "public:";',
+                         'Weird number of spaces at line-start.  '
+                         'Are you using a 4-space indent?  [whitespace/indent] [3]')
+        self.assert_lint(' public:',
+                         'Weird number of spaces at line-start.  '
+                         'Are you using a 4-space indent?  [whitespace/indent] [3]')
+        self.assert_lint('  public:',
+                         'Weird number of spaces at line-start.  '
+                         'Are you using a 4-space indent?  [whitespace/indent] [3]')
+        self.assert_lint('   public:',
+                         'Weird number of spaces at line-start.  '
+                         'Are you using a 4-space indent?  [whitespace/indent] [3]')
+        self.assert_multi_line_lint(
+            'class Foo {\n'
+            'public:\n'
+            '    enum Bar {\n'
+            '        Alpha,\n'
+            '        Beta,\n'
+            '#if ENABLED_BETZ\n'
+            '        Charlie,\n'
+            '#endif\n'
+            '    };\n'
+            '};',
+            '')
+
+    def test_not_alabel(self):
+        self.assert_lint('MyVeryLongNamespace::MyVeryLongClassName::', '')
+
+    def test_tab(self):
+        self.assert_lint('\tint a;',
+                         'Tab found; better to use spaces  [whitespace/tab] [1]')
+        self.assert_lint('int a = 5;\t// set a to 5',
+                         'Tab found; better to use spaces  [whitespace/tab] [1]')
+
+    def test_unnamed_namespaces_in_headers(self):
+        self.assert_language_rules_check(
+            'foo.h', 'namespace {',
+            'Do not use unnamed namespaces in header files.  See'
+            ' http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
+            ' for more information.  [build/namespaces] [4]')
+        # namespace registration macros are OK.
+        self.assert_language_rules_check('foo.h', 'namespace {  \\', '')
+        # named namespaces are OK.
+        self.assert_language_rules_check('foo.h', 'namespace foo {', '')
+        self.assert_language_rules_check('foo.h', 'namespace foonamespace {', '')
+        self.assert_language_rules_check('foo.cpp', 'namespace {', '')
+        self.assert_language_rules_check('foo.cpp', 'namespace foo {', '')
+
+    def test_build_class(self):
+        # Test that the linter can parse to the end of class definitions,
+        # and that it will report when it can't.
+        # Use multi-line linter because it performs the ClassState check.
+        self.assert_multi_line_lint(
+            'class Foo {',
+            'Failed to find complete declaration of class Foo'
+            '  [build/class] [5]')
+        # Don't warn on forward declarations of various types.
+        self.assert_multi_line_lint(
+            'class Foo;',
+            '')
+        self.assert_multi_line_lint(
+            '''\
+            struct Foo*
+                foo = NewFoo();''',
+            '')
+        # Here is an example where the linter gets confused, even though
+        # the code doesn't violate the style guide.
+        self.assert_multi_line_lint(
+            'class Foo\n'
+            '#ifdef DERIVE_FROM_GOO\n'
+            '    : public Goo {\n'
+            '#else\n'
+            '    : public Hoo {\n'
+            '#endif\n'
+            '};',
+            'Failed to find complete declaration of class Foo'
+            '  [build/class] [5]')
+
+    def test_build_end_comment(self):
+        # The crosstool compiler we currently use will fail to compile the
+        # code in this test, so we might consider removing the lint check.
+        self.assert_lint('#endif Not a comment',
+                         'Uncommented text after #endif is non-standard.'
+                         '  Use a comment.'
+                         '  [build/endif_comment] [5]')
+
+    def test_build_forward_decl(self):
+        # The crosstool compiler we currently use will fail to compile the
+        # code in this test, so we might consider removing the lint check.
+        self.assert_lint('class Foo::Goo;',
+                         'Inner-style forward declarations are invalid.'
+                         '  Remove this line.'
+                         '  [build/forward_decl] [5]')
+
+    def test_build_header_guard(self):
+        file_path = 'mydir/Foo.h'
+
+        # We can't rely on our internal stuff to get a sane path on the open source
+        # side of things, so just parse the suggested header guard out of the error
+        # message. This doesn't let us test the suggested guard itself, but it does
+        # let us exercise all of the other header guard checks.
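+        # The exact suggestion is not asserted here; the wtf/ cases further
+        # down show guards of the form TestName_h and WTF_TestName_h, so we
+        # simply capture whatever the checker proposes for mydir/Foo.h with
+        # the regular expression below.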
+        error_collector = ErrorCollector(self.assert_)
+        self.process_file_data(file_path, 'h', [], error_collector)
+        expected_guard = ''
+        matcher = re.compile(
+            'No \#ifndef header guard found\, suggested CPP variable is\: ([A-Za-z_0-9]+) ')
+        for error in error_collector.result_list():
+            matches = matcher.match(error)
+            if matches:
+                expected_guard = matches.group(1)
+                break
+
+        # Make sure we extracted something for our header guard.
+        self.assertNotEqual(expected_guard, '')
+
+        # Wrong guard
+        error_collector = ErrorCollector(self.assert_)
+        self.process_file_data(file_path, 'h',
+                               ['#ifndef FOO_H', '#define FOO_H'], error_collector)
+        self.assertEquals(
+            1,
+            error_collector.result_list().count(
+                '#ifndef header guard has wrong style, please use: %s'
+                '  [build/header_guard] [5]' % expected_guard),
+            error_collector.result_list())
+
+        # No define
+        error_collector = ErrorCollector(self.assert_)
+        self.process_file_data(file_path, 'h',
+                               ['#ifndef %s' % expected_guard], error_collector)
+        self.assertEquals(
+            1,
+            error_collector.result_list().count(
+                'No #ifndef header guard found, suggested CPP variable is: %s'
+                '  [build/header_guard] [5]' % expected_guard),
+            error_collector.result_list())
+
+        # Mismatched define
+        error_collector = ErrorCollector(self.assert_)
+        self.process_file_data(file_path, 'h',
+                               ['#ifndef %s' % expected_guard,
+                                '#define FOO_H'],
+                               error_collector)
+        self.assertEquals(
+            1,
+            error_collector.result_list().count(
+                'No #ifndef header guard found, suggested CPP variable is: %s'
+                '  [build/header_guard] [5]' % expected_guard),
+            error_collector.result_list())
+
+        # No header guard errors
+        error_collector = ErrorCollector(self.assert_)
+        self.process_file_data(file_path, 'h',
+                               ['#ifndef %s' % expected_guard,
+                                '#define %s' % expected_guard,
+                                '#endif // %s' % expected_guard],
+                               error_collector)
+        for line in error_collector.result_list():
+            if line.find('build/header_guard') != -1:
+                self.fail('Unexpected error: %s' % line)
+
+        # Completely incorrect header guard
+        error_collector = ErrorCollector(self.assert_)
+        self.process_file_data(file_path, 'h',
+                               ['#ifndef FOO',
+                                '#define FOO',
+                                '#endif  // FOO'],
+                               error_collector)
+        self.assertEquals(
+            1,
+            error_collector.result_list().count(
+                '#ifndef header guard has wrong style, please use: %s'
+                '  [build/header_guard] [5]' % expected_guard),
+            error_collector.result_list())
+
+        # Special case for flymake
+        error_collector = ErrorCollector(self.assert_)
+        self.process_file_data('mydir/Foo_flymake.h', 'h',
+                               ['#ifndef %s' % expected_guard,
+                                '#define %s' % expected_guard,
+                                '#endif // %s' % expected_guard],
+                               error_collector)
+        for line in error_collector.result_list():
+            if line.find('build/header_guard') != -1:
+                self.fail('Unexpected error: %s' % line)
+
+        error_collector = ErrorCollector(self.assert_)
+        self.process_file_data('mydir/Foo_flymake.h', 'h', [], error_collector)
+        self.assertEquals(
+            1,
+            error_collector.result_list().count(
+                'No #ifndef header guard found, suggested CPP variable is: %s'
+                '  [build/header_guard] [5]' % expected_guard),
+            error_collector.result_list())
+
+        # Verify that we don't blindly suggest the WTF prefix for all headers.
+        self.assertFalse(expected_guard.startswith('WTF_'))
+
+        # Allow the WTF_ prefix for files in that directory.
+        header_guard_filter = FilterConfiguration(('-', '+build/header_guard'))
+        error_collector = ErrorCollector(self.assert_, header_guard_filter)
+        self.process_file_data('Source/JavaScriptCore/wtf/TestName.h', 'h',
+                               ['#ifndef WTF_TestName_h', '#define WTF_TestName_h'],
+                               error_collector)
+        self.assertEquals(0, len(error_collector.result_list()),
+                          error_collector.result_list())
+
+        # Also allow a non-WTF_ prefix for files in that directory.
+        error_collector = ErrorCollector(self.assert_, header_guard_filter)
+        self.process_file_data('Source/JavaScriptCore/wtf/TestName.h', 'h',
+                               ['#ifndef TestName_h', '#define TestName_h'],
+                               error_collector)
+        self.assertEquals(0, len(error_collector.result_list()),
+                          error_collector.result_list())
+
+        # Verify that we suggest the WTF prefix version.
+        error_collector = ErrorCollector(self.assert_, header_guard_filter)
+        self.process_file_data('Source/JavaScriptCore/wtf/TestName.h', 'h',
+                               ['#ifndef BAD_TestName_h', '#define BAD_TestName_h'],
+                               error_collector)
+        self.assertEquals(
+            1,
+            error_collector.result_list().count(
+                '#ifndef header guard has wrong style, please use: WTF_TestName_h'
+                '  [build/header_guard] [5]'),
+            error_collector.result_list())
+
+    def test_build_printf_format(self):
+        self.assert_lint(
+            r'printf("\%%d", value);',
+            '%, [, (, and { are undefined character escapes.  Unescape them.'
+            '  [build/printf_format] [3]')
+
+        self.assert_lint(
+            r'snprintf(buffer, sizeof(buffer), "\[%d", value);',
+            '%, [, (, and { are undefined character escapes.  Unescape them.'
+            '  [build/printf_format] [3]')
+
+        self.assert_lint(
+            r'fprintf(file, "\(%d", value);',
+            '%, [, (, and { are undefined character escapes.  Unescape them.'
+            '  [build/printf_format] [3]')
+
+        self.assert_lint(
+            r'vsnprintf(buffer, sizeof(buffer), "\\\{%d", ap);',
+            '%, [, (, and { are undefined character escapes.  Unescape them.'
+            '  [build/printf_format] [3]')
+
+        # Don't warn if a double backslash precedes the symbol.
+        self.assert_lint(r'printf("\\%%%d", value);',
+                         '')
+
+    def test_runtime_printf_format(self):
+        self.assert_lint(
+            r'fprintf(file, "%q", value);',
+            '%q in format strings is deprecated.  Use %ll instead.'
+            '  [runtime/printf_format] [3]')
+
+        self.assert_lint(
+            r'aprintf(file, "The number is %12q", value);',
+            '%q in format strings is deprecated.  Use %ll instead.'
+            '  [runtime/printf_format] [3]')
+
+        self.assert_lint(
+            r'printf(file, "The number is" "%-12q", value);',
+            '%q in format strings is deprecated.  Use %ll instead.'
+            '  [runtime/printf_format] [3]')
+
+        self.assert_lint(
+            r'printf(file, "The number is" "%+12q", value);',
+            '%q in format strings is deprecated.  Use %ll instead.'
+            '  [runtime/printf_format] [3]')
+
+        self.assert_lint(
+            r'printf(file, "The number is" "% 12q", value);',
+            '%q in format strings is deprecated.  Use %ll instead.'
+            '  [runtime/printf_format] [3]')
+
+        self.assert_lint(
+            r'snprintf(file, "Never mix %d and %1$d parmaeters!", value);',
+            '%N$ formats are unconventional.  Try rewriting to avoid them.'
+            '  [runtime/printf_format] [2]')
+
+    def assert_lintLogCodeOnError(self, code, expected_message):
+        # Special assert_lint which logs the input code on error.
+        result = self.perform_single_line_lint(code, 'foo.cpp')
+        if result != expected_message:
+            self.fail('For code: "%s"\nGot: "%s"\nExpected: "%s"'
+                      % (code, result, expected_message))
+
+    def test_build_storage_class(self):
+        qualifiers = [None, 'const', 'volatile']
+        signs = [None, 'signed', 'unsigned']
+        types = ['void', 'char', 'int', 'float', 'double',
+                 'schar', 'int8', 'uint8', 'int16', 'uint16',
+                 'int32', 'uint32', 'int64', 'uint64']
+        storage_classes = ['auto', 'extern', 'register', 'static', 'typedef']
+
+        build_storage_class_error_message = (
+            'Storage class (static, extern, typedef, etc) should be first.'
+            '  [build/storage_class] [5]')
+
+        # Some explicit cases. Legal in C++, deprecated in C99.
+        self.assert_lint('const int static foo = 5;',
+                         build_storage_class_error_message)
+
+        self.assert_lint('char static foo;',
+                         build_storage_class_error_message)
+
+        self.assert_lint('double const static foo = 2.0;',
+                         build_storage_class_error_message)
+
+        self.assert_lint('uint64 typedef unsignedLongLong;',
+                         build_storage_class_error_message)
+
+        self.assert_lint('int register foo = 0;',
+                         build_storage_class_error_message)
+
+        # Since there are a very large number of possibilities, randomly
+        # construct declarations.
+        # Make sure that the declaration is logged if there's an error.
+        # Seed generator with an integer for absolute reproducibility.
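+        # For example, one iteration might pick other_decl_specs of
+        # ['const', 'int'] and check 'const static int;' (flagged) against
+        # 'static const int' (not flagged); the exact strings depend on the
+        # seeded choices below.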
+        random.seed(25)
+        for unused_i in range(10):
+            # Build up random list of non-storage-class declaration specs.
+            other_decl_specs = [random.choice(qualifiers), random.choice(signs),
+                                random.choice(types)]
+            # remove None
+            other_decl_specs = filter(lambda x: x is not None, other_decl_specs)
+
+            # shuffle
+            random.shuffle(other_decl_specs)
+
+            # Insert the storage class somewhere after the first specifier.
+            storage_class = random.choice(storage_classes)
+            insertion_point = random.randint(1, len(other_decl_specs))
+            decl_specs = (other_decl_specs[0:insertion_point]
+                          + [storage_class]
+                          + other_decl_specs[insertion_point:])
+
+            self.assert_lintLogCodeOnError(
+                ' '.join(decl_specs) + ';',
+                build_storage_class_error_message)
+
+            # No error is expected when the storage class comes first.
+            self.assert_lintLogCodeOnError(
+                storage_class + ' ' + ' '.join(other_decl_specs),
+                '')
+
+    def test_legal_copyright(self):
+        legal_copyright_message = (
+            'No copyright message found.  '
+            'You should have a line: "Copyright [year] <Copyright Owner>"'
+            '  [legal/copyright] [5]')
+
+        copyright_line = '// Copyright 2008 Google Inc. All Rights Reserved.'
+
+        file_path = 'mydir/googleclient/foo.cpp'
+
+        # There should be a copyright message in the first 10 lines
+        error_collector = ErrorCollector(self.assert_)
+        self.process_file_data(file_path, 'cpp', [], error_collector)
+        self.assertEquals(
+            1,
+            error_collector.result_list().count(legal_copyright_message))
+
+        error_collector = ErrorCollector(self.assert_)
+        self.process_file_data(
+            file_path, 'cpp',
+            ['' for unused_i in range(10)] + [copyright_line],
+            error_collector)
+        self.assertEquals(
+            1,
+            error_collector.result_list().count(legal_copyright_message))
+
+        # Test that the warning isn't issued if the copyright line appears early enough.
+        error_collector = ErrorCollector(self.assert_)
+        self.process_file_data(file_path, 'cpp', [copyright_line], error_collector)
+        for message in error_collector.result_list():
+            if message.find('legal/copyright') != -1:
+                self.fail('Unexpected error: %s' % message)
+
+        error_collector = ErrorCollector(self.assert_)
+        self.process_file_data(
+            file_path, 'cpp',
+            ['' for unused_i in range(9)] + [copyright_line],
+            error_collector)
+        for message in error_collector.result_list():
+            if message.find('legal/copyright') != -1:
+                self.fail('Unexpected error: %s' % message)
+
+    def test_invalid_increment(self):
+        self.assert_lint('*count++;',
+                         'Changing pointer instead of value (or unused value of '
+                         'operator*).  [runtime/invalid_increment] [5]')
+
+    # Integral bitfields must be declared with either signed or unsigned keyword.
+    def test_plain_integral_bitfields(self):
+        errmsg = ('Please declare integral type bitfields with either signed or unsigned.  [runtime/bitfields] [5]')
+
+        self.assert_lint('int a : 30;', errmsg)
+        self.assert_lint('mutable short a : 14;', errmsg)
+        self.assert_lint('const char a : 6;', errmsg)
+        self.assert_lint('long int a : 30;', errmsg)
+        self.assert_lint('int a = 1 ? 0 : 30;', '')
+
+class CleansedLinesTest(unittest.TestCase):
+    def test_init(self):
+        lines = ['Line 1',
+                 'Line 2',
+                 'Line 3 // Comment test',
+                 'Line 4 "foo"']
+
+        clean_lines = cpp_style.CleansedLines(lines)
+        self.assertEquals(lines, clean_lines.raw_lines)
+        self.assertEquals(4, clean_lines.num_lines())
+
+        self.assertEquals(['Line 1',
+                           'Line 2',
+                           'Line 3 ',
+                           'Line 4 "foo"'],
+                          clean_lines.lines)
+
+        self.assertEquals(['Line 1',
+                           'Line 2',
+                           'Line 3 ',
+                           'Line 4 ""'],
+                          clean_lines.elided)
+
+    def test_init_empty(self):
+        clean_lines = cpp_style.CleansedLines([])
+        self.assertEquals([], clean_lines.raw_lines)
+        self.assertEquals(0, clean_lines.num_lines())
+
+    def test_collapse_strings(self):
+        collapse = cpp_style.CleansedLines.collapse_strings
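+        # As the cases below illustrate, collapse_strings strips the contents
+        # of well-formed string and character literals (and bare escape
+        # sequences), leaving only the quoting structure; the lines marked
+        # '(bad)' show what remains when the quoting is unbalanced.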
+        self.assertEquals('""', collapse('""'))             # ""     (empty)
+        self.assertEquals('"""', collapse('"""'))           # """    (bad)
+        self.assertEquals('""', collapse('"xyz"'))          # "xyz"  (string)
+        self.assertEquals('""', collapse('"\\\""'))         # "\""   (string)
+        self.assertEquals('""', collapse('"\'"'))           # "'"    (string)
+        self.assertEquals('"\"', collapse('"\"'))           # "\"    (bad)
+        self.assertEquals('""', collapse('"\\\\"'))         # "\\"   (string)
+        self.assertEquals('"', collapse('"\\\\\\"'))        # "\\\"  (bad)
+        self.assertEquals('""', collapse('"\\\\\\\\"'))     # "\\\\" (string)
+
+        self.assertEquals('\'\'', collapse('\'\''))         # ''     (empty)
+        self.assertEquals('\'\'', collapse('\'a\''))        # 'a'    (char)
+        self.assertEquals('\'\'', collapse('\'\\\'\''))     # '\''   (char)
+        self.assertEquals('\'', collapse('\'\\\''))         # '\'    (bad)
+        self.assertEquals('', collapse('\\012'))            # '\012' (char)
+        self.assertEquals('', collapse('\\xfF0'))           # '\xfF0' (char)
+        self.assertEquals('', collapse('\\n'))              # '\n' (char)
+        self.assertEquals('\#', collapse('\\#'))            # '\#' (bad)
+
+        self.assertEquals('StringReplace(body, "", "");',
+                          collapse('StringReplace(body, "\\\\", "\\\\\\\\");'))
+        self.assertEquals('\'\' ""',
+                          collapse('\'"\' "foo"'))
+
+
+class OrderOfIncludesTest(CppStyleTestBase):
+    def setUp(self):
+        self.include_state = cpp_style._IncludeState()
+
+        # Stub out os.path.abspath, which the FileInfo class calls.
+        self.os_path_abspath_orig = os.path.abspath
+        self.os_path_isfile_orig = os.path.isfile
+        os.path.abspath = lambda value: value
+
+    def tearDown(self):
+        os.path.abspath = self.os_path_abspath_orig
+        os.path.isfile = self.os_path_isfile_orig
+
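+    # The tests below feed check_next_include_order one classified include
+    # at a time; as the build/include_order messages used throughout these
+    # tests put it, the expected order is config.h, the primary header, a
+    # blank line, and then the remaining headers alphabetically sorted.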
+    def test_check_next_include_order__no_config(self):
+        self.assertEqual('Header file should not contain WebCore config.h.',
+                         self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, True, True))
+
+    def test_check_next_include_order__no_self(self):
+        self.assertEqual('Header file should not contain itself.',
+                         self.include_state.check_next_include_order(cpp_style._PRIMARY_HEADER, True, True))
+        # Test actual code to make sure that header types are correctly assigned.
+        self.assert_language_rules_check('Foo.h',
+                                         '#include "Foo.h"\n',
+                                         'Header file should not contain itself. Should be: alphabetically sorted.'
+                                         '  [build/include_order] [4]')
+        self.assert_language_rules_check('FooBar.h',
+                                         '#include "Foo.h"\n',
+                                         '')
+
+    def test_check_next_include_order__likely_then_config(self):
+        self.assertEqual('Found header this file implements before WebCore config.h.',
+                         self.include_state.check_next_include_order(cpp_style._PRIMARY_HEADER, False, True))
+        self.assertEqual('Found WebCore config.h after a header this file implements.',
+                         self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, False, True))
+
+    def test_check_next_include_order__other_then_config(self):
+        self.assertEqual('Found other header before WebCore config.h.',
+                         self.include_state.check_next_include_order(cpp_style._OTHER_HEADER, False, True))
+        self.assertEqual('Found WebCore config.h after other header.',
+                         self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, False, True))
+
+    def test_check_next_include_order__config_then_other_then_likely(self):
+        self.assertEqual('', self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, False, True))
+        self.assertEqual('Found other header before a header this file implements.',
+                         self.include_state.check_next_include_order(cpp_style._OTHER_HEADER, False, True))
+        self.assertEqual('Found header this file implements after other header.',
+                         self.include_state.check_next_include_order(cpp_style._PRIMARY_HEADER, False, True))
+
+    def test_check_alphabetical_include_order(self):
+        self.assert_language_rules_check('foo.h',
+                                         '#include "a.h"\n'
+                                         '#include "c.h"\n'
+                                         '#include "b.h"\n',
+                                         'Alphabetical sorting problem.  [build/include_order] [4]')
+
+        self.assert_language_rules_check('foo.h',
+                                         '#include "a.h"\n'
+                                         '#include "b.h"\n'
+                                         '#include "c.h"\n',
+                                         '')
+
+        self.assert_language_rules_check('foo.h',
+                                         '#include <assert.h>\n'
+                                         '#include "bar.h"\n',
+                                         'Alphabetical sorting problem.  [build/include_order] [4]')
+
+        self.assert_language_rules_check('foo.h',
+                                         '#include "bar.h"\n'
+                                         '#include <assert.h>\n',
+                                         '')
+
+    def test_check_alphabetical_include_order_errors_reported_for_both_lines(self):
+        # If one of the two out-of-order header lines is filtered, the error
+        # should be reported on the other line.
+        self.assert_language_rules_check('foo.h',
+                                         '#include "a.h"\n'
+                                         '#include "c.h"\n'
+                                         '#include "b.h"\n',
+                                         'Alphabetical sorting problem.  [build/include_order] [4]',
+                                         lines_to_check=[2])
+
+        self.assert_language_rules_check('foo.h',
+                                         '#include "a.h"\n'
+                                         '#include "c.h"\n'
+                                         '#include "b.h"\n',
+                                         'Alphabetical sorting problem.  [build/include_order] [4]',
+                                         lines_to_check=[3])
+
+        # If no lines are filtered, the error should be reported only once.
+        self.assert_language_rules_check('foo.h',
+                                         '#include "a.h"\n'
+                                         '#include "c.h"\n'
+                                         '#include "b.h"\n',
+                                         'Alphabetical sorting problem.  [build/include_order] [4]')
+
+    def test_check_line_break_after_own_header(self):
+        self.assert_language_rules_check('foo.cpp',
+                                         '#include "config.h"\n'
+                                         '#include "foo.h"\n'
+                                         '#include "bar.h"\n',
+                                         'You should add a blank line after implementation file\'s own header.  [build/include_order] [4]')
+
+        self.assert_language_rules_check('foo.cpp',
+                                         '#include "config.h"\n'
+                                         '#include "foo.h"\n'
+                                         '\n'
+                                         '#include "bar.h"\n',
+                                         '')
+
+    def test_check_preprocessor_in_include_section(self):
+        self.assert_language_rules_check('foo.cpp',
+                                         '#include "config.h"\n'
+                                         '#include "foo.h"\n'
+                                         '\n'
+                                         '#ifdef BAZ\n'
+                                         '#include "baz.h"\n'
+                                         '#else\n'
+                                         '#include "foobar.h"\n'
+                                         '#endif"\n'
+                                         '#include "bar.h"\n', # Not flagged because the previous include is inside a preprocessor section.
+                                         '')
+
+        self.assert_language_rules_check('foo.cpp',
+                                         '#include "config.h"\n'
+                                         '#include "foo.h"\n'
+                                         '\n'
+                                         '#ifdef BAZ\n'
+                                         '#include "baz.h"\n'
+                                         '#endif"\n'
+                                         '#include "bar.h"\n'
+                                         '#include "a.h"\n', # Should still flag this.
+                                         'Alphabetical sorting problem.  [build/include_order] [4]')
+
+        self.assert_language_rules_check('foo.cpp',
+                                         '#include "config.h"\n'
+                                         '#include "foo.h"\n'
+                                         '\n'
+                                         '#ifdef BAZ\n'
+                                         '#include "baz.h"\n'
+                                         '#include "bar.h"\n' # Should still flag this.
+                                         '#endif"\n',
+                                         'Alphabetical sorting problem.  [build/include_order] [4]')
+
+        self.assert_language_rules_check('foo.cpp',
+                                         '#include "config.h"\n'
+                                         '#include "foo.h"\n'
+                                         '\n'
+                                         '#ifdef BAZ\n'
+                                         '#include "baz.h"\n'
+                                         '#endif"\n'
+                                         '#ifdef FOOBAR\n'
+                                         '#include "foobar.h"\n'
+                                         '#endif"\n'
+                                         '#include "bar.h"\n'
+                                         '#include "a.h"\n', # Should still flag this.
+                                         'Alphabetical sorting problem.  [build/include_order] [4]')
+
+        # Check that the sorting rules still work after an "already included" error.
+        self.assert_language_rules_check('foo.cpp',
+                                         '#include "config.h"\n'
+                                         '#include "foo.h"\n'
+                                         '\n'
+                                         '#include "foo.h"\n'
+                                         '#include "g.h"\n',
+                                         '"foo.h" already included at foo.cpp:2  [build/include] [4]')
+
+    def test_primary_header(self):
+        # File with non-existing primary header should not produce errors.
+        self.assert_language_rules_check('foo.cpp',
+                                         '#include "config.h"\n'
+                                         '\n'
+                                         '#include "bar.h"\n',
+                                         '')
+        # Pretend that header files exist.
+        os.path.isfile = lambda filename: True
+        # Missing include for existing primary header -> error.
+        self.assert_language_rules_check('foo.cpp',
+                                         '#include "config.h"\n'
+                                         '\n'
+                                         '#include "bar.h"\n',
+                                         'Found other header before a header this file implements. '
+                                         'Should be: config.h, primary header, blank line, and then '
+                                         'alphabetically sorted.  [build/include_order] [4]')
+        # Having include for existing primary header -> no error.
+        self.assert_language_rules_check('foo.cpp',
+                                         '#include "config.h"\n'
+                                         '#include "foo.h"\n'
+                                         '\n'
+                                         '#include "bar.h"\n',
+                                         '')
+
+        os.path.isfile = self.os_path_isfile_orig
+
+    def test_public_primary_header(self):
+        # A system header is not considered a primary header.
+        self.assert_language_rules_check('foo.cpp',
+                                         '#include "config.h"\n'
+                                         '#include <other/foo.h>\n'
+                                         '\n'
+                                         '#include "a.h"\n',
+                                         'Alphabetical sorting problem.  [build/include_order] [4]')
+
+        # ...except when it starts with public/.
+        self.assert_language_rules_check('foo.cpp',
+                                         '#include "config.h"\n'
+                                         '#include <public/foo.h>\n'
+                                         '\n'
+                                         '#include "a.h"\n',
+                                         '')
+
+        # Even if it starts with public/, its base name must match the source file name.
+        self.assert_language_rules_check('foo.cpp',
+                                         '#include "config.h"\n'
+                                         '#include <public/foop.h>\n'
+                                         '\n'
+                                         '#include "a.h"\n',
+                                         'Alphabetical sorting problem.  [build/include_order] [4]')
+
+    def test_check_wtf_includes(self):
+        self.assert_language_rules_check('foo.cpp',
+                                         '#include "config.h"\n'
+                                         '#include "foo.h"\n'
+                                         '\n'
+                                         '#include <wtf/Assertions.h>\n',
+                                         '')
+        self.assert_language_rules_check('foo.cpp',
+                                         '#include "config.h"\n'
+                                         '#include "foo.h"\n'
+                                         '\n'
+                                         '#include "wtf/Assertions.h"\n',
+                                         'wtf includes should be <wtf/file.h> instead of "wtf/file.h".'
+                                         '  [build/include] [4]')
+
+    def test_check_cc_includes(self):
+        self.assert_language_rules_check('bar/chromium/foo.cpp',
+                                         '#include "config.h"\n'
+                                         '#include "foo.h"\n'
+                                         '\n'
+                                         '#include "cc/CCProxy.h"\n',
+                                         'cc includes should be "CCFoo.h" instead of "cc/CCFoo.h".'
+                                         '  [build/include] [4]')
+
+    def test_classify_include(self):
+        classify_include = cpp_style._classify_include
+        include_state = cpp_style._IncludeState()
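+        # _classify_include is called with the including file's name, the
+        # included path, an is_system flag, and the include_state; the
+        # assertions below check which _*_HEADER bucket each combination
+        # falls into.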
+        self.assertEqual(cpp_style._CONFIG_HEADER,
+                         classify_include('foo/foo.cpp',
+                                          'config.h',
+                                          False, include_state))
+        self.assertEqual(cpp_style._PRIMARY_HEADER,
+                         classify_include('foo/internal/foo.cpp',
+                                          'foo/public/foo.h',
+                                          False, include_state))
+        self.assertEqual(cpp_style._PRIMARY_HEADER,
+                         classify_include('foo/internal/foo.cpp',
+                                          'foo/other/public/foo.h',
+                                          False, include_state))
+        self.assertEqual(cpp_style._OTHER_HEADER,
+                         classify_include('foo/internal/foo.cpp',
+                                          'foo/other/public/foop.h',
+                                          False, include_state))
+        self.assertEqual(cpp_style._OTHER_HEADER,
+                         classify_include('foo/foo.cpp',
+                                          'string',
+                                          True, include_state))
+        self.assertEqual(cpp_style._PRIMARY_HEADER,
+                         classify_include('fooCustom.cpp',
+                                          'foo.h',
+                                          False, include_state))
+        self.assertEqual(cpp_style._PRIMARY_HEADER,
+                         classify_include('PrefixFooCustom.cpp',
+                                          'Foo.h',
+                                          False, include_state))
+        self.assertEqual(cpp_style._MOC_HEADER,
+                         classify_include('foo.cpp',
+                                          'foo.moc',
+                                          False, include_state))
+        self.assertEqual(cpp_style._MOC_HEADER,
+                         classify_include('foo.cpp',
+                                          'moc_foo.cpp',
+                                          False, include_state))
+        # <public/foo.h> must be considered primary even if is_system is True.
+        self.assertEqual(cpp_style._PRIMARY_HEADER,
+                         classify_include('foo/foo.cpp',
+                                          'public/foo.h',
+                                          True, include_state))
+        self.assertEqual(cpp_style._OTHER_HEADER,
+                         classify_include('foo.cpp',
+                                          'foo.h',
+                                          True, include_state))
+        self.assertEqual(cpp_style._OTHER_HEADER,
+                         classify_include('foo.cpp',
+                                          'public/foop.h',
+                                          True, include_state))
+        # Qt private APIs use _p.h suffix.
+        self.assertEqual(cpp_style._PRIMARY_HEADER,
+                         classify_include('foo.cpp',
+                                          'foo_p.h',
+                                          False, include_state))
+        # Tricky example where both includes might be classified as primary.
+        self.assert_language_rules_check('ScrollbarThemeWince.cpp',
+                                         '#include "config.h"\n'
+                                         '#include "ScrollbarThemeWince.h"\n'
+                                         '\n'
+                                         '#include "Scrollbar.h"\n',
+                                         '')
+        self.assert_language_rules_check('ScrollbarThemeWince.cpp',
+                                         '#include "config.h"\n'
+                                         '#include "Scrollbar.h"\n'
+                                         '\n'
+                                         '#include "ScrollbarThemeWince.h"\n',
+                                         'Found header this file implements after a header this file implements.'
+                                         ' Should be: config.h, primary header, blank line, and then alphabetically sorted.'
+                                         '  [build/include_order] [4]')
+        self.assert_language_rules_check('ResourceHandleWin.cpp',
+                                         '#include "config.h"\n'
+                                         '#include "ResourceHandle.h"\n'
+                                         '\n'
+                                         '#include "ResourceHandleWin.h"\n',
+                                         '')
+
+    def test_try_drop_common_suffixes(self):
+        self.assertEqual('foo/foo', cpp_style._drop_common_suffixes('foo/foo-inl.h'))
+        self.assertEqual('foo/bar/foo',
+                         cpp_style._drop_common_suffixes('foo/bar/foo_inl.h'))
+        self.assertEqual('foo/foo', cpp_style._drop_common_suffixes('foo/foo.cpp'))
+        self.assertEqual('foo/foo_unusualinternal',
+                         cpp_style._drop_common_suffixes('foo/foo_unusualinternal.h'))
+        self.assertEqual('',
+                         cpp_style._drop_common_suffixes('_test.cpp'))
+        self.assertEqual('test',
+                         cpp_style._drop_common_suffixes('test.cpp'))
+
+class CheckForFunctionLengthsTest(CppStyleTestBase):
+    def setUp(self):
+        # Reducing these thresholds for the tests speeds up tests significantly.
+        self.old_normal_trigger = cpp_style._FunctionState._NORMAL_TRIGGER
+        self.old_test_trigger = cpp_style._FunctionState._TEST_TRIGGER
+
+        cpp_style._FunctionState._NORMAL_TRIGGER = 10
+        cpp_style._FunctionState._TEST_TRIGGER = 25
+
+    def tearDown(self):
+        cpp_style._FunctionState._NORMAL_TRIGGER = self.old_normal_trigger
+        cpp_style._FunctionState._TEST_TRIGGER = self.old_test_trigger
+
+    # FIXME: Eliminate the need for this function.
+    def set_min_confidence(self, min_confidence):
+        """Set new test confidence and return old test confidence."""
+        old_min_confidence = self.min_confidence
+        self.min_confidence = min_confidence
+        return old_min_confidence
+
+    def assert_function_lengths_check(self, code, expected_message):
+        """Check warnings for long function bodies are as expected.
+
+        Args:
+          code: C++ source code expected to generate a warning message.
+          expected_message: Message expected to be generated by the C++ code.
+        """
+        self.assertEquals(expected_message,
+                          self.perform_function_lengths_check(code))
+
+    def trigger_lines(self, error_level):
+        """Return number of lines needed to trigger a function length warning.
+
+        Args:
+          error_level: --v setting for cpp_style.
+
+        Returns:
+          Number of lines needed to trigger a function length warning.
+        """
+        return cpp_style._FunctionState._NORMAL_TRIGGER * 2 ** error_level
+
+    def trigger_test_lines(self, error_level):
+        """Return number of lines needed to trigger a test function length warning.
+
+        Args:
+          error_level: --v setting for cpp_style.
+
+        Returns:
+          Number of lines needed to trigger a test function length warning.
+        """
+        return cpp_style._FunctionState._TEST_TRIGGER * 2 ** error_level
+
+    def assert_function_length_check_definition(self, lines, error_level):
+        """Generate long function definition and check warnings are as expected.
+
+        Args:
+          lines: Number of lines to generate.
+          error_level:  --v setting for cpp_style.
+        """
+        trigger_level = self.trigger_lines(self.min_confidence)
+        self.assert_function_lengths_check(
+            'void test(int x)' + self.function_body(lines),
+            ('Small and focused functions are preferred: '
+             'test() has %d non-comment lines '
+             '(error triggered by exceeding %d lines).'
+             '  [readability/fn_size] [%d]'
+             % (lines, trigger_level, error_level)))
+
+    def assert_function_length_check_definition_ok(self, lines):
+        """Generate shorter function definition and check no warning is produced.
+
+        Args:
+          lines: Number of lines to generate.
+        """
+        self.assert_function_lengths_check(
+            'void test(int x)' + self.function_body(lines),
+            '')
+
+    def assert_function_length_check_at_error_level(self, error_level):
+        """Generate and check function at the trigger level for --v setting.
+
+        Args:
+          error_level: --v setting for cpp_style.
+        """
+        self.assert_function_length_check_definition(self.trigger_lines(error_level),
+                                                     error_level)
+
+    def assert_function_length_check_below_error_level(self, error_level):
+        """Generate and check function just below the trigger level for --v setting.
+
+        Args:
+          error_level: --v setting for cpp_style.
+        """
+        self.assert_function_length_check_definition(self.trigger_lines(error_level) - 1,
+                                                     error_level - 1)
+
+    def assert_function_length_check_above_error_level(self, error_level):
+        """Generate and check function just above the trigger level for --v setting.
+
+        Args:
+          error_level: --v setting for cpp_style.
+        """
+        self.assert_function_length_check_definition(self.trigger_lines(error_level) + 1,
+                                                     error_level)
+
+    def function_body(self, number_of_lines):
+        return ' {\n' + '    this_is_just_a_test();\n' * number_of_lines + '}'
+
+    def function_body_with_blank_lines(self, number_of_lines):
+        return ' {\n' + '    this_is_just_a_test();\n\n' * number_of_lines + '}'
+
+    def function_body_with_no_lints(self, number_of_lines):
+        return ' {\n' + '    this_is_just_a_test();  // NOLINT\n' * number_of_lines + '}'
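+
+    # For reference, function_body(2) above expands to:
+    #  {
+    #     this_is_just_a_test();
+    #     this_is_just_a_test();
+    # }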
+
+    # Test function length checks.
+    def test_function_length_check_declaration(self):
+        self.assert_function_lengths_check(
+            'void test();',  # Not a function definition
+            '')
+
+    def test_function_length_check_declaration_with_block_following(self):
+        self.assert_function_lengths_check(
+            ('void test();\n'
+             + self.function_body(66)),  # Not a function definition
+            '')
+
+    def test_function_length_check_class_definition(self):
+        self.assert_function_lengths_check(  # Not a function definition
+            'class Test' + self.function_body(66) + ';',
+            '')
+
+    def test_function_length_check_trivial(self):
+        self.assert_function_lengths_check(
+            'void test() {}',  # Not counted
+            '')
+
+    def test_function_length_check_empty(self):
+        self.assert_function_lengths_check(
+            'void test() {\n}',
+            '')
+
+    def test_function_length_check_definition_below_severity0(self):
+        old_min_confidence = self.set_min_confidence(0)
+        self.assert_function_length_check_definition_ok(self.trigger_lines(0) - 1)
+        self.set_min_confidence(old_min_confidence)
+
+    def test_function_length_check_definition_at_severity0(self):
+        old_min_confidence = self.set_min_confidence(0)
+        self.assert_function_length_check_definition_ok(self.trigger_lines(0))
+        self.set_min_confidence(old_min_confidence)
+
+    def test_function_length_check_definition_above_severity0(self):
+        old_min_confidence = self.set_min_confidence(0)
+        self.assert_function_length_check_above_error_level(0)
+        self.set_min_confidence(old_min_confidence)
+
+    def test_function_length_check_definition_below_severity1v0(self):
+        old_min_confidence = self.set_min_confidence(0)
+        self.assert_function_length_check_below_error_level(1)
+        self.set_min_confidence(old_min_confidence)
+
+    def test_function_length_check_definition_at_severity1v0(self):
+        old_min_confidence = self.set_min_confidence(0)
+        self.assert_function_length_check_at_error_level(1)
+        self.set_min_confidence(old_min_confidence)
+
+    def test_function_length_check_definition_below_severity1(self):
+        self.assert_function_length_check_definition_ok(self.trigger_lines(1) - 1)
+
+    def test_function_length_check_definition_at_severity1(self):
+        self.assert_function_length_check_definition_ok(self.trigger_lines(1))
+
+    def test_function_length_check_definition_above_severity1(self):
+        self.assert_function_length_check_above_error_level(1)
+
+    def test_function_length_check_definition_severity1_plus_indented(self):
+        error_level = 1
+        error_lines = self.trigger_lines(error_level) + 1
+        trigger_level = self.trigger_lines(self.min_confidence)
+        indent_spaces = '    '
+        self.assert_function_lengths_check(
+            re.sub(r'(?m)^(.)', indent_spaces + r'\1',
+                   'void test_indent(int x)\n' + self.function_body(error_lines)),
+            ('Small and focused functions are preferred: '
+             'test_indent() has %d non-comment lines '
+             '(error triggered by exceeding %d lines).'
+             '  [readability/fn_size] [%d]')
+            % (error_lines, trigger_level, error_level))
+
+    def test_function_length_check_definition_severity1_plus_blanks(self):
+        error_level = 1
+        error_lines = self.trigger_lines(error_level) + 1
+        trigger_level = self.trigger_lines(self.min_confidence)
+        self.assert_function_lengths_check(
+            'void test_blanks(int x)' + self.function_body(error_lines),
+            ('Small and focused functions are preferred: '
+             'test_blanks() has %d non-comment lines '
+             '(error triggered by exceeding %d lines).'
+             '  [readability/fn_size] [%d]')
+            % (error_lines, trigger_level, error_level))
+
+    def test_function_length_check_complex_definition_severity1(self):
+        error_level = 1
+        error_lines = self.trigger_lines(error_level) + 1
+        trigger_level = self.trigger_lines(self.min_confidence)
+        self.assert_function_lengths_check(
+            ('my_namespace::my_other_namespace::MyVeryLongTypeName<Type1, bool func(const Element*)>*\n'
+             'my_namespace::my_other_namespace<Type3, Type4>::~MyFunction<Type5<Type6, Type7> >(int arg1, char* arg2)'
+             + self.function_body(error_lines)),
+            ('Small and focused functions are preferred: '
+             'my_namespace::my_other_namespace<Type3, Type4>::~MyFunction<Type5<Type6, Type7> >()'
+             ' has %d non-comment lines '
+             '(error triggered by exceeding %d lines).'
+             '  [readability/fn_size] [%d]')
+            % (error_lines, trigger_level, error_level))
+
+    def test_function_length_check_definition_severity1_for_test(self):
+        error_level = 1
+        error_lines = self.trigger_test_lines(error_level) + 1
+        trigger_level = self.trigger_test_lines(self.min_confidence)
+        self.assert_function_lengths_check(
+            'TEST_F(Test, Mutator)' + self.function_body(error_lines),
+            ('Small and focused functions are preferred: '
+             'TEST_F(Test, Mutator) has %d non-comment lines '
+             '(error triggered by exceeding %d lines).'
+             '  [readability/fn_size] [%d]')
+            % (error_lines, trigger_level, error_level))
+
+    def test_function_length_check_definition_severity1_for_split_line_test(self):
+        error_level = 1
+        error_lines = self.trigger_test_lines(error_level) + 1
+        trigger_level = self.trigger_test_lines(self.min_confidence)
+        self.assert_function_lengths_check(
+            ('TEST_F(GoogleUpdateRecoveryRegistryProtectedTest,\n'
+             '    FixGoogleUpdate_AllValues_MachineApp)'  # note: 4 spaces
+             + self.function_body(error_lines)),
+            ('Small and focused functions are preferred: '
+             'TEST_F(GoogleUpdateRecoveryRegistryProtectedTest, '  # 1 space
+             'FixGoogleUpdate_AllValues_MachineApp) has %d non-comment lines '
+             '(error triggered by exceeding %d lines).'
+             '  [readability/fn_size] [%d]')
+            % (error_lines, trigger_level, error_level))
+
+    def test_function_length_check_definition_severity1_for_bad_test_doesnt_break(self):
+        error_level = 1
+        error_lines = self.trigger_test_lines(error_level) + 1
+        trigger_level = self.trigger_test_lines(self.min_confidence)
+        # Since the function name isn't valid, the function detection algorithm
+        # will skip it, so no error is produced.
+        self.assert_function_lengths_check(
+            ('TEST_F('
+             + self.function_body(error_lines)),
+            '')
+
+    def test_function_length_check_definition_severity1_with_embedded_no_lints(self):
+        error_level = 1
+        error_lines = self.trigger_lines(error_level) + 1
+        trigger_level = self.trigger_lines(self.min_confidence)
+        self.assert_function_lengths_check(
+            'void test(int x)' + self.function_body_with_no_lints(error_lines),
+            ('Small and focused functions are preferred: '
+             'test() has %d non-comment lines '
+             '(error triggered by exceeding %d lines).'
+             '  [readability/fn_size] [%d]')
+            % (error_lines, trigger_level, error_level))
+
+    def test_function_length_check_definition_severity1_with_no_lint(self):
+        self.assert_function_lengths_check(
+            ('void test(int x)' + self.function_body(self.trigger_lines(1))
+             + '  // NOLINT -- long function'),
+            '')
+
+    def test_function_length_check_definition_below_severity2(self):
+        self.assert_function_length_check_below_error_level(2)
+
+    def test_function_length_check_definition_severity2(self):
+        self.assert_function_length_check_at_error_level(2)
+
+    def test_function_length_check_definition_above_severity2(self):
+        self.assert_function_length_check_above_error_level(2)
+
+    def test_function_length_check_definition_below_severity3(self):
+        self.assert_function_length_check_below_error_level(3)
+
+    def test_function_length_check_definition_severity3(self):
+        self.assert_function_length_check_at_error_level(3)
+
+    def test_function_length_check_definition_above_severity3(self):
+        self.assert_function_length_check_above_error_level(3)
+
+    def test_function_length_check_definition_below_severity4(self):
+        self.assert_function_length_check_below_error_level(4)
+
+    def test_function_length_check_definition_severity4(self):
+        self.assert_function_length_check_at_error_level(4)
+
+    def test_function_length_check_definition_above_severity4(self):
+        self.assert_function_length_check_above_error_level(4)
+
+    def test_function_length_check_definition_below_severity5(self):
+        self.assert_function_length_check_below_error_level(5)
+
+    def test_function_length_check_definition_at_severity5(self):
+        self.assert_function_length_check_at_error_level(5)
+
+    def test_function_length_check_definition_above_severity5(self):
+        self.assert_function_length_check_above_error_level(5)
+
+    def test_function_length_check_definition_huge_lines(self):
+        # Error levels are capped at 5, so even exceeding the level-6 trigger
+        # is still reported at severity 5.
+        self.assert_function_length_check_definition(self.trigger_lines(6), 5)
+
+    def test_function_length_not_determinable(self):
+        # Macro invocation without terminating semicolon.
+        self.assert_function_lengths_check(
+            'MACRO(arg)',
+            '')
+
+        # Macro with underscores
+        self.assert_function_lengths_check(
+            'MACRO_WITH_UNDERSCORES(arg1, arg2, arg3)',
+            '')
+
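+        # The expectations above suggest that all-caps names such as MACRO(...)
+        # are treated as macro invocations and skipped, while a mixed-case call
+        # with no function body or ';' afterwards makes the checker report that
+        # it could not find the start of the function body.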
+        self.assert_function_lengths_check(
+            'NonMacro(arg)',
+            'Lint failed to find start of function body.'
+            '  [readability/fn_size] [5]')
+
+
+class NoNonVirtualDestructorsTest(CppStyleTestBase):
+
+    def test_no_error(self):
+        self.assert_multi_line_lint(
+            '''\
+                class Foo {
+                    virtual ~Foo();
+                    virtual void foo();
+                };''',
+            '')
+
+        self.assert_multi_line_lint(
+            '''\
+                class Foo {
+                    virtual inline ~Foo();
+                    virtual void foo();
+                };''',
+            '')
+
+        self.assert_multi_line_lint(
+            '''\
+                class Foo {
+                    inline virtual ~Foo();
+                    virtual void foo();
+                };''',
+            '')
+
+        self.assert_multi_line_lint(
+            '''\
+                class Foo::Goo {
+                    virtual ~Goo();
+                    virtual void goo();
+                };''',
+            '')
+        self.assert_multi_line_lint(
+            'class Foo { void foo(); };',
+            'More than one command on the same line  [whitespace/newline] [4]')
+        self.assert_multi_line_lint(
+            'class MyClass {\n'
+            '    int getIntValue() { ASSERT(m_ptr); return *m_ptr; }\n'
+            '};\n',
+            '')
+        self.assert_multi_line_lint(
+            'class MyClass {\n'
+            '    int getIntValue()\n'
+            '    {\n'
+            '        ASSERT(m_ptr); return *m_ptr;\n'
+            '    }\n'
+            '};\n',
+            'More than one command on the same line  [whitespace/newline] [4]')
+
+        self.assert_multi_line_lint(
+            '''\
+                class Qualified::Goo : public Foo {
+                    virtual void goo();
+                };''',
+            '')
+
+    def test_no_destructor_when_virtual_needed(self):
+        self.assert_multi_line_lint_re(
+            '''\
+                class Foo {
+                    virtual void foo();
+                };''',
+            'The class Foo probably needs a virtual destructor')
+
+    def test_destructor_non_virtual_when_virtual_needed(self):
+        self.assert_multi_line_lint_re(
+            '''\
+                class Foo {
+                    ~Foo();
+                    virtual void foo();
+                };''',
+            'The class Foo probably needs a virtual destructor')
+
+    def test_no_warn_when_derived(self):
+        self.assert_multi_line_lint(
+            '''\
+                class Foo : public Goo {
+                    virtual void foo();
+                };''',
+            '')
+
+    def test_internal_braces(self):
+        self.assert_multi_line_lint_re(
+            '''\
+                class Foo {
+                    enum Goo {
+                        GOO
+                    };
+                    virtual void foo();
+                };''',
+            'The class Foo probably needs a virtual destructor')
+
+    def test_inner_class_needs_virtual_destructor(self):
+        self.assert_multi_line_lint_re(
+            '''\
+                class Foo {
+                    class Goo {
+                        virtual void goo();
+                    };
+                };''',
+            'The class Goo probably needs a virtual destructor')
+
+    def test_outer_class_needs_virtual_destructor(self):
+        self.assert_multi_line_lint_re(
+            '''\
+                class Foo {
+                    class Goo {
+                    };
+                    virtual void foo();
+                };''',
+            'The class Foo probably needs a virtual destructor')
+
+    def test_qualified_class_needs_virtual_destructor(self):
+        self.assert_multi_line_lint_re(
+            '''\
+                class Qualified::Foo {
+                    virtual void foo();
+                };''',
+            'The class Qualified::Foo probably needs a virtual destructor')
+
+    def test_multi_line_declaration_no_error(self):
+        self.assert_multi_line_lint_re(
+            '''\
+                class Foo
+                    : public Goo {
+                    virtual void foo();
+                };''',
+            '')
+
+    def test_multi_line_declaration_with_error(self):
+        self.assert_multi_line_lint(
+            '''\
+                class Foo
+                {
+                    virtual void foo();
+                };''',
+            ['This { should be at the end of the previous line  '
+             '[whitespace/braces] [4]',
+             'The class Foo probably needs a virtual destructor due to having '
+             'virtual method(s), one declared at line 3.  [runtime/virtual] [4]'])
+
+
+class PassPtrTest(CppStyleTestBase):
+    # For http://webkit.org/coding/RefPtr.html
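+    # As the expected messages below show, local variables should never be
+    # Pass*Ptr, plain RefPtr/OwnPtr return types and by-value parameters should
+    # use the Pass* variants (references and pointers are fine), and RefPtr
+    # member variables are fine.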
+
+    def assert_pass_ptr_check(self, code, expected_message):
+        """Check warnings for Pass*Ptr are as expected.
+
+        Args:
+          code: C++ source code expected to generate a warning message.
+          expected_message: Message expected to be generated by the C++ code.
+        """
+        self.assertEquals(expected_message,
+                          self.perform_pass_ptr_check(code))
+
+    def test_pass_ref_ptr_in_function(self):
+        self.assert_pass_ptr_check(
+            'int myFunction()\n'
+            '{\n'
+            '    PassRefPtr<Type1> variable = variable2;\n'
+            '}',
+            'Local variables should never be PassRefPtr (see '
+            'http://webkit.org/coding/RefPtr.html).  [readability/pass_ptr] [5]')
+
+    def test_pass_own_ptr_in_function(self):
+        self.assert_pass_ptr_check(
+            'int myFunction()\n'
+            '{\n'
+            '    PassOwnPtr<Type1> variable = variable2;\n'
+            '}',
+            'Local variables should never be PassOwnPtr (see '
+            'http://webkit.org/coding/RefPtr.html).  [readability/pass_ptr] [5]')
+
+    def test_pass_other_type_ptr_in_function(self):
+        self.assert_pass_ptr_check(
+            'int myFunction()\n'
+            '{\n'
+            '    PassOtherTypePtr<Type1> variable;\n'
+            '}',
+            'Local variables should never be PassOtherTypePtr (see '
+            'http://webkit.org/coding/RefPtr.html).  [readability/pass_ptr] [5]')
+
+    def test_pass_ref_ptr_return_value(self):
+        self.assert_pass_ptr_check(
+            'PassRefPtr<Type1>\n'
+            'myFunction(int)\n'
+            '{\n'
+            '}',
+            '')
+        self.assert_pass_ptr_check(
+            'PassRefPtr<Type1> myFunction(int)\n'
+            '{\n'
+            '}',
+            '')
+        self.assert_pass_ptr_check(
+            'PassRefPtr<Type1> myFunction();\n',
+            '')
+        self.assert_pass_ptr_check(
+            'OwnRefPtr<Type1> myFunction();\n',
+            '')
+        self.assert_pass_ptr_check(
+            'RefPtr<Type1> myFunction(int)\n'
+            '{\n'
+            '}',
+            'The return type should use PassRefPtr instead of RefPtr.  [readability/pass_ptr] [5]')
+        self.assert_pass_ptr_check(
+            'OwnPtr<Type1> myFunction(int)\n'
+            '{\n'
+            '}',
+            'The return type should use PassOwnPtr instead of OwnPtr.  [readability/pass_ptr] [5]')
+
+    def test_ref_ptr_parameter_value(self):
+        self.assert_pass_ptr_check(
+            'int myFunction(PassRefPtr<Type1>)\n'
+            '{\n'
+            '}',
+            '')
+        self.assert_pass_ptr_check(
+            'int myFunction(RefPtr<Type1>)\n'
+            '{\n'
+            '}',
+            'The parameter type should use PassRefPtr instead of RefPtr.  [readability/pass_ptr] [5]')
+        self.assert_pass_ptr_check(
+            'int myFunction(RefPtr<Type1>&)\n'
+            '{\n'
+            '}',
+            '')
+        self.assert_pass_ptr_check(
+            'int myFunction(RefPtr<Type1>*)\n'
+            '{\n'
+            '}',
+            '')
+
+    def test_own_ptr_parameter_value(self):
+        self.assert_pass_ptr_check(
+            'int myFunction(PassOwnPtr<Type1>)\n'
+            '{\n'
+            '}',
+            '')
+        self.assert_pass_ptr_check(
+            'int myFunction(OwnPtr<Type1>)\n'
+            '{\n'
+            '}',
+            'The parameter type should use PassOwnPtr instead of OwnPtr.  [readability/pass_ptr] [5]')
+        self.assert_pass_ptr_check(
+            'int myFunction(OwnPtr<Type1>& simple)\n'
+            '{\n'
+            '}',
+            '')
+
+    def test_ref_ptr_member_variable(self):
+        self.assert_pass_ptr_check(
+            'class Foo {'
+            '    RefPtr<Type1> m_other;\n'
+            '};\n',
+            '')
+
+
+class LeakyPatternTest(CppStyleTestBase):
+
+    def assert_leaky_pattern_check(self, code, expected_message):
+        """Check warnings for leaky patterns are as expected.
+
+        Args:
+          code: C++ source code expected to generate a warning message.
+          expected_message: Message expected to be generated by the C++ code.
+        """
+        self.assertEquals(expected_message,
+                          self.perform_leaky_pattern_check(code))
+
+    def test_get_dc(self):
+        self.assert_leaky_pattern_check(
+            'HDC hdc = GetDC(hwnd);',
+            'Use the class HWndDC instead of calling GetDC to avoid potential '
+            'memory leaks.  [runtime/leaky_pattern] [5]')
+
+    def test_get_dc_ex(self):
+        self.assert_leaky_pattern_check(
+            'HDC hdc = GetDCEx(hwnd, 0, 0);',
+            'Use the class HWndDC instead of calling GetDCEx to avoid potential '
+            'memory leaks.  [runtime/leaky_pattern] [5]')
+
+    def test_own_get_dc(self):
+        self.assert_leaky_pattern_check(
+            'HWndDC hdc(hwnd);',
+            '')
+
+    def test_create_dc(self):
+        self.assert_leaky_pattern_check(
+            'HDC dc2 = ::CreateDC();',
+            'Use adoptPtr and OwnPtr<HDC> when calling CreateDC to avoid potential '
+            'memory leaks.  [runtime/leaky_pattern] [5]')
+
+        self.assert_leaky_pattern_check(
+            'adoptPtr(CreateDC());',
+            '')
+
+    def test_create_compatible_dc(self):
+        self.assert_leaky_pattern_check(
+            'HDC dc2 = CreateCompatibleDC(dc);',
+            'Use adoptPtr and OwnPtr<HDC> when calling CreateCompatibleDC to avoid potential '
+            'memory leaks.  [runtime/leaky_pattern] [5]')
+        self.assert_leaky_pattern_check(
+            'adoptPtr(CreateCompatibleDC(dc));',
+            '')
+
+
+class WebKitStyleTest(CppStyleTestBase):
+
+    # for http://webkit.org/coding/coding-style.html
+    def test_indentation(self):
+        # 1. Use spaces, not tabs. Tabs should only appear in files that
+        #    require them for semantic meaning, like Makefiles.
+        self.assert_multi_line_lint(
+            'class Foo {\n'
+            '    int goo;\n'
+            '};',
+            '')
+        self.assert_multi_line_lint(
+            'class Foo {\n'
+            '\tint goo;\n'
+            '};',
+            'Tab found; better to use spaces  [whitespace/tab] [1]')
+
+        # 2. The indent size is 4 spaces.
+        self.assert_multi_line_lint(
+            'class Foo {\n'
+            '    int goo;\n'
+            '};',
+            '')
+        self.assert_multi_line_lint(
+            'class Foo {\n'
+            '   int goo;\n'
+            '};',
+            'Weird number of spaces at line-start.  Are you using a 4-space indent?  [whitespace/indent] [3]')
+
+        # 3. In a header, code inside a namespace should not be indented.
+        self.assert_multi_line_lint(
+            'namespace WebCore {\n\n'
+            'class Document {\n'
+            '    int myVariable;\n'
+            '};\n'
+            '}',
+            '',
+            'foo.h')
+        self.assert_multi_line_lint(
+            'namespace OuterNamespace {\n'
+            '    namespace InnerNamespace {\n'
+            '    class Document {\n'
+            '};\n'
+            '};\n'
+            '}',
+            'Code inside a namespace should not be indented.  [whitespace/indent] [4]',
+            'foo.h')
+        self.assert_multi_line_lint(
+            'namespace OuterNamespace {\n'
+            '    class Document {\n'
+            '    namespace InnerNamespace {\n'
+            '};\n'
+            '};\n'
+            '}',
+            'Code inside a namespace should not be indented.  [whitespace/indent] [4]',
+            'foo.h')
+        self.assert_multi_line_lint(
+            'namespace WebCore {\n'
+            '#if 0\n'
+            '    class Document {\n'
+            '};\n'
+            '#endif\n'
+            '}',
+            'Code inside a namespace should not be indented.  [whitespace/indent] [4]',
+            'foo.h')
+        self.assert_multi_line_lint(
+            'namespace WebCore {\n'
+            'class Document {\n'
+            '};\n'
+            '}',
+            '',
+            'foo.h')
+
+        # 4. In an implementation file (files with the extension .cpp, .c
+        #    or .mm), code inside a namespace should not be indented.
+        self.assert_multi_line_lint(
+            'namespace WebCore {\n\n'
+            'Document::Foo()\n'
+            '    : foo(bar)\n'
+            '    , boo(far)\n'
+            '{\n'
+            '    stuff();\n'
+            '}',
+            '',
+            'foo.cpp')
+        self.assert_multi_line_lint(
+            'namespace OuterNamespace {\n'
+            'namespace InnerNamespace {\n'
+            'Document::Foo() { }\n'
+            '    void* p;\n'
+            '}\n'
+            '}\n',
+            'Code inside a namespace should not be indented.  [whitespace/indent] [4]',
+            'foo.cpp')
+        self.assert_multi_line_lint(
+            'namespace OuterNamespace {\n'
+            'namespace InnerNamespace {\n'
+            'Document::Foo() { }\n'
+            '}\n'
+            '    void* p;\n'
+            '}\n',
+            'Code inside a namespace should not be indented.  [whitespace/indent] [4]',
+            'foo.cpp')
+        self.assert_multi_line_lint(
+            'namespace WebCore {\n\n'
+            '    const char* foo = "start:;"\n'
+            '        "dfsfsfs";\n'
+            '}\n',
+            'Code inside a namespace should not be indented.  [whitespace/indent] [4]',
+            'foo.cpp')
+        self.assert_multi_line_lint(
+            'namespace WebCore {\n\n'
+            'const char* foo(void* a = ";", // ;\n'
+            '    void* b);\n'
+            '    void* p;\n'
+            '}\n',
+            'Code inside a namespace should not be indented.  [whitespace/indent] [4]',
+            'foo.cpp')
+        self.assert_multi_line_lint(
+            'namespace WebCore {\n\n'
+            'const char* foo[] = {\n'
+            '    "void* b);", // ;\n'
+            '    "asfdf",\n'
+            '    }\n'
+            '    void* p;\n'
+            '}\n',
+            'Code inside a namespace should not be indented.  [whitespace/indent] [4]',
+            'foo.cpp')
+        self.assert_multi_line_lint(
+            'namespace WebCore {\n\n'
+            'const char* foo[] = {\n'
+            '    "void* b);", // }\n'
+            '    "asfdf",\n'
+            '    }\n'
+            '}\n',
+            '',
+            'foo.cpp')
+        self.assert_multi_line_lint(
+            '    namespace WebCore {\n\n'
+            '    void Document::Foo()\n'
+            '    {\n'
+            'start: // infinite loops are fun!\n'
+            '        goto start;\n'
+            '    }',
+            'namespace should never be indented.  [whitespace/indent] [4]',
+            'foo.cpp')
+        self.assert_multi_line_lint(
+            'namespace WebCore {\n'
+            '    Document::Foo() { }\n'
+            '}',
+            'Code inside a namespace should not be indented.'
+            '  [whitespace/indent] [4]',
+            'foo.cpp')
+        self.assert_multi_line_lint(
+            'namespace WebCore {\n'
+            '#define abc(x) x; \\\n'
+            '    x\n'
+            '}',
+            '',
+            'foo.cpp')
+        self.assert_multi_line_lint(
+            'namespace WebCore {\n'
+            '#define abc(x) x; \\\n'
+            '    x\n'
+            '    void* x;'
+            '}',
+            'Code inside a namespace should not be indented.  [whitespace/indent] [4]',
+            'foo.cpp')
+
+        # 5. A case label should line up with its switch statement. The
+        #    case statement is indented.
+        self.assert_multi_line_lint(
+            '    switch (condition) {\n'
+            '    case fooCondition:\n'
+            '    case barCondition:\n'
+            '        i++;\n'
+            '        break;\n'
+            '    default:\n'
+            '        i--;\n'
+            '    }\n',
+            '')
+        self.assert_multi_line_lint(
+            '    switch (condition) {\n'
+            '    case fooCondition:\n'
+            '        switch (otherCondition) {\n'
+            '        default:\n'
+            '            return;\n'
+            '        }\n'
+            '    default:\n'
+            '        i--;\n'
+            '    }\n',
+            '')
+        self.assert_multi_line_lint(
+            '    switch (condition) {\n'
+            '    case fooCondition: break;\n'
+            '    default: return;\n'
+            '    }\n',
+            '')
+        self.assert_multi_line_lint(
+            '    switch (condition) {\n'
+            '        case fooCondition:\n'
+            '        case barCondition:\n'
+            '            i++;\n'
+            '            break;\n'
+            '        default:\n'
+            '            i--;\n'
+            '    }\n',
+            'A case label should not be indented, but line up with its switch statement.'
+            '  [whitespace/indent] [4]')
+        self.assert_multi_line_lint(
+            '    switch (condition) {\n'
+            '        case fooCondition:\n'
+            '            break;\n'
+            '    default:\n'
+            '            i--;\n'
+            '    }\n',
+            'A case label should not be indented, but line up with its switch statement.'
+            '  [whitespace/indent] [4]')
+        self.assert_multi_line_lint(
+            '    switch (condition) {\n'
+            '    case fooCondition:\n'
+            '    case barCondition:\n'
+            '        switch (otherCondition) {\n'
+            '            default:\n'
+            '            return;\n'
+            '        }\n'
+            '    default:\n'
+            '        i--;\n'
+            '    }\n',
+            'A case label should not be indented, but line up with its switch statement.'
+            '  [whitespace/indent] [4]')
+        self.assert_multi_line_lint(
+            '    switch (condition) {\n'
+            '    case fooCondition:\n'
+            '    case barCondition:\n'
+            '    i++;\n'
+            '    break;\n\n'
+            '    default:\n'
+            '    i--;\n'
+            '    }\n',
+            'Non-label code inside switch statements should be indented.'
+            '  [whitespace/indent] [4]')
+        self.assert_multi_line_lint(
+            '    switch (condition) {\n'
+            '    case fooCondition:\n'
+            '    case barCondition:\n'
+            '        switch (otherCondition) {\n'
+            '        default:\n'
+            '        return;\n'
+            '        }\n'
+            '    default:\n'
+            '        i--;\n'
+            '    }\n',
+            'Non-label code inside switch statements should be indented.'
+            '  [whitespace/indent] [4]')
+
+        # 6. Boolean expressions at the same nesting level that span
+        #   multiple lines should have their operators on the left side of
+        #   the line instead of the right side.
+        self.assert_multi_line_lint(
+            '    return attr->name() == srcAttr\n'
+            '        || attr->name() == lowsrcAttr;\n',
+            '')
+        self.assert_multi_line_lint(
+            '    return attr->name() == srcAttr ||\n'
+            '        attr->name() == lowsrcAttr;\n',
+            'Boolean expressions that span multiple lines should have their '
+            'operators on the left side of the line instead of the right side.'
+            '  [whitespace/operators] [4]')
+
+    def test_spacing(self):
+        # 1. Do not place spaces around unary operators.
+        self.assert_multi_line_lint(
+            'i++;',
+            '')
+        self.assert_multi_line_lint(
+            'i ++;',
+            'Extra space for operator  ++;  [whitespace/operators] [4]')
+
+        # 2. Do place spaces around binary and ternary operators.
+        self.assert_multi_line_lint(
+            'y = m * x + b;',
+            '')
+        self.assert_multi_line_lint(
+            'f(a, b);',
+            '')
+        self.assert_multi_line_lint(
+            'c = a | b;',
+            '')
+        self.assert_multi_line_lint(
+            'return condition ? 1 : 0;',
+            '')
+        self.assert_multi_line_lint(
+            'y=m*x+b;',
+            'Missing spaces around =  [whitespace/operators] [4]')
+        self.assert_multi_line_lint(
+            'f(a,b);',
+            'Missing space after ,  [whitespace/comma] [3]')
+        self.assert_multi_line_lint(
+            'c = a|b;',
+            'Missing spaces around |  [whitespace/operators] [3]')
+        # FIXME: We cannot catch this lint error.
+        # self.assert_multi_line_lint(
+        #     'return condition ? 1:0;',
+        #     '')
+
+        # 3. Place spaces between control statements and their parentheses.
+        self.assert_multi_line_lint(
+            '    if (condition)\n'
+            '        doIt();\n',
+            '')
+        self.assert_multi_line_lint(
+            '    if(condition)\n'
+            '        doIt();\n',
+            'Missing space before ( in if(  [whitespace/parens] [5]')
+
+        # 4. Do not place spaces between a function and its parentheses,
+        #    or between a parenthesis and its content.
+        self.assert_multi_line_lint(
+            'f(a, b);',
+            '')
+        self.assert_multi_line_lint(
+            'f (a, b);',
+            'Extra space before ( in function call  [whitespace/parens] [4]')
+        self.assert_multi_line_lint(
+            'f( a, b );',
+            ['Extra space after ( in function call  [whitespace/parens] [4]',
+             'Extra space before )  [whitespace/parens] [2]'])
+
+    def test_line_breaking(self):
+        # 1. Each statement should get its own line.
+        self.assert_multi_line_lint(
+            '    x++;\n'
+            '    y++;\n'
+            '    if (condition);\n'
+            '        doIt();\n',
+            '')
+        self.assert_multi_line_lint(
+            '    if (condition) \\\n'
+            '        doIt();\n',
+            '')
+        self.assert_multi_line_lint(
+            '    x++; y++;',
+            'More than one command on the same line  [whitespace/newline] [4]')
+        self.assert_multi_line_lint(
+            '    if (condition) doIt();\n',
+            'More than one command on the same line in if  [whitespace/parens] [4]')
+        # Ensure that having a # in the line doesn't hide the error.
+        self.assert_multi_line_lint(
+            '    x++; char a[] = "#";',
+            'More than one command on the same line  [whitespace/newline] [4]')
+        # Ignore preprocessor #if directives.
+        self.assert_multi_line_lint(
+            '#if (condition) || (condition2)\n',
+            '')
+
+        # 2. An else statement should go on the same line as a preceding
+        #   close brace if one is present, else it should line up with the
+        #   if statement.
+        self.assert_multi_line_lint(
+            'if (condition) {\n'
+            '    doSomething();\n'
+            '    doSomethingAgain();\n'
+            '} else {\n'
+            '    doSomethingElse();\n'
+            '    doSomethingElseAgain();\n'
+            '}\n',
+            '')
+        self.assert_multi_line_lint(
+            'if (condition)\n'
+            '    doSomething();\n'
+            'else\n'
+            '    doSomethingElse();\n',
+            '')
+        self.assert_multi_line_lint(
+            'if (condition)\n'
+            '    doSomething();\n'
+            'else {\n'
+            '    doSomethingElse();\n'
+            '    doSomethingElseAgain();\n'
+            '}\n',
+            '')
+        self.assert_multi_line_lint(
+            '#define TEST_ASSERT(expression) do { if (!(expression)) { TestsController::shared().testFailed(__FILE__, __LINE__, #expression); return; } } while (0)\n',
+            '')
+        self.assert_multi_line_lint(
+            '#define TEST_ASSERT(expression) do { if ( !(expression)) { TestsController::shared().testFailed(__FILE__, __LINE__, #expression); return; } } while (0)\n',
+            'Extra space after ( in if  [whitespace/parens] [5]')
+        # FIXME: Currently we only check the first conditional, so we cannot detect errors in subsequent ones.
+        # self.assert_multi_line_lint(
+        #     '#define TEST_ASSERT(expression) do { if (!(expression)) { TestsController::shared().testFailed(__FILE__, __LINE__, #expression); return; } } while (0 )\n',
+        #     'Mismatching spaces inside () in if  [whitespace/parens] [5]')
+        self.assert_multi_line_lint(
+            'WTF_MAKE_NONCOPYABLE(ClassName); WTF_MAKE_FAST_ALLOCATED;\n',
+            '')
+        self.assert_multi_line_lint(
+            'if (condition) {\n'
+            '    doSomething();\n'
+            '    doSomethingAgain();\n'
+            '}\n'
+            'else {\n'
+            '    doSomethingElse();\n'
+            '    doSomethingElseAgain();\n'
+            '}\n',
+            'An else should appear on the same line as the preceding }  [whitespace/newline] [4]')
+        self.assert_multi_line_lint(
+            'if (condition) doSomething(); else doSomethingElse();\n',
+            ['More than one command on the same line  [whitespace/newline] [4]',
+             'Else clause should never be on same line as else (use 2 lines)  [whitespace/newline] [4]',
+             'More than one command on the same line in if  [whitespace/parens] [4]'])
+        self.assert_multi_line_lint(
+            'if (condition) doSomething(); else {\n'
+            '    doSomethingElse();\n'
+            '}\n',
+            ['More than one command on the same line in if  [whitespace/parens] [4]',
+             'One line control clauses should not use braces.  [whitespace/braces] [4]'])
+        self.assert_multi_line_lint(
+            'if (condition)\n'
+            '    doSomething();\n'
+            'else {\n'
+            '    doSomethingElse();\n'
+            '}\n',
+            'One line control clauses should not use braces.  [whitespace/braces] [4]')
+        self.assert_multi_line_lint(
+            'if (condition) {\n'
+            '    doSomething1();\n'
+            '    doSomething2();\n'
+            '} else {\n'
+            '    doSomethingElse();\n'
+            '}\n',
+            'One line control clauses should not use braces.  [whitespace/braces] [4]')
+        self.assert_multi_line_lint(
+            'void func()\n'
+            '{\n'
+            '    while (condition) { }\n'
+            '    return 0;\n'
+            '}\n',
+            '')
+        self.assert_multi_line_lint(
+            'void func()\n'
+            '{\n'
+            '    for (i = 0; i < 42; i++) { foobar(); }\n'
+            '    return 0;\n'
+            '}\n',
+            'More than one command on the same line in for  [whitespace/parens] [4]')
+
+        # 3. An else if statement should be written as an if statement
+        #    when the prior if concludes with a return statement.
+        self.assert_multi_line_lint(
+            'if (motivated) {\n'
+            '    if (liquid)\n'
+            '        return money;\n'
+            '} else if (tired)\n'
+            '    break;\n',
+            '')
+        self.assert_multi_line_lint(
+            'if (condition)\n'
+            '    doSomething();\n'
+            'else if (otherCondition)\n'
+            '    doSomethingElse();\n',
+            '')
+        self.assert_multi_line_lint(
+            'if (condition)\n'
+            '    doSomething();\n'
+            'else\n'
+            '    doSomethingElse();\n',
+            '')
+        self.assert_multi_line_lint(
+            'if (condition)\n'
+            '    returnValue = foo;\n'
+            'else if (otherCondition)\n'
+            '    returnValue = bar;\n',
+            '')
+        self.assert_multi_line_lint(
+            'if (condition)\n'
+            '    returnValue = foo;\n'
+            'else\n'
+            '    returnValue = bar;\n',
+            '')
+        self.assert_multi_line_lint(
+            'if (condition)\n'
+            '    doSomething();\n'
+            'else if (liquid)\n'
+            '    return money;\n'
+            'else if (broke)\n'
+            '    return favor;\n'
+            'else\n'
+            '    sleep(28800);\n',
+            '')
+        self.assert_multi_line_lint(
+            'if (liquid) {\n'
+            '    prepare();\n'
+            '    return money;\n'
+            '} else if (greedy) {\n'
+            '    keep();\n'
+            '    return nothing;\n'
+            '}\n',
+            'An else if statement should be written as an if statement when the '
+            'prior "if" concludes with a return, break, continue or goto statement.'
+            '  [readability/control_flow] [4]')
+        self.assert_multi_line_lint(
+            '    if (stupid) {\n'
+            'infiniteLoop:\n'
+            '        goto infiniteLoop;\n'
+            '    } else if (evil)\n'
+            '        goto hell;\n',
+            'An else if statement should be written as an if statement when the '
+            'prior "if" concludes with a return, break, continue or goto statement.'
+            '  [readability/control_flow] [4]')
+        self.assert_multi_line_lint(
+            'if (liquid)\n'
+            '{\n'
+            '    prepare();\n'
+            '    return money;\n'
+            '}\n'
+            'else if (greedy)\n'
+            '    keep();\n',
+            ['This { should be at the end of the previous line  [whitespace/braces] [4]',
+            'An else should appear on the same line as the preceding }  [whitespace/newline] [4]',
+            'An else if statement should be written as an if statement when the '
+            'prior "if" concludes with a return, break, continue or goto statement.'
+            '  [readability/control_flow] [4]'])
+        self.assert_multi_line_lint(
+            'if (gone)\n'
+            '    return;\n'
+            'else if (here)\n'
+            '    go();\n',
+            'An else if statement should be written as an if statement when the '
+            'prior "if" concludes with a return, break, continue or goto statement.'
+            '  [readability/control_flow] [4]')
+        self.assert_multi_line_lint(
+            'if (gone)\n'
+            '    return;\n'
+            'else\n'
+            '    go();\n',
+            'An else statement can be removed when the prior "if" concludes '
+            'with a return, break, continue or goto statement.'
+            '  [readability/control_flow] [4]')
+        self.assert_multi_line_lint(
+            'if (motivated) {\n'
+            '    prepare();\n'
+            '    continue;\n'
+            '} else {\n'
+            '    cleanUp();\n'
+            '    break;\n'
+            '}\n',
+            'An else statement can be removed when the prior "if" concludes '
+            'with a return, break, continue or goto statement.'
+            '  [readability/control_flow] [4]')
+        self.assert_multi_line_lint(
+            'if (tired)\n'
+            '    break;\n'
+            'else {\n'
+            '    prepare();\n'
+            '    continue;\n'
+            '}\n',
+            'An else statement can be removed when the prior "if" concludes '
+            'with a return, break, continue or goto statement.'
+            '  [readability/control_flow] [4]')
+
+    def test_braces(self):
+        # 1. Function definitions: place each brace on its own line.
+        self.assert_multi_line_lint(
+            'int main()\n'
+            '{\n'
+            '    doSomething();\n'
+            '}\n',
+            '')
+        self.assert_multi_line_lint(
+            'int main() {\n'
+            '    doSomething();\n'
+            '}\n',
+            'Place brace on its own line for function definitions.  [whitespace/braces] [4]')
+
+        # 2. Other braces: place the open brace on the line preceding the
+        #    code block; place the close brace on its own line.
+        self.assert_multi_line_lint(
+            'class MyClass {\n'
+            '    int foo;\n'
+            '};\n',
+            '')
+        self.assert_multi_line_lint(
+            'namespace WebCore {\n'
+            'int foo;\n'
+            '};\n',
+            '')
+        self.assert_multi_line_lint(
+            'for (int i = 0; i < 10; i++) {\n'
+            '    DoSomething();\n'
+            '};\n',
+            '')
+        self.assert_multi_line_lint(
+            'class MyClass\n'
+            '{\n'
+            '    int foo;\n'
+            '};\n',
+            'This { should be at the end of the previous line  [whitespace/braces] [4]')
+        self.assert_multi_line_lint(
+            'if (condition)\n'
+            '{\n'
+            '    int foo;\n'
+            '}\n',
+            'This { should be at the end of the previous line  [whitespace/braces] [4]')
+        self.assert_multi_line_lint(
+            'for (int i = 0; i < 10; i++)\n'
+            '{\n'
+            '    int foo;\n'
+            '}\n',
+            'This { should be at the end of the previous line  [whitespace/braces] [4]')
+        self.assert_multi_line_lint(
+            'while (true)\n'
+            '{\n'
+            '    int foo;\n'
+            '}\n',
+            'This { should be at the end of the previous line  [whitespace/braces] [4]')
+        self.assert_multi_line_lint(
+            'foreach (Foo* foo, foos)\n'
+            '{\n'
+            '    int bar;\n'
+            '}\n',
+            'This { should be at the end of the previous line  [whitespace/braces] [4]')
+        self.assert_multi_line_lint(
+            'switch (type)\n'
+            '{\n'
+            'case foo: return;\n'
+            '}\n',
+            'This { should be at the end of the previous line  [whitespace/braces] [4]')
+        self.assert_multi_line_lint(
+            'if (condition)\n'
+            '{\n'
+            '    int foo;\n'
+            '}\n',
+            'This { should be at the end of the previous line  [whitespace/braces] [4]')
+        self.assert_multi_line_lint(
+            'for (int i = 0; i < 10; i++)\n'
+            '{\n'
+            '    int foo;\n'
+            '}\n',
+            'This { should be at the end of the previous line  [whitespace/braces] [4]')
+        self.assert_multi_line_lint(
+            'while (true)\n'
+            '{\n'
+            '    int foo;\n'
+            '}\n',
+            'This { should be at the end of the previous line  [whitespace/braces] [4]')
+        self.assert_multi_line_lint(
+            'switch (type)\n'
+            '{\n'
+            'case foo: return;\n'
+            '}\n',
+            'This { should be at the end of the previous line  [whitespace/braces] [4]')
+        self.assert_multi_line_lint(
+            'else if (type)\n'
+            '{\n'
+            'case foo: return;\n'
+            '}\n',
+            'This { should be at the end of the previous line  [whitespace/braces] [4]')
+
+        # 3. One-line control clauses should not use braces unless
+        #    comments are included or a single statement spans multiple
+        #    lines.
+        self.assert_multi_line_lint(
+            'if (true) {\n'
+            '    int foo;\n'
+            '}\n',
+            'One line control clauses should not use braces.  [whitespace/braces] [4]')
+
+        self.assert_multi_line_lint(
+            'for (; foo; bar) {\n'
+            '    int foo;\n'
+            '}\n',
+            'One line control clauses should not use braces.  [whitespace/braces] [4]')
+
+        self.assert_multi_line_lint(
+            'foreach (foo, foos) {\n'
+            '    int bar;\n'
+            '}\n',
+            'One line control clauses should not use braces.  [whitespace/braces] [4]')
+
+        self.assert_multi_line_lint(
+            'while (true) {\n'
+            '    int foo;\n'
+            '}\n',
+            'One line control clauses should not use braces.  [whitespace/braces] [4]')
+
+        self.assert_multi_line_lint(
+            'if (true)\n'
+            '    int foo;\n'
+            'else {\n'
+            '    int foo;\n'
+            '}\n',
+            'One line control clauses should not use braces.  [whitespace/braces] [4]')
+
+        self.assert_multi_line_lint(
+            'if (true) {\n'
+            '    int foo;\n'
+            '} else\n'
+            '    int foo;\n',
+            'One line control clauses should not use braces.  [whitespace/braces] [4]')
+
+        self.assert_multi_line_lint(
+            'if (true) {\n'
+            '    // Some comment\n'
+            '    int foo;\n'
+            '}\n',
+            '')
+
+        self.assert_multi_line_lint(
+            'if (true) {\n'
+            '    myFunction(reallyLongParam1, reallyLongParam2,\n'
+            '               reallyLongParam3);\n'
+            '}\n',
+            'Weird number of spaces at line-start.  Are you using a 4-space indent?  [whitespace/indent] [3]')
+
+        self.assert_multi_line_lint(
+            'if (true) {\n'
+            '    myFunction(reallyLongParam1, reallyLongParam2,\n'
+            '            reallyLongParam3);\n'
+            '}\n',
+            'When wrapping a line, only indent 4 spaces.  [whitespace/indent] [3]')
+
+        # 4. Control clauses without a body should use empty braces.
+        self.assert_multi_line_lint(
+            'for ( ; current; current = current->next) { }\n',
+            '')
+        self.assert_multi_line_lint(
+            'for ( ; current;\n'
+            '     current = current->next) { }\n',
+            'Weird number of spaces at line-start.  Are you using a 4-space indent?  [whitespace/indent] [3]')
+        self.assert_multi_line_lint(
+            'for ( ; current; current = current->next);\n',
+            'Semicolon defining empty statement for this loop. Use { } instead.  [whitespace/semicolon] [5]')
+        self.assert_multi_line_lint(
+            'while (true);\n',
+            'Semicolon defining empty statement for this loop. Use { } instead.  [whitespace/semicolon] [5]')
+        self.assert_multi_line_lint(
+            '} while (true);\n',
+            '')
+
+    def test_null_false_zero(self):
+        # 1. In C++, the null pointer value should be written as 0. In C,
+        #    it should be written as NULL. In Objective-C and Objective-C++,
+        #    follow the guideline for C or C++, respectively, but use nil to
+        #    represent a null Objective-C object.
+        self.assert_lint(
+            'functionCall(NULL)',
+            'Use 0 instead of NULL.'
+            '  [readability/null] [5]',
+            'foo.cpp')
+        self.assert_lint(
+            "// Don't use NULL in comments since it isn't in code.",
+            'Use 0 or null instead of NULL (even in *comments*).'
+            '  [readability/null] [4]',
+            'foo.cpp')
+        self.assert_lint(
+            '"A string with NULL" // and a comment with NULL is tricky to flag correctly in cpp_style.',
+            'Use 0 or null instead of NULL (even in *comments*).'
+            '  [readability/null] [4]',
+            'foo.cpp')
+        self.assert_lint(
+            '"A string containing NULL is ok"',
+            '',
+            'foo.cpp')
+        self.assert_lint(
+            'if (aboutNULL)',
+            '',
+            'foo.cpp')
+        self.assert_lint(
+            'myVariable = NULLify',
+            '',
+            'foo.cpp')
+        # Make sure that the NULL check does not apply to C and Objective-C files.
+        self.assert_lint(
+            'functionCall(NULL)',
+            '',
+            'foo.c')
+        self.assert_lint(
+            'functionCall(NULL)',
+            '',
+            'foo.m')
+
+        # Make sure that the NULL check does not apply to g_object_{set,get} and
+        # g_str{join,concat}
+        self.assert_lint(
+            'g_object_get(foo, "prop", &bar, NULL);',
+            '')
+        self.assert_lint(
+            'g_object_set(foo, "prop", bar, NULL);',
+            '')
+        self.assert_lint(
+            'g_build_filename(foo, bar, NULL);',
+            '')
+        self.assert_lint(
+            'gst_bin_add_many(foo, bar, boo, NULL);',
+            '')
+        self.assert_lint(
+            'gst_bin_remove_many(foo, bar, boo, NULL);',
+            '')
+        self.assert_lint(
+            'gst_element_link_many(foo, bar, boo, NULL);',
+            '')
+        self.assert_lint(
+            'gst_element_unlink_many(foo, bar, boo, NULL);',
+            '')
+        self.assert_lint(
+            'gst_structure_get(foo, "value", G_TYPE_INT, &value, NULL);',
+            '')
+        self.assert_lint(
+            'gst_structure_set(foo, "value", G_TYPE_INT, value, NULL);',
+            '')
+        self.assert_lint(
+            'gst_structure_remove_fields(foo, "value", "bar", NULL);',
+            '')
+        self.assert_lint(
+            'gst_structure_new("foo", "value", G_TYPE_INT, value, NULL);',
+            '')
+        self.assert_lint(
+            'gst_structure_id_new(FOO, VALUE, G_TYPE_INT, value, NULL);',
+            '')
+        self.assert_lint(
+            'gst_structure_id_set(FOO, VALUE, G_TYPE_INT, value, NULL);',
+            '')
+        self.assert_lint(
+            'gst_structure_id_get(FOO, VALUE, G_TYPE_INT, &value, NULL);',
+            '')
+        self.assert_lint(
+            'gst_caps_new_simple(mime, "value", G_TYPE_INT, &value, NULL);',
+            '')
+        self.assert_lint(
+            'gst_caps_new_full(structure1, structure2, NULL);',
+            '')
+        self.assert_lint(
+            'gchar* result = g_strconcat("part1", "part2", "part3", NULL);',
+            '')
+        self.assert_lint(
+            'gchar* result = g_strconcat("part1", NULL);',
+            '')
+        self.assert_lint(
+            'gchar* result = g_strjoin(",", "part1", "part2", "part3", NULL);',
+            '')
+        self.assert_lint(
+            'gchar* result = g_strjoin(",", "part1", NULL);',
+            '')
+        self.assert_lint(
+            'gchar* result = gdk_pixbuf_save_to_callback(pixbuf, function, data, type, error, NULL);',
+            '')
+        self.assert_lint(
+            'gchar* result = gdk_pixbuf_save_to_buffer(pixbuf, function, data, type, error, NULL);',
+            '')
+        self.assert_lint(
+            'gchar* result = gdk_pixbuf_save_to_stream(pixbuf, function, data, type, error, NULL);',
+            '')
+        self.assert_lint(
+            'gtk_widget_style_get(style, "propertyName", &value, "otherName", &otherValue, NULL);',
+            '')
+        self.assert_lint(
+            'gtk_style_context_get_style(context, "propertyName", &value, "otherName", &otherValue, NULL);',
+            '')
+        self.assert_lint(
+            'gtk_widget_style_get_property(style, NULL, NULL);',
+            'Use 0 instead of NULL.  [readability/null] [5]',
+            'foo.cpp')
+        self.assert_lint(
+            'gtk_widget_style_get_valist(style, NULL, NULL);',
+            'Use 0 instead of NULL.  [readability/null] [5]',
+            'foo.cpp')
+
+        # 2. C++ and C bool values should be written as true and
+        #    false. Objective-C BOOL values should be written as YES and NO.
+        # FIXME: Implement this.
+
+        # 3. Tests for true/false, null/non-null, and zero/non-zero should
+        #    all be done without equality comparisons.
+        self.assert_lint(
+            'if (count == 0)',
+            'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons.'
+            '  [readability/comparison_to_zero] [5]')
+        self.assert_lint_one_of_many_errors_re(
+            'if (string != NULL)',
+            r'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons\.')
+        self.assert_lint(
+            'if (condition == true)',
+            'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons.'
+            '  [readability/comparison_to_zero] [5]')
+        self.assert_lint(
+            'if (myVariable != /* Why would anyone put a comment here? */ false)',
+            'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons.'
+            '  [readability/comparison_to_zero] [5]')
+
+        self.assert_lint(
+            'if (0 /* This comment also looks odd to me. */ != aLongerVariableName)',
+            'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons.'
+            '  [readability/comparison_to_zero] [5]')
+        self.assert_lint_one_of_many_errors_re(
+            'if (NULL == thisMayBeNull)',
+            r'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons\.')
+        self.assert_lint(
+            'if (true != anotherCondition)',
+            'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons.'
+            '  [readability/comparison_to_zero] [5]')
+        self.assert_lint(
+            'if (false == myBoolValue)',
+            'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons.'
+            '  [readability/comparison_to_zero] [5]')
+
+        self.assert_lint(
+            'if (fontType == trueType)',
+            '')
+        self.assert_lint(
+            'if (othertrue == fontType)',
+            '')
+        self.assert_lint(
+            'if (LIKELY(foo == 0))',
+            '')
+        self.assert_lint(
+            'if (UNLIKELY(foo == 0))',
+            '')
+        self.assert_lint(
+            'if ((a - b) == 0.5)',
+            '')
+        self.assert_lint(
+            'if (0.5 == (a - b))',
+            '')
+        self.assert_lint(
+            'if (LIKELY(foo == NULL))',
+            'Use 0 instead of NULL.  [readability/null] [5]')
+        self.assert_lint(
+            'if (UNLIKELY(foo == NULL))',
+            'Use 0 instead of NULL.  [readability/null] [5]')
+
+    def test_directive_indentation(self):
+        self.assert_lint(
+            "    #if FOO",
+            "preprocessor directives (e.g., #ifdef, #define, #import) should never be indented."
+            "  [whitespace/indent] [4]",
+            "foo.cpp")
+
+    def test_using_std(self):
+        self.assert_lint(
+            'using std::min;',
+            "Use 'using namespace std;' instead of 'using std::min;'."
+            "  [build/using_std] [4]",
+            'foo.cpp')
+
+    def test_max_macro(self):
+        self.assert_lint(
+            'int i = MAX(0, 1);',
+            '',
+            'foo.c')
+
+        self.assert_lint(
+            'int i = MAX(0, 1);',
+            'Use std::max() or std::max<type>() instead of the MAX() macro.'
+            '  [runtime/max_min_macros] [4]',
+            'foo.cpp')
+
+        self.assert_lint(
+            'inline int foo() { return MAX(0, 1); }',
+            'Use std::max() or std::max<type>() instead of the MAX() macro.'
+            '  [runtime/max_min_macros] [4]',
+            'foo.h')
+
+    def test_min_macro(self):
+        self.assert_lint(
+            'int i = MIN(0, 1);',
+            '',
+            'foo.c')
+
+        self.assert_lint(
+            'int i = MIN(0, 1);',
+            'Use std::min() or std::min<type>() instead of the MIN() macro.'
+            '  [runtime/max_min_macros] [4]',
+            'foo.cpp')
+
+        self.assert_lint(
+            'inline int foo() { return MIN(0, 1); }',
+            'Use std::min() or std::min<type>() instead of the MIN() macro.'
+            '  [runtime/max_min_macros] [4]',
+            'foo.h')
+
+    def test_ctype_function(self):
+        self.assert_lint(
+            'int i = isascii(8);',
+            'Use equivelent function in <wtf/ASCIICType.h> instead of the '
+            'isascii() function.  [runtime/ctype_function] [4]',
+            'foo.cpp')
+
+    def test_names(self):
+        name_underscore_error_message = " is incorrectly named. Don't use underscores in your identifier names.  [readability/naming/underscores] [4]"
+        name_tooshort_error_message = " is incorrectly named. Don't use the single letter 'l' as an identifier name.  [readability/naming] [4]"
+
+        # Basic cases from WebKit style guide.
+        self.assert_lint('struct Data;', '')
+        self.assert_lint('size_t bufferSize;', '')
+        self.assert_lint('class HTMLDocument;', '')
+        self.assert_lint('String mimeType();', '')
+        self.assert_lint('size_t buffer_size;',
+                         'buffer_size' + name_underscore_error_message)
+        self.assert_lint('short m_length;', '')
+        self.assert_lint('short _length;',
+                         '_length' + name_underscore_error_message)
+        self.assert_lint('short length_;',
+                         'length_' + name_underscore_error_message)
+        self.assert_lint('unsigned _length;',
+                         '_length' + name_underscore_error_message)
+        self.assert_lint('unsigned long _length;',
+                         '_length' + name_underscore_error_message)
+        self.assert_lint('unsigned long long _length;',
+                         '_length' + name_underscore_error_message)
+
+        # Allow underscores in Objective C files.
+        self.assert_lint('unsigned long long _length;',
+                         '',
+                         'foo.m')
+        self.assert_lint('unsigned long long _length;',
+                         '',
+                         'foo.mm')
+        self.assert_lint('#import "header_file.h"\n'
+                         'unsigned long long _length;',
+                         '',
+                         'foo.h')
+        self.assert_lint('unsigned long long _length;\n'
+                         '@interface WebFullscreenWindow;',
+                         '',
+                         'foo.h')
+        self.assert_lint('unsigned long long _length;\n'
+                         '@implementation WebFullscreenWindow;',
+                         '',
+                         'foo.h')
+        self.assert_lint('unsigned long long _length;\n'
+                         '@class WebWindowFadeAnimation;',
+                         '',
+                         'foo.h')
+
+        # Variable name 'l' is easy to confuse with '1'
+        self.assert_lint('int l;', 'l' + name_tooshort_error_message)
+        self.assert_lint('size_t l;', 'l' + name_tooshort_error_message)
+        self.assert_lint('long long l;', 'l' + name_tooshort_error_message)
+
+        # Pointers, references, functions, templates, and adjectives.
+        self.assert_lint('char* under_score;',
+                         'under_score' + name_underscore_error_message)
+        self.assert_lint('const int UNDER_SCORE;',
+                         'UNDER_SCORE' + name_underscore_error_message)
+        self.assert_lint('static inline const char const& const under_score;',
+                         'under_score' + name_underscore_error_message)
+        self.assert_lint('WebCore::RenderObject* under_score;',
+                         'under_score' + name_underscore_error_message)
+        self.assert_lint('int func_name();',
+                         'func_name' + name_underscore_error_message)
+        self.assert_lint('RefPtr<RenderObject*> under_score;',
+                         'under_score' + name_underscore_error_message)
+        self.assert_lint('WTF::Vector<WTF::RefPtr<const RenderObject* const> > under_score;',
+                         'under_score' + name_underscore_error_message)
+        self.assert_lint('int under_score[];',
+                         'under_score' + name_underscore_error_message)
+        self.assert_lint('struct dirent* under_score;',
+                         'under_score' + name_underscore_error_message)
+        self.assert_lint('long under_score;',
+                         'under_score' + name_underscore_error_message)
+        self.assert_lint('long long under_score;',
+                         'under_score' + name_underscore_error_message)
+        self.assert_lint('long double under_score;',
+                         'under_score' + name_underscore_error_message)
+        self.assert_lint('long long int under_score;',
+                         'under_score' + name_underscore_error_message)
+
+        # Declarations in control statements.
+        self.assert_lint('if (int under_score = 42) {',
+                         'under_score' + name_underscore_error_message)
+        self.assert_lint('else if (int under_score = 42) {',
+                         'under_score' + name_underscore_error_message)
+        self.assert_lint('for (int under_score = 42; cond; i++) {',
+                         'under_score' + name_underscore_error_message)
+        self.assert_lint('while (foo & under_score = bar) {',
+                         'under_score' + name_underscore_error_message)
+        self.assert_lint('for (foo * under_score = p; cond; i++) {',
+                         'under_score' + name_underscore_error_message)
+        self.assert_lint('for (foo * under_score; cond; i++) {',
+                         'under_score' + name_underscore_error_message)
+        self.assert_lint('while (foo & value_in_thirdparty_library) {', '')
+        self.assert_lint('while (foo * value_in_thirdparty_library) {', '')
+        self.assert_lint('if (mli && S_OK == mli->foo()) {', '')
+
+        # More member variables and functions.
+        self.assert_lint('int SomeClass::s_validName', '')
+        self.assert_lint('int m_under_score;',
+                         'm_under_score' + name_underscore_error_message)
+        self.assert_lint('int SomeClass::s_under_score = 0;',
+                         'SomeClass::s_under_score' + name_underscore_error_message)
+        self.assert_lint('int SomeClass::under_score = 0;',
+                         'SomeClass::under_score' + name_underscore_error_message)
+
+        # Other statements.
+        self.assert_lint('return INT_MAX;', '')
+        self.assert_lint('return_t under_score;',
+                         'under_score' + name_underscore_error_message)
+        self.assert_lint('goto under_score;',
+                         'under_score' + name_underscore_error_message)
+        self.assert_lint('delete static_cast<Foo*>(p);', '')
+
+        # Multiple variables in one line.
+        self.assert_lint('void myFunction(int variable1, int another_variable);',
+                         'another_variable' + name_underscore_error_message)
+        self.assert_lint('int variable1, another_variable;',
+                         'another_variable' + name_underscore_error_message)
+        self.assert_lint('int first_variable, secondVariable;',
+                         'first_variable' + name_underscore_error_message)
+        self.assert_lint('void my_function(int variable_1, int variable_2);',
+                         ['my_function' + name_underscore_error_message,
+                          'variable_1' + name_underscore_error_message,
+                          'variable_2' + name_underscore_error_message])
+        self.assert_lint('for (int variable_1, variable_2;;) {',
+                         ['variable_1' + name_underscore_error_message,
+                          'variable_2' + name_underscore_error_message])
+
+        # There is an exception for op code functions but only in the JavaScriptCore directory.
+        self.assert_lint('void this_op_code(int var1, int var2)', '', 'Source/JavaScriptCore/foo.cpp')
+        self.assert_lint('void op_code(int var1, int var2)', '', 'Source/JavaScriptCore/foo.cpp')
+        self.assert_lint('void this_op_code(int var1, int var2)', 'this_op_code' + name_underscore_error_message)
+
+        # GObject requires certain magical names in class declarations.
+        self.assert_lint('void webkit_dom_object_init();', '')
+        self.assert_lint('void webkit_dom_object_class_init();', '')
+
+        # There is an exception for the GTK+ API.
+        self.assert_lint('void webkit_web_view_load(int var1, int var2)', '', 'Source/Webkit/gtk/webkit/foo.cpp')
+        self.assert_lint('void webkit_web_view_load(int var1, int var2)', '', 'Source/Webkit2/UIProcess/gtk/foo.cpp')
+
+        # Test that this doesn't also apply to files not in a 'gtk' directory.
+        self.assert_lint('void webkit_web_view_load(int var1, int var2)',
+            'webkit_web_view_load is incorrectly named. Don\'t use underscores in your identifier names.'
+            '  [readability/naming/underscores] [4]', 'Source/Webkit/webkit/foo.cpp')
+        # Test that this doesn't also apply to names that don't start with 'webkit_'.
+        self.assert_lint_one_of_many_errors_re('void otherkit_web_view_load(int var1, int var2)',
+            'otherkit_web_view_load is incorrectly named. Don\'t use underscores in your identifier names.'
+            '  [readability/naming/underscores] [4]', 'Source/Webkit/webkit/foo.cpp')
+
+        # There is an exception for some unit tests that begin with "tst_".
+        self.assert_lint('void tst_QWebFrame::arrayObjectEnumerable(int var1, int var2)', '')
+
+        # The Qt API uses names that begin with "qt_" or "_q_".
+        self.assert_lint('void QTFrame::qt_drt_is_awesome(int var1, int var2)', '')
+        self.assert_lint('void QTFrame::_q_drt_is_awesome(int var1, int var2)', '')
+        self.assert_lint('void qt_drt_is_awesome(int var1, int var2);', '')
+        self.assert_lint('void _q_drt_is_awesome(int var1, int var2);', '')
+
+        # Cairo forward declarations should not trigger an error.
+        self.assert_lint('typedef struct _cairo cairo_t;', '')
+        self.assert_lint('typedef struct _cairo_surface cairo_surface_t;', '')
+        self.assert_lint('typedef struct _cairo_scaled_font cairo_scaled_font_t;', '')
+
+        # EFL forward declarations should not trigger an error.
+        self.assert_lint('typedef struct _Ecore_Evas Ecore_Evas;', '')
+        self.assert_lint('typedef struct _Ecore_Pipe Ecore_Pipe;', '')
+        self.assert_lint('typedef struct _Eina_Rectangle Eina_Rectangle;', '')
+        self.assert_lint('typedef struct _Evas_Object Evas_Object;', '')
+        self.assert_lint('typedef struct _Ewk_History_Item Ewk_History_Item;', '')
+
+        # NPAPI functions that start with NPN_, NPP_ or NP_ are allowed.
+        self.assert_lint('void NPN_Status(NPP, const char*)', '')
+        self.assert_lint('NPError NPP_SetWindow(NPP instance, NPWindow *window)', '')
+        self.assert_lint('NPObject* NP_Allocate(NPP, NPClass*)', '')
+
+        # const_iterator is allowed as well.
+        self.assert_lint('typedef VectorType::const_iterator const_iterator;', '')
+
+        # vm_throw is allowed as well.
+        self.assert_lint('int vm_throw;', '')
+
+        # Bitfields.
+        self.assert_lint('unsigned _fillRule : 1;',
+                         '_fillRule' + name_underscore_error_message)
+
+        # new operators in initialization.
+        self.assert_lint('OwnPtr<uint32_t> variable(new uint32_t);', '')
+        self.assert_lint('OwnPtr<uint32_t> variable(new (expr) uint32_t);', '')
+        self.assert_lint('OwnPtr<uint32_t> under_score(new uint32_t);',
+                         'under_score' + name_underscore_error_message)
+
+    def test_parameter_names(self):
+        # Leave meaningless variable names out of function declarations.
+        meaningless_variable_name_error_message = 'The parameter name "%s" adds no information, so it should be removed.  [readability/parameter_name] [5]'
+
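+        # Note on the filter below (behavior assumed from how these rule
+        # tuples are used throughout this file): '-' first turns every
+        # category off, then '+readability/parameter_name' re-enables only
+        # that category, so perform_lint() reports just parameter-name errors.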
+        parameter_error_rules = ('-',
+                                 '+readability/parameter_name')
+        # No variable name, so no error.
+        self.assertEquals('',
+                          self.perform_lint('void func(int);', 'test.cpp', parameter_error_rules))
+
+        # Verify that copying the name of the set function causes the error (with some odd casing).
+        self.assertEquals(meaningless_variable_name_error_message % 'itemCount',
+                          self.perform_lint('void setItemCount(size_t itemCount);', 'test.cpp', parameter_error_rules))
+        self.assertEquals(meaningless_variable_name_error_message % 'abcCount',
+                          self.perform_lint('void setABCCount(size_t abcCount);', 'test.cpp', parameter_error_rules))
+
+        # Verify that copying a type name will trigger the warning (even if the type is a template parameter).
+        self.assertEquals(meaningless_variable_name_error_message % 'context',
+                          self.perform_lint('void funct(PassRefPtr<ScriptExecutionContext> context);', 'test.cpp', parameter_error_rules))
+
+        # Verify that acronyms as variable names trigger the error (for both set functions and type names).
+        self.assertEquals(meaningless_variable_name_error_message % 'ec',
+                          self.perform_lint('void setExceptionCode(int ec);', 'test.cpp', parameter_error_rules))
+        self.assertEquals(meaningless_variable_name_error_message % 'ec',
+                          self.perform_lint('void funct(ExceptionCode ec);', 'test.cpp', parameter_error_rules))
+
+        # 'object' alone, appended, or as part of an acronym is meaningless.
+        self.assertEquals(meaningless_variable_name_error_message % 'object',
+                          self.perform_lint('void funct(RenderView object);', 'test.cpp', parameter_error_rules))
+        self.assertEquals(meaningless_variable_name_error_message % 'viewObject',
+                          self.perform_lint('void funct(RenderView viewObject);', 'test.cpp', parameter_error_rules))
+        self.assertEquals(meaningless_variable_name_error_message % 'rvo',
+                          self.perform_lint('void funct(RenderView rvo);', 'test.cpp', parameter_error_rules))
+
+        # Check that r, g, b, and a are allowed.
+        self.assertEquals('',
+                          self.perform_lint('void setRGBAValues(int r, int g, int b, int a);', 'test.cpp', parameter_error_rules))
+
+        # Verify that a simple substring match isn't done, which would cause false positives.
+        self.assertEquals('',
+                          self.perform_lint('void setNateLateCount(size_t elate);', 'test.cpp', parameter_error_rules))
+        self.assertEquals('',
+                          self.perform_lint('void funct(NateLate elate);', 'test.cpp', parameter_error_rules))
+
+        # Don't generate warnings for function definitions (only declarations).
+        self.assertEquals('',
+                          self.perform_lint('void funct(PassRefPtr<ScriptExecutionContext> context)\n'
+                                            '{\n'
+                                            '}\n', 'test.cpp', parameter_error_rules))
+
+    def test_comments(self):
+        # A comment at the beginning of a line is ok.
+        self.assert_lint('// comment', '')
+        self.assert_lint('    // comment', '')
+
+        self.assert_lint('}  // namespace WebCore',
+                         'One space before end of line comments'
+                         '  [whitespace/comments] [5]')
+
+    def test_webkit_export_check(self):
+        webkit_export_error_rules = ('-',
+                                  '+readability/webkit_export')
+        self.assertEquals('',
+                          self.perform_lint('WEBKIT_EXPORT int foo();\n',
+                                            'WebKit/chromium/public/test.h',
+                                            webkit_export_error_rules))
+        self.assertEquals('',
+                          self.perform_lint('WEBKIT_EXPORT int foo();\n',
+                                            'WebKit/chromium/tests/test.h',
+                                            webkit_export_error_rules))
+        self.assertEquals('WEBKIT_EXPORT should only be used in header files.  [readability/webkit_export] [5]',
+                          self.perform_lint('WEBKIT_EXPORT int foo();\n',
+                                            'WebKit/chromium/public/test.cpp',
+                                            webkit_export_error_rules))
+        self.assertEquals('WEBKIT_EXPORT should only appear in the chromium public (or tests) directory.  [readability/webkit_export] [5]',
+                          self.perform_lint('WEBKIT_EXPORT int foo();\n',
+                                            'WebKit/chromium/src/test.h',
+                                            webkit_export_error_rules))
+        self.assertEquals('WEBKIT_EXPORT should not be used on a function with a body.  [readability/webkit_export] [5]',
+                          self.perform_lint('WEBKIT_EXPORT int foo() { }\n',
+                                            'WebKit/chromium/public/test.h',
+                                            webkit_export_error_rules))
+        self.assertEquals('WEBKIT_EXPORT should not be used on a function with a body.  [readability/webkit_export] [5]',
+                          self.perform_lint('WEBKIT_EXPORT inline int foo()\n'
+                                            '{\n'
+                                            '}\n',
+                                            'WebKit/chromium/public/test.h',
+                                            webkit_export_error_rules))
+        self.assertEquals('WEBKIT_EXPORT should not be used with a pure virtual function.  [readability/webkit_export] [5]',
+                          self.perform_lint('{}\n'
+                                            'WEBKIT_EXPORT\n'
+                                            'virtual\n'
+                                            'int\n'
+                                            'foo() = 0;\n',
+                                            'WebKit/chromium/public/test.h',
+                                            webkit_export_error_rules))
+        self.assertEquals('',
+                          self.perform_lint('{}\n'
+                                            'WEBKIT_EXPORT\n'
+                                            'virtual\n'
+                                            'int\n'
+                                            'foo() = 0;\n',
+                                            'test.h',
+                                            webkit_export_error_rules))
+
+    def test_other(self):
+        # FIXME: Implement this.
+        pass
+
+
+class CppCheckerTest(unittest.TestCase):
+
+    """Tests CppChecker class."""
+
+    def mock_handle_style_error(self):
+        pass
+
+    def _checker(self):
+        return CppChecker("foo", "h", self.mock_handle_style_error, 3)
+
+    def test_init(self):
+        """Test __init__ constructor."""
+        checker = self._checker()
+        self.assertEquals(checker.file_extension, "h")
+        self.assertEquals(checker.file_path, "foo")
+        self.assertEquals(checker.handle_style_error, self.mock_handle_style_error)
+        self.assertEquals(checker.min_confidence, 3)
+
+    def test_eq(self):
+        """Test __eq__ equality function."""
+        checker1 = self._checker()
+        checker2 = self._checker()
+
+        # == calls __eq__.
+        self.assertTrue(checker1 == checker2)
+
+        def mock_handle_style_error2(self):
+            pass
+
+        # Verify that a difference in any argument causes equality to fail.
+        checker = CppChecker("foo", "h", self.mock_handle_style_error, 3)
+        self.assertFalse(checker == CppChecker("bar", "h", self.mock_handle_style_error, 3))
+        self.assertFalse(checker == CppChecker("foo", "c", self.mock_handle_style_error, 3))
+        self.assertFalse(checker == CppChecker("foo", "h", mock_handle_style_error2, 3))
+        self.assertFalse(checker == CppChecker("foo", "h", self.mock_handle_style_error, 4))
+
+    def test_ne(self):
+        """Test __ne__ inequality function."""
+        checker1 = self._checker()
+        checker2 = self._checker()
+
+        # != calls __ne__.
+        # By default, __ne__ always returns true on different objects.
+        # Thus, just check the distinguishing case to verify that the
+        # code defines __ne__.
+        self.assertFalse(checker1 != checker2)
+
+
+def tearDown():
+    """A global check to make sure all error-categories have been tested.
+
+    The main tearDown() routine is the only code we can guarantee will be
+    run after all other tests have been executed.
+    """
+    try:
+        if _run_verifyallcategoriesseen:
+            ErrorCollector(None).verify_all_categories_are_seen()
+    except NameError:
+        # If nobody set the global _run_verifyallcategoriesseen, then
+        # we assume we shouldn't run the test.
+        pass
+
+if __name__ == '__main__':
+    import sys
+    # We don't want to run the verify_all_categories_are_seen() test unless
+    # we're running the full test suite: if we only run one test,
+    # obviously we're not going to see all the error categories.  So we
+    # only run verify_all_categories_are_seen() when no commandline flags
+    # are passed in.
+    global _run_verifyallcategoriesseen
+    _run_verifyallcategoriesseen = (len(sys.argv) == 1)
+
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/style/checkers/jsonchecker.py b/Tools/Scripts/webkitpy/style/checkers/jsonchecker.py
new file mode 100644
index 0000000..264cbee
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checkers/jsonchecker.py
@@ -0,0 +1,49 @@
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Checks WebKit style for JSON files."""
+
+import json
+import re
+
+
+class JSONChecker(object):
+    """Processes JSON lines for checking style."""
+
+    categories = set(('json/syntax',))
+
+    def __init__(self, file_path, handle_style_error):
+        self._handle_style_error = handle_style_error
+        self._handle_style_error.turn_off_line_filtering()
+
+    def check(self, lines):
+        try:
+            json.loads('\n'.join(lines) + '\n')
+        except ValueError, e:
+            self._handle_style_error(self.line_number_from_json_exception(e), 'json/syntax', 5, str(e))
+
+    @staticmethod
+    def line_number_from_json_exception(error):
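+        # For example (see the unit test below), the message
+        # "Expecting property name: line 2 column 1 (char 2)" yields 2,
+        # while a message without position information, such as
+        # "No JSON object could be decoded", yields 0.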
+        match = re.search(r': line (?P<line>\d+) column \d+', str(error))
+        if not match:
+            return 0
+        return int(match.group('line'))
diff --git a/Tools/Scripts/webkitpy/style/checkers/jsonchecker_unittest.py b/Tools/Scripts/webkitpy/style/checkers/jsonchecker_unittest.py
new file mode 100755
index 0000000..973c673
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checkers/jsonchecker_unittest.py
@@ -0,0 +1,116 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2010 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit test for jsonchecker.py."""
+
+import unittest
+
+import jsonchecker
+
+
+class MockErrorHandler(object):
+    def __init__(self, handle_style_error):
+        self.turned_off_filtering = False
+        self._handle_style_error = handle_style_error
+
+    def turn_off_line_filtering(self):
+        self.turned_off_filtering = True
+
+    def __call__(self, line_number, category, confidence, message):
+        self._handle_style_error(self, line_number, category, confidence, message)
+        return True
+
+
+class JSONCheckerTest(unittest.TestCase):
+    """Tests JSONChecker class."""
+
+    def test_line_number_from_json_exception(self):
+        tests = (
+            (0, 'No JSON object could be decoded'),
+            (2, 'Expecting property name: line 2 column 1 (char 2)'),
+            (3, 'Expecting object: line 3 column 1 (char 15)'),
+            (9, 'Expecting property name: line 9 column 21 (char 478)'),
+        )
+        for expected_line, message in tests:
+            self.assertEqual(expected_line, jsonchecker.JSONChecker.line_number_from_json_exception(ValueError(message)))
+
+    def assert_no_error(self, json_data):
+        def handle_style_error(mock_error_handler, line_number, category, confidence, message):
+            self.fail('Unexpected error: %d %s %d %s' % (line_number, category, confidence, message))
+
+        error_handler = MockErrorHandler(handle_style_error)
+        checker = jsonchecker.JSONChecker('foo.json', error_handler)
+        checker.check(json_data.split('\n'))
+        self.assertTrue(error_handler.turned_off_filtering)
+
+    def assert_error(self, expected_line_number, expected_category, json_data):
+        def handle_style_error(mock_error_handler, line_number, category, confidence, message):
+            mock_error_handler.had_error = True
+            self.assertEquals(expected_line_number, line_number)
+            self.assertEquals(expected_category, category)
+            self.assertTrue(category in jsonchecker.JSONChecker.categories)
+
+        error_handler = MockErrorHandler(handle_style_error)
+        error_handler.had_error = False
+
+        checker = jsonchecker.JSONChecker('foo.json', error_handler)
+        checker.check(json_data.split('\n'))
+        self.assertTrue(error_handler.had_error)
+        self.assertTrue(error_handler.turned_off_filtering)
+
+    def mock_handle_style_error(self):
+        pass
+
+    def test_conflict_marker(self):
+        self.assert_error(0, 'json/syntax', '<<<<<<< HEAD\n{\n}\n')
+
+    def test_single_quote(self):
+        self.assert_error(2, 'json/syntax', "{\n'slaves': []\n}\n")
+
+    def test_init(self):
+        error_handler = MockErrorHandler(self.mock_handle_style_error)
+        checker = jsonchecker.JSONChecker('foo.json', error_handler)
+        self.assertEquals(checker._handle_style_error, error_handler)
+
+    def test_no_error(self):
+        self.assert_no_error("""{
+    "slaves":     [ { "name": "test-slave", "platform": "*" },
+                    { "name": "apple-xserve-4", "platform": "mac-snowleopard" }
+                  ],
+
+    "builders":   [ { "name": "SnowLeopard Intel Release (Build)", "type": "Build", "builddir": "snowleopard-intel-release",
+                      "platform": "mac-snowleopard", "configuration": "release", "architectures": ["x86_64"],
+                      "slavenames": ["apple-xserve-4"]
+                    }
+                   ],
+
+    "schedulers": [ { "type": "PlatformSpecificScheduler", "platform": "mac-snowleopard", "branch": "trunk", "treeStableTimer": 45.0,
+                      "builderNames": ["SnowLeopard Intel Release (Build)", "SnowLeopard Intel Debug (Build)"]
+                    }
+                  ]
+}
+""")
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/style/checkers/png.py b/Tools/Scripts/webkitpy/style/checkers/png.py
new file mode 100644
index 0000000..430d6f0
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checkers/png.py
@@ -0,0 +1,75 @@
+# Copyright (C) 2012 Balazs Ankes (bank@inf.u-szeged.hu) University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Supports checking WebKit style in png files."""
+
+import os
+import re
+
+from webkitpy.common import checksvnconfigfile
+from webkitpy.common import read_checksum_from_png
+from webkitpy.common.system.systemhost import SystemHost
+from webkitpy.common.checkout.scm.detection import SCMDetector
+
+class PNGChecker(object):
+    """Check svn:mime-type for checking style"""
+
+    categories = set(['image/png'])
+
+    def __init__(self, file_path, handle_style_error, scm=None, host=None):
+        self._file_path = file_path
+        self._handle_style_error = handle_style_error
+        self._host = host or SystemHost()
+        self._fs = self._host.filesystem
+        self._detector = scm or SCMDetector(self._fs, self._host.executive).detect_scm_system(self._fs.getcwd())
+
+    def check(self, inline=None):
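+        # Overview of the checks below: expected results (*-expected.png)
+        # must embed a checksum; for a git checkout the local SVN config
+        # file is inspected for the auto-props and *.png mime-type entries;
+        # for an svn checkout the svn:mime-type property is read directly.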
+        errorstr = ""
+        config_file_path = ""
+        detection = self._detector.display_name()
+
+        if self._fs.exists(self._file_path) and self._file_path.endswith("-expected.png"):
+            with self._fs.open_binary_file_for_reading(self._file_path) as filehandle:
+                if not read_checksum_from_png.read_checksum(filehandle):
+                    self._handle_style_error(0, 'image/png', 5, "Image lacks a checksum. Generate pngs using run-webkit-tests to ensure they have a checksum.")
+
+        if detection == "git":
+            (file_missing, autoprop_missing, png_missing) = checksvnconfigfile.check(self._host, self._fs)
+            config_file_path = checksvnconfigfile.config_file_path(self._host, self._fs)
+
+            if file_missing:
+                self._handle_style_error(0, 'image/png', 5, "There is no SVN config file. (%s)" % config_file_path)
+            elif autoprop_missing and png_missing:
+                self._handle_style_error(0, 'image/png', 5, checksvnconfigfile.errorstr_autoprop(config_file_path) + checksvnconfigfile.errorstr_png(config_file_path))
+            elif autoprop_missing:
+                self._handle_style_error(0, 'image/png', 5, checksvnconfigfile.errorstr_autoprop(config_file_path))
+            elif png_missing:
+                self._handle_style_error(0, 'image/png', 5, checksvnconfigfile.errorstr_png(config_file_path))
+
+        elif detection == "svn":
+            prop_get = self._detector.propget("svn:mime-type", self._file_path)
+            if prop_get != "image/png":
+                errorstr = "Set the svn:mime-type property (svn propset svn:mime-type image/png %s)." % self._file_path
+                self._handle_style_error(0, 'image/png', 5, errorstr)
+
diff --git a/Tools/Scripts/webkitpy/style/checkers/png_unittest.py b/Tools/Scripts/webkitpy/style/checkers/png_unittest.py
new file mode 100644
index 0000000..764c285
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checkers/png_unittest.py
@@ -0,0 +1,134 @@
+# Copyright (C) 2012 Balazs Ankes (bank@inf.u-szeged.hu) University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit test for png.py."""
+
+import unittest
+from png import PNGChecker
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+
+
+class MockSCMDetector(object):
+
+    def __init__(self, scm, prop=None):
+        self._scm = scm
+        self._prop = prop
+
+    def display_name(self):
+        return self._scm
+
+    def propget(self, pname, path):
+        return self._prop
+
+
+class PNGCheckerTest(unittest.TestCase):
+    """Tests PNGChecker class."""
+
+    def test_init(self):
+        """Test __init__() method."""
+
+        def mock_handle_style_error(self):
+            pass
+
+        checker = PNGChecker("test/config", mock_handle_style_error, MockSCMDetector('git'), MockSystemHost())
+        self.assertEquals(checker._file_path, "test/config")
+        self.assertEquals(checker._handle_style_error, mock_handle_style_error)
+
+    def test_check(self):
+        errors = []
+
+        def mock_handle_style_error(line_number, category, confidence, message):
+            error = (line_number, category, confidence, message)
+            errors.append(error)
+
+        file_path = ''
+
+        fs = MockFileSystem()
+
+        scm = MockSCMDetector('svn')
+        checker = PNGChecker(file_path, mock_handle_style_error, scm, MockSystemHost(filesystem=fs))
+        checker.check()
+        self.assertEquals(len(errors), 1)
+        self.assertEquals(errors[0],
+                          (0, 'image/png', 5, 'Set the svn:mime-type property (svn propset svn:mime-type image/png ).'))
+
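+        # The remaining cases simulate a git checkout; there the checker
+        # reads the user's SVN config (mocked at
+        # /Users/mock/.subversion/config) looking for the enable-auto-props
+        # and *.png = svn:mime-type=image/png entries.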
+        files = {'/Users/mock/.subversion/config': 'enable-auto-props = yes\n*.png = svn:mime-type=image/png'}
+        fs = MockFileSystem(files)
+        scm = MockSCMDetector('git')
+        errors = []
+        checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
+        checker.check()
+        self.assertEquals(len(errors), 0)
+
+        files = {'/Users/mock/.subversion/config': '#enable-auto-props = yes'}
+        fs = MockFileSystem(files)
+        scm = MockSCMDetector('git')
+        errors = []
+        checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
+        checker.check()
+        self.assertEquals(len(errors), 1)
+
+        files = {'/Users/mock/.subversion/config': 'enable-auto-props = yes\n#enable-auto-props = yes\n*.png = svn:mime-type=image/png'}
+        fs = MockFileSystem(files)
+        scm = MockSCMDetector('git')
+        errors = []
+        checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
+        checker.check()
+        self.assertEquals(len(errors), 0)
+
+        files = {'/Users/mock/.subversion/config': '#enable-auto-props = yes\nenable-auto-props = yes\n*.png = svn:mime-type=image/png'}
+        fs = MockFileSystem(files)
+        scm = MockSCMDetector('git')
+        errors = []
+        checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
+        checker.check()
+        self.assertEquals(len(errors), 0)
+
+        files = {'/Users/mock/.subversion/config': 'enable-auto-props = no'}
+        fs = MockFileSystem(files)
+        scm = MockSCMDetector('git')
+        errors = []
+        checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
+        checker.check()
+        self.assertEquals(len(errors), 1)
+
+        file_path = "foo.png"
+        fs.write_binary_file(file_path, "Dummy binary data")
+        scm = MockSCMDetector('git')
+        errors = []
+        checker = PNGChecker(file_path, mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
+        checker.check()
+        self.assertEquals(len(errors), 1)
+
+        file_path = "foo-expected.png"
+        fs.write_binary_file(file_path, "Dummy binary data")
+        scm = MockSCMDetector('git')
+        errors = []
+        checker = PNGChecker(file_path, mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
+        checker.check()
+        self.assertEquals(len(errors), 2)
+        self.assertEquals(errors[0], (0, 'image/png', 5, 'Image lacks a checksum. Generate pngs using run-webkit-tests to ensure they have a checksum.'))
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/style/checkers/python.py b/Tools/Scripts/webkitpy/style/checkers/python.py
new file mode 100644
index 0000000..8cfd1b2
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checkers/python.py
@@ -0,0 +1,56 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Supports checking WebKit style in Python files."""
+
+from webkitpy.thirdparty.autoinstalled import pep8
+
+
+class PythonChecker(object):
+
+    """Processes text lines for checking style."""
+
+    def __init__(self, file_path, handle_style_error):
+        self._file_path = file_path
+        self._handle_style_error = handle_style_error
+
+    def check(self, lines):
+        # Initialize pep8.options, which is necessary for
+        # Checker.check_all() to execute.
+        pep8.process_options(arglist=[self._file_path])
+
+        checker = pep8.Checker(self._file_path)
+
+        def _pep8_handle_error(line_number, offset, text, check):
+            # FIXME: Incorporate the character offset into the error output.
+            #        This will require updating the error handler __call__
+            #        signature to include an optional "offset" parameter.
+            pep8_code = text[:4]
+            pep8_message = text[5:]
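+            # For example, the unit-test input file used in
+            # python_unittest.py produces the text "W291 trailing
+            # whitespace", which splits into the code "W291" and the
+            # message "trailing whitespace".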
+
+            category = "pep8/" + pep8_code
+
+            self._handle_style_error(line_number, category, 5, pep8_message)
+
+        checker.report_error = _pep8_handle_error
+
+        errors = checker.check_all()
diff --git a/Tools/Scripts/webkitpy/style/checkers/python_unittest.py b/Tools/Scripts/webkitpy/style/checkers/python_unittest.py
new file mode 100644
index 0000000..e003eb8
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checkers/python_unittest.py
@@ -0,0 +1,62 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for python.py."""
+
+import os
+import unittest
+
+from python import PythonChecker
+
+
+class PythonCheckerTest(unittest.TestCase):
+
+    """Tests the PythonChecker class."""
+
+    def test_init(self):
+        """Test __init__() method."""
+        def _mock_handle_style_error(self):
+            pass
+
+        checker = PythonChecker("foo.txt", _mock_handle_style_error)
+        self.assertEquals(checker._file_path, "foo.txt")
+        self.assertEquals(checker._handle_style_error,
+                          _mock_handle_style_error)
+
+    def test_check(self):
+        """Test check() method."""
+        errors = []
+
+        def _mock_handle_style_error(line_number, category, confidence,
+                                     message):
+            error = (line_number, category, confidence, message)
+            errors.append(error)
+
+        current_dir = os.path.dirname(__file__)
+        file_path = os.path.join(current_dir, "python_unittest_input.py")
+
+        checker = PythonChecker(file_path, _mock_handle_style_error)
+        checker.check(lines=[])
+
+        self.assertEquals(len(errors), 1)
+        self.assertEquals(errors[0],
+                          (2, "pep8/W291", 5, "trailing whitespace"))
diff --git a/Tools/Scripts/webkitpy/style/checkers/python_unittest_input.py b/Tools/Scripts/webkitpy/style/checkers/python_unittest_input.py
new file mode 100644
index 0000000..9f1d118
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checkers/python_unittest_input.py
@@ -0,0 +1,2 @@
+# This file is sample input for python_unittest.py and includes a single
+# error which is an extra space at the end of this line. 
diff --git a/Tools/Scripts/webkitpy/style/checkers/test_expectations.py b/Tools/Scripts/webkitpy/style/checkers/test_expectations.py
new file mode 100644
index 0000000..51b97be
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checkers/test_expectations.py
@@ -0,0 +1,99 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Checks WebKit style for test_expectations files."""
+
+import logging
+import optparse
+import os
+import re
+import sys
+
+from common import TabChecker
+from webkitpy.common.host import Host
+from webkitpy.layout_tests.models.test_expectations import TestExpectationParser
+
+
+_log = logging.getLogger(__name__)
+
+
+class TestExpectationsChecker(object):
+    """Processes TestExpectations lines for validating the syntax."""
+
+    categories = set(['test/expectations'])
+
+    def _determine_port_from_expectations_path(self, host, expectations_path):
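+        # Matches the given path against each port's expectations files with
+        # the WebKit base directory stripped; for example (see the unit test
+        # below) 'LayoutTests/platform/chromium/TestExpectations' resolves
+        # to a chromium port.  Returns None if no port claims the file.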
+        # Pass a configuration to avoid calling default_configuration() when initializing the port (takes 0.5 seconds on a Mac Pro!).
+        options_wk1 = optparse.Values({'configuration': 'Release', 'webkit_test_runner': False})
+        options_wk2 = optparse.Values({'configuration': 'Release', 'webkit_test_runner': True})
+        for port_name in host.port_factory.all_port_names():
+            ports = [host.port_factory.get(port_name, options=options_wk1), host.port_factory.get(port_name, options=options_wk2)]
+            for port in ports:
+                for test_expectation_file in port.expectations_files():
+                    if test_expectation_file.replace(port.path_from_webkit_base() + host.filesystem.sep, '') == expectations_path:
+                        return port
+        return None
+
+    def __init__(self, file_path, handle_style_error, host=None):
+        self._file_path = file_path
+        self._handle_style_error = handle_style_error
+        self._handle_style_error.turn_off_line_filtering()
+        self._tab_checker = TabChecker(file_path, handle_style_error)
+
+        # FIXME: host should be a required parameter, not an optional one.
+        host = host or Host()
+        host.initialize_scm()
+
+        self._port_obj = self._determine_port_from_expectations_path(host, file_path)
+
+        # Suppress error messages of test_expectations module since they will be reported later.
+        log = logging.getLogger("webkitpy.layout_tests.layout_package.test_expectations")
+        log.setLevel(logging.CRITICAL)
+
+    def _handle_error_message(self, lineno, message, confidence):
+        pass
+
+    def check_test_expectations(self, expectations_str, tests=None):
+        parser = TestExpectationParser(self._port_obj, tests, allow_rebaseline_modifier=False)
+        expectations = parser.parse('expectations', expectations_str)
+
+        level = 5
+        for expectation_line in expectations:
+            for warning in expectation_line.warnings:
+                self._handle_style_error(expectation_line.line_number, 'test/expectations', level, warning)
+
+    def check_tabs(self, lines):
+        self._tab_checker.check(lines)
+
+    def check(self, lines):
+        expectations = '\n'.join(lines)
+        if self._port_obj:
+            self.check_test_expectations(expectations_str=expectations, tests=None)
+
+        # Warn about tabs in the lines as well.
+        self.check_tabs(lines)
diff --git a/Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py b/Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py
new file mode 100644
index 0000000..1516de7
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py
@@ -0,0 +1,120 @@
+#!/usr/bin/python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import sys
+import unittest
+
+from test_expectations import TestExpectationsChecker
+from webkitpy.common.host_mock import MockHost
+
+
+class ErrorCollector(object):
+    """An error handler class for unit tests."""
+
+    def __init__(self):
+        self._errors = []
+        self.turned_off_filtering = False
+
+    def turn_off_line_filtering(self):
+        self.turned_off_filtering = True
+
+    def __call__(self, lineno, category, confidence, message):
+        self._errors.append('%s  [%s] [%d]' % (message, category, confidence))
+        return True
+
+    def get_errors(self):
+        return ''.join(self._errors)
+
+    def reset_errors(self):
+        self._errors = []
+        self.turned_off_filtering = False
+
+
+class TestExpectationsTestCase(unittest.TestCase):
+    """TestCase for test_expectations.py"""
+
+    def setUp(self):
+        self._error_collector = ErrorCollector()
+        self._test_file = 'passes/text.html'
+
+    def _expect_port_for_expectations_path(self, expected_port_implementation, expectations_path):
+        host = MockHost()
+        checker = TestExpectationsChecker(expectations_path, ErrorCollector(), host=host)
+        port = checker._determine_port_from_expectations_path(host, expectations_path)
+        if port:
+            self.assertTrue(port.name().startswith(expected_port_implementation))
+        else:
+            self.assertEquals(None, expected_port_implementation)
+
+    def test_determine_port_from_expectations_path(self):
+        self._expect_port_for_expectations_path(None, '/')
+        self._expect_port_for_expectations_path(None, 'LayoutTests/chromium-mac/TestExpectations')
+        self._expect_port_for_expectations_path('chromium', 'LayoutTests/platform/chromium/TestExpectations')
+        self._expect_port_for_expectations_path(None, '/mock-checkout/LayoutTests/platform/win/TestExpectations')
+        self._expect_port_for_expectations_path('win', 'LayoutTests/platform/win/TestExpectations')
+        self._expect_port_for_expectations_path('efl', 'LayoutTests/platform/efl/TestExpectations')
+        self._expect_port_for_expectations_path('efl', 'LayoutTests/platform/efl-wk1/TestExpectations')
+        self._expect_port_for_expectations_path('efl', 'LayoutTests/platform/efl-wk2/TestExpectations')
+        self._expect_port_for_expectations_path('qt', 'LayoutTests/platform/qt-win/TestExpectations')
+        # FIXME: check-webkit-style doesn't know how to create port objects for all Qt versions (4.8, 5.0) and
+        # will only check files based on the installed version of Qt.
+        #self._expect_port_for_expectations_path('qt', 'LayoutTests/platform/qt-5.0-wk2/TestExpectations')
+
+    def assert_lines_lint(self, lines, should_pass, expected_output=None):
+        self._error_collector.reset_errors()
+
+        host = MockHost()
+        checker = TestExpectationsChecker('test/TestExpectations',
+                                          self._error_collector, host=host)
+
+        # We should have failed to find a valid port object for that path.
+        self.assertEquals(checker._port_obj, None)
+
+        # Now use a test port so we can check the lines.
+        checker._port_obj = host.port_factory.get('test-mac-leopard')
+        checker.check_test_expectations(expectations_str='\n'.join(lines),
+                                        tests=[self._test_file])
+        checker.check_tabs(lines)
+        if should_pass:
+            self.assertEqual('', self._error_collector.get_errors())
+        elif expected_output:
+            self.assertEquals(expected_output, self._error_collector.get_errors())
+        else:
+            self.assertNotEquals('', self._error_collector.get_errors())
+        self.assertTrue(self._error_collector.turned_off_filtering)
+
+    def test_valid_expectations(self):
+        self.assert_lines_lint(["crbug.com/1234 [ Mac ] passes/text.html [ Pass Failure ]"], should_pass=True)
+
+    def test_invalid_expectations(self):
+        self.assert_lines_lint(["Bug(me) passes/text.html [ Give Up]"], should_pass=False)
+
+    def test_tab(self):
+        self.assert_lines_lint(["\twebkit.org/b/1 passes/text.html [ Pass ]"], should_pass=False, expected_output="Line contains tab character.  [whitespace/tab] [5]")
diff --git a/Tools/Scripts/webkitpy/style/checkers/text.py b/Tools/Scripts/webkitpy/style/checkers/text.py
new file mode 100644
index 0000000..1147658
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checkers/text.py
@@ -0,0 +1,51 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Checks WebKit style for text files."""
+
+from common import TabChecker
+
+class TextChecker(object):
+
+    """Processes text lines for checking style."""
+
+    def __init__(self, file_path, handle_style_error):
+        self.file_path = file_path
+        self.handle_style_error = handle_style_error
+        self._tab_checker = TabChecker(file_path, handle_style_error)
+
+    def check(self, lines):
+        self._tab_checker.check(lines)
+
+
+# FIXME: Remove this function (requires refactoring unit tests).
+def process_file_data(filename, lines, error):
+    checker = TextChecker(filename, error)
+    checker.check(lines)
+
diff --git a/Tools/Scripts/webkitpy/style/checkers/text_unittest.py b/Tools/Scripts/webkitpy/style/checkers/text_unittest.py
new file mode 100644
index 0000000..ced49a9
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checkers/text_unittest.py
@@ -0,0 +1,94 @@
+#!/usr/bin/python
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit test for text_style.py."""
+
+import unittest
+
+import text as text_style
+from text import TextChecker
+
+class TextStyleTestCase(unittest.TestCase):
+    """TestCase for text_style.py"""
+
+    def assertNoError(self, lines):
+        """Asserts that the specified lines has no errors."""
+        self.had_error = False
+
+        def error_for_test(line_number, category, confidence, message):
+            """Records if an error occurs."""
+            self.had_error = True
+
+        text_style.process_file_data('', lines, error_for_test)
+        self.assert_(not self.had_error, '%s should not have any errors.' % lines)
+
+    def assertError(self, lines, expected_line_number):
+        """Asserts that the specified lines has an error."""
+        self.had_error = False
+
+        def error_for_test(line_number, category, confidence, message):
+            """Checks if the expected error occurs."""
+            self.assertEquals(expected_line_number, line_number)
+            self.assertEquals('whitespace/tab', category)
+            self.had_error = True
+
+        text_style.process_file_data('', lines, error_for_test)
+        self.assert_(self.had_error, '%s should have an error [whitespace/tab].' % lines)
+
+
+    def test_no_error(self):
+        """Tests for no error cases."""
+        self.assertNoError([''])
+        self.assertNoError(['abc def', 'ggg'])
+
+
+    def test_error(self):
+        """Tests for error cases."""
+        self.assertError(['2009-12-16\tKent Tamura\t<tkent@chromium.org>'], 1)
+        self.assertError(['2009-12-16 Kent Tamura <tkent@chromium.org>',
+                          '',
+                          '\tReviewed by NOBODY.'], 3)
+
+
+class TextCheckerTest(unittest.TestCase):
+
+    """Tests TextChecker class."""
+
+    def mock_handle_style_error(self):
+        pass
+
+    def test_init(self):
+        """Test __init__ constructor."""
+        checker = TextChecker("foo.txt", self.mock_handle_style_error)
+        self.assertEquals(checker.file_path, "foo.txt")
+        self.assertEquals(checker.handle_style_error, self.mock_handle_style_error)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/style/checkers/watchlist.py b/Tools/Scripts/webkitpy/style/checkers/watchlist.py
new file mode 100644
index 0000000..d1a27f7
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checkers/watchlist.py
@@ -0,0 +1,51 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Checks WebKit style for the watchlist file."""
+
+
+from webkitpy.common.watchlist.watchlistparser import WatchListParser
+
+
+class WatchListChecker(object):
+
+    """Processes the watch list for checking style."""
+
+    def __init__(self, file_path, handle_style_error):
+        self._handle_style_error = handle_style_error
+        self._handle_style_error.turn_off_line_filtering()
+
+    def check(self, lines):
+        def log_to_style_error(message):
+            # Always report line 0 since we don't have anything better.
+            self._handle_style_error(0,
+                                     'watchlist/general', 5,
+                                     message)
+
+        WatchListParser(log_error=log_to_style_error).parse('\n'.join(lines))
diff --git a/Tools/Scripts/webkitpy/style/checkers/watchlist_unittest.py b/Tools/Scripts/webkitpy/style/checkers/watchlist_unittest.py
new file mode 100644
index 0000000..c8d29db
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checkers/watchlist_unittest.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2010 Apple Inc. All rights reserved.
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+'''Unit tests for watchlist.py.'''
+
+
+import unittest
+
+
+import watchlist
+
+
+class MockErrorHandler(object):
+    def __init__(self, handle_style_error):
+        self.turned_off_filtering = False
+        self._handle_style_error = handle_style_error
+
+    def turn_off_line_filtering(self):
+        self.turned_off_filtering = True
+
+    def __call__(self, line_number, category, confidence, message):
+        self._handle_style_error(self, line_number, category, confidence, message)
+        return True
+
+
+class WatchListTest(unittest.TestCase):
+    def test_basic_error_message(self):
+        def handle_style_error(mock_error_handler, line_number, category, confidence, message):
+            mock_error_handler.had_error = True
+            self.assertEquals(0, line_number)
+            self.assertEquals('watchlist/general', category)
+
+        error_handler = MockErrorHandler(handle_style_error)
+        error_handler.had_error = False
+        checker = watchlist.WatchListChecker('watchlist', error_handler)
+        checker.check(['{"DEFINTIONS": {}}'])
+        self.assertTrue(error_handler.had_error)
+        self.assertTrue(error_handler.turned_off_filtering)
diff --git a/Tools/Scripts/webkitpy/style/checkers/xcodeproj.py b/Tools/Scripts/webkitpy/style/checkers/xcodeproj.py
new file mode 100644
index 0000000..89c072d
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checkers/xcodeproj.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Checks Xcode project files."""
+
+import re
+
+
+class XcodeProjectFileChecker(object):
+
+    """Processes Xcode project file lines for checking style."""
+
+    def __init__(self, file_path, handle_style_error):
+        self.file_path = file_path
+        self.handle_style_error = handle_style_error
+        self.handle_style_error.turn_off_line_filtering()
+        self._development_region_regex = re.compile('developmentRegion = (?P<region>.+);')
+
+    def _check_development_region(self, line_index, line):
+        """Returns True when developmentRegion is detected."""
+        matched = self._development_region_regex.search(line)
+        if not matched:
+            return False
+        if matched.group('region') != 'English':
+            self.handle_style_error(line_index,
+                                    'xcodeproj/settings', 5,
+                                    'developmentRegion is not English.')
+        return True
+
+    def check(self, lines):
+        development_region_is_detected = False
+        for line_index, line in enumerate(lines):
+            if self._check_development_region(line_index, line):
+                development_region_is_detected = True
+
+        if not development_region_is_detected:
+            self.handle_style_error(len(lines),
+                                    'xcodeproj/settings', 5,
+                                    'Missing "developmentRegion = English".')
diff --git a/Tools/Scripts/webkitpy/style/checkers/xcodeproj_unittest.py b/Tools/Scripts/webkitpy/style/checkers/xcodeproj_unittest.py
new file mode 100644
index 0000000..9799ec0
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checkers/xcodeproj_unittest.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit test for xcodeproj.py."""
+
+import xcodeproj
+import unittest
+
+
+class TestErrorHandler(object):
+    """Error handler for XcodeProjectFileChecker unittests"""
+    def __init__(self, handler):
+        self.handler = handler
+
+    def turn_off_line_filtering(self):
+        pass
+
+    def __call__(self, line_number, category, confidence, message):
+        self.handler(self, line_number, category, confidence, message)
+        return True
+
+
+class XcodeProjectFileCheckerTest(unittest.TestCase):
+    """Tests XcodeProjectFileChecker class."""
+
+    def assert_no_error(self, lines):
+        def handler(error_handler, line_number, category, confidence, message):
+            self.fail('Unexpected error: %d %s %d %s' % (line_number, category, confidence, message))
+
+        error_handler = TestErrorHandler(handler)
+        checker = xcodeproj.XcodeProjectFileChecker('', error_handler)
+        checker.check(lines)
+
+    def assert_error(self, lines, expected_message):
+        self.had_error = False
+
+        def handler(error_handler, line_number, category, confidence, message):
+            self.assertEqual(expected_message, message)
+            self.had_error = True
+        error_handler = TestErrorHandler(handler)
+        checker = xcodeproj.XcodeProjectFileChecker('', error_handler)
+        checker.check(lines)
+        self.assert_(self.had_error, '%s should have error: %s.' % (lines, expected_message))
+
+    def test_detect_development_region(self):
+        self.assert_no_error(['developmentRegion = English;'])
+        self.assert_error([''], 'Missing "developmentRegion = English".')
+        self.assert_error(['developmentRegion = Japanese;'],
+                          'developmentRegion is not English.')
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/style/checkers/xml.py b/Tools/Scripts/webkitpy/style/checkers/xml.py
new file mode 100644
index 0000000..ff4a415
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checkers/xml.py
@@ -0,0 +1,45 @@
+# Copyright (C) 2010 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Checks WebKit style for XML files."""
+
+from __future__ import absolute_import
+
+from xml.parsers import expat
+
+
+class XMLChecker(object):
+    """Processes XML lines for checking style."""
+
+    def __init__(self, file_path, handle_style_error):
+        self._handle_style_error = handle_style_error
+        self._handle_style_error.turn_off_line_filtering()
+
+    def check(self, lines):
+        parser = expat.ParserCreate()
+        try:
+            for line in lines:
+                parser.Parse(line)
+                parser.Parse('\n')
+            parser.Parse('', True)
+        except expat.ExpatError, error:
+            self._handle_style_error(error.lineno, 'xml/syntax', 5, expat.ErrorString(error.code))
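+
+
+# Illustrative sketch, not part of webkitpy: checking a snippet of XML by hand,
+# assuming a style error handler with the usual (line_number, category,
+# confidence, message) call signature and a turn_off_line_filtering() method.
+#
+#   checker = XMLChecker('foo.xml', error_handler)
+#   checker.check(['<foo>', '  <bar/>', '</foo>'])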
diff --git a/Tools/Scripts/webkitpy/style/checkers/xml_unittest.py b/Tools/Scripts/webkitpy/style/checkers/xml_unittest.py
new file mode 100644
index 0000000..e486f5f
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checkers/xml_unittest.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2010 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit test for xml.py."""
+
+import unittest
+
+import xml
+
+
+class MockErrorHandler(object):
+    def __init__(self, handle_style_error):
+        self.turned_off_filtering = False
+        self._handle_style_error = handle_style_error
+
+    def turn_off_line_filtering(self):
+        self.turned_off_filtering = True
+
+    def __call__(self, line_number, category, confidence, message):
+        self._handle_style_error(self, line_number, category, confidence, message)
+        return True
+
+
+class XMLCheckerTest(unittest.TestCase):
+    """Tests XMLChecker class."""
+
+    def assert_no_error(self, xml_data):
+        def handle_style_error(mock_error_handler, line_number, category, confidence, message):
+            self.fail('Unexpected error: %d %s %d %s' % (line_number, category, confidence, message))
+
+        error_handler = MockErrorHandler(handle_style_error)
+        checker = xml.XMLChecker('foo.xml', error_handler)
+        checker.check(xml_data.split('\n'))
+        self.assertTrue(error_handler.turned_off_filtering)
+
+    def assert_error(self, expected_line_number, expected_category, xml_data):
+        def handle_style_error(mock_error_handler, line_number, category, confidence, message):
+            mock_error_handler.had_error = True
+            self.assertEquals(expected_line_number, line_number)
+            self.assertEquals(expected_category, category)
+
+        error_handler = MockErrorHandler(handle_style_error)
+        error_handler.had_error = False
+
+        checker = xml.XMLChecker('foo.xml', error_handler)
+        checker.check(xml_data.split('\n'))
+        self.assertTrue(error_handler.had_error)
+        self.assertTrue(error_handler.turned_off_filtering)
+
+    def mock_handle_style_error(self):
+        pass
+
+    def test_conflict_marker(self):
+        self.assert_error(1, 'xml/syntax', '<<<<<<< HEAD\n<foo>\n</foo>\n')
+
+    def test_extra_closing_tag(self):
+        self.assert_error(3, 'xml/syntax', '<foo>\n</foo>\n</foo>\n')
+
+    def test_init(self):
+        error_handler = MockErrorHandler(self.mock_handle_style_error)
+        checker = xml.XMLChecker('foo.xml', error_handler)
+        self.assertEquals(checker._handle_style_error, error_handler)
+
+    def test_missing_closing_tag(self):
+        self.assert_error(3, 'xml/syntax', '<foo>\n<bar>\n</foo>\n')
+
+    def test_no_error(self):
+        self.assert_no_error('<foo>\n</foo>')
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/style/error_handlers.py b/Tools/Scripts/webkitpy/style/error_handlers.py
new file mode 100644
index 0000000..99d5cb3
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/error_handlers.py
@@ -0,0 +1,164 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Defines style error handler classes.
+
+A style error handler is a function to call when a style error is
+found. Style error handlers can also have state. A class that represents
+a style error handler should implement the following methods.
+
+Methods:
+
+  __call__(self, line_number, category, confidence, message):
+
+    Handle the occurrence of a style error.
+
+    Check whether the error is reportable. If so, increment the total
+    error count and report the details. Note that error reporting can
+    be suppressed after reaching a certain number of reports.
+
+    Args:
+      line_number: The integer line number of the line containing the error.
+      category: The name of the category of the error, for example
+                "whitespace/newline".
+      confidence: An integer between 1 and 5 inclusive that represents the
+                  application's level of confidence in the error. The value
+                  5 means that we are certain of the problem, and the
+                  value 1 means that it could be a legitimate construct.
+      message: The error message to report.
+
+"""
+
+
+import sys
+
+
+class DefaultStyleErrorHandler(object):
+
+    """The default style error handler."""
+
+    def __init__(self, file_path, configuration, increment_error_count,
+                 line_numbers=None):
+        """Create a default style error handler.
+
+        Args:
+          file_path: The path to the file containing the error. This
+                     is used for reporting to the user.
+          configuration: A StyleProcessorConfiguration instance.
+          increment_error_count: A function that takes no arguments and
+                                 increments the total count of reportable
+                                 errors.
+          line_numbers: An array of line numbers of the lines for which
+                        style errors should be reported, or None if errors
+                        for all lines should be reported.  When it is not
+                        None, this array normally contains the line numbers
+                        corresponding to the modified lines of a patch.
+
+        """
+        if line_numbers is not None:
+            line_numbers = set(line_numbers)
+
+        self._file_path = file_path
+        self._configuration = configuration
+        self._increment_error_count = increment_error_count
+        self._line_numbers = line_numbers
+
+        # A string to integer dictionary cache of the number of reportable
+        # errors per category passed to this instance.
+        self._category_totals = {}
+
+    # Useful for unit testing.
+    def __eq__(self, other):
+        """Return whether this instance is equal to another."""
+        if self._configuration != other._configuration:
+            return False
+        if self._file_path != other._file_path:
+            return False
+        if self._increment_error_count != other._increment_error_count:
+            return False
+        if self._line_numbers != other._line_numbers:
+            return False
+
+        return True
+
+    # Useful for unit testing.
+    def __ne__(self, other):
+        # Python does not automatically deduce __ne__ from __eq__.
+        return not self.__eq__(other)
+
+    def _add_reportable_error(self, category):
+        """Increment the error count and return the new category total."""
+        self._increment_error_count() # Increment the total.
+
+        # Increment the category total.
+        if category not in self._category_totals:
+            self._category_totals[category] = 1
+        else:
+            self._category_totals[category] += 1
+
+        return self._category_totals[category]
+
+    def _max_reports(self, category):
+        """Return the maximum number of errors to report."""
+        if category not in self._configuration.max_reports_per_category:
+            return None
+        return self._configuration.max_reports_per_category[category]
+
+    def should_line_be_checked(self, line_number):
+        "Returns if a particular line should be checked"
+        # Was the line that was modified?
+        return self._line_numbers is None or line_number in self._line_numbers
+
+    def turn_off_line_filtering(self):
+        self._line_numbers = None
+
+    def __call__(self, line_number, category, confidence, message):
+        """Handle the occurrence of a style error.
+
+        See the docstring of this module for more information.
+
+        """
+        if not self.should_line_be_checked(line_number):
+            return False
+
+        if not self._configuration.is_reportable(category=category,
+                                                 confidence_in_error=confidence,
+                                                 file_path=self._file_path):
+            return False
+
+        category_total = self._add_reportable_error(category)
+
+        max_reports = self._max_reports(category)
+
+        if (max_reports is not None) and (category_total > max_reports):
+            # Then suppress displaying the error.
+            return False
+
+        self._configuration.write_style_error(category=category,
+                                              confidence_in_error=confidence,
+                                              file_path=self._file_path,
+                                              line_number=line_number,
+                                              message=message)
+        if category_total == max_reports:
+            self._configuration.stderr_write("Suppressing further [%s] reports "
+                                             "for this file.\n" % category)
+        return True
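+
+
+# Illustrative sketch, not part of webkitpy: wiring a DefaultStyleErrorHandler
+# up by hand.  The StyleProcessorConfiguration arguments mirror those used in
+# error_handlers_unittest.py; the import paths assume the webkitpy.style
+# package layout, and the remaining names are hypothetical.
+#
+#   from webkitpy.style.checker import StyleProcessorConfiguration
+#   from webkitpy.style.filter import FilterConfiguration
+#
+#   error_counts = {'total': 0}
+#
+#   def increment_error_count():
+#       error_counts['total'] += 1
+#
+#   configuration = StyleProcessorConfiguration(
+#       filter_configuration=FilterConfiguration(base_rules=["-whitespace", "+whitespace/tab"]),
+#       max_reports_per_category={"whitespace/tab": 2},
+#       min_confidence=3,
+#       output_format="vs7",
+#       stderr_write=sys.stderr.write)
+#
+#   handler = DefaultStyleErrorHandler(file_path="foo.h",
+#                                      configuration=configuration,
+#                                      increment_error_count=increment_error_count,
+#                                      line_numbers=None)
+#
+#   # Writes "foo.h(100):  Line contains tab character.  [whitespace/tab] [5]" to stderr.
+#   handler(100, "whitespace/tab", 5, "Line contains tab character.")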
diff --git a/Tools/Scripts/webkitpy/style/error_handlers_unittest.py b/Tools/Scripts/webkitpy/style/error_handlers_unittest.py
new file mode 100644
index 0000000..864bc0f
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/error_handlers_unittest.py
@@ -0,0 +1,196 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for error_handlers.py."""
+
+
+import unittest
+
+from checker import StyleProcessorConfiguration
+from error_handlers import DefaultStyleErrorHandler
+from filter import FilterConfiguration
+
+
+class DefaultStyleErrorHandlerTest(unittest.TestCase):
+
+    """Tests the DefaultStyleErrorHandler class."""
+
+    def setUp(self):
+        self._error_messages = []
+        self._error_count = 0
+
+    _category = "whitespace/tab"
+    """The category name for the tests in this class."""
+
+    _file_path = "foo.h"
+    """The file path for the tests in this class."""
+
+    def _mock_increment_error_count(self):
+        self._error_count += 1
+
+    def _mock_stderr_write(self, message):
+        self._error_messages.append(message)
+
+    def _style_checker_configuration(self):
+        """Return a StyleProcessorConfiguration instance for testing."""
+        base_rules = ["-whitespace", "+whitespace/tab"]
+        filter_configuration = FilterConfiguration(base_rules=base_rules)
+
+        return StyleProcessorConfiguration(
+                   filter_configuration=filter_configuration,
+                   max_reports_per_category={"whitespace/tab": 2},
+                   min_confidence=3,
+                   output_format="vs7",
+                   stderr_write=self._mock_stderr_write)
+
+    def _error_handler(self, configuration, line_numbers=None):
+        return DefaultStyleErrorHandler(configuration=configuration,
+                   file_path=self._file_path,
+                   increment_error_count=self._mock_increment_error_count,
+                   line_numbers=line_numbers)
+
+    def _check_initialized(self):
+        """Check that count and error messages are initialized."""
+        self.assertEquals(0, self._error_count)
+        self.assertEquals(0, len(self._error_messages))
+
+    def _call_error_handler(self, handle_error, confidence, line_number=100):
+        """Call the given error handler with a test error."""
+        handle_error(line_number=line_number,
+                     category=self._category,
+                     confidence=confidence,
+                     message="message")
+
+    def test_eq__true_return_value(self):
+        """Test the __eq__() method for the return value of True."""
+        handler1 = self._error_handler(configuration=None)
+        handler2 = self._error_handler(configuration=None)
+
+        self.assertTrue(handler1.__eq__(handler2))
+
+    def test_eq__false_return_value(self):
+        """Test the __eq__() method for the return value of False."""
+        def make_handler(configuration=self._style_checker_configuration(),
+                file_path='foo.txt', increment_error_count=lambda: True,
+                line_numbers=[100]):
+            return DefaultStyleErrorHandler(configuration=configuration,
+                       file_path=file_path,
+                       increment_error_count=increment_error_count,
+                       line_numbers=line_numbers)
+
+        handler = make_handler()
+
+        # Establish a baseline for our comparisons below.
+        self.assertTrue(handler.__eq__(make_handler()))
+
+        # Verify that a difference in any argument causes equality to fail.
+        self.assertFalse(handler.__eq__(make_handler(configuration=None)))
+        self.assertFalse(handler.__eq__(make_handler(file_path='bar.txt')))
+        self.assertFalse(handler.__eq__(make_handler(increment_error_count=None)))
+        self.assertFalse(handler.__eq__(make_handler(line_numbers=[50])))
+
+    def test_ne(self):
+        """Test the __ne__() method."""
+        # By default, __ne__ always returns true on different objects.
+        # Thus, check just the distinguishing case to verify that the
+        # code defines __ne__.
+        handler1 = self._error_handler(configuration=None)
+        handler2 = self._error_handler(configuration=None)
+
+        self.assertFalse(handler1.__ne__(handler2))
+
+    def test_non_reportable_error(self):
+        """Test __call__() with a non-reportable error."""
+        self._check_initialized()
+        configuration = self._style_checker_configuration()
+
+        confidence = 1
+        # Confirm the error is not reportable.
+        self.assertFalse(configuration.is_reportable(self._category,
+                                                     confidence,
+                                                     self._file_path))
+        error_handler = self._error_handler(configuration)
+        self._call_error_handler(error_handler, confidence)
+
+        self.assertEquals(0, self._error_count)
+        self.assertEquals([], self._error_messages)
+
+    # Also serves as a reportable error test.
+    def test_max_reports_per_category(self):
+        """Test error report suppression in __call__() method."""
+        self._check_initialized()
+        configuration = self._style_checker_configuration()
+        error_handler = self._error_handler(configuration)
+
+        confidence = 5
+
+        # First call: usual reporting.
+        self._call_error_handler(error_handler, confidence)
+        self.assertEquals(1, self._error_count)
+        self.assertEquals(1, len(self._error_messages))
+        self.assertEquals(self._error_messages,
+                          ["foo.h(100):  message  [whitespace/tab] [5]\n"])
+
+        # Second call: suppression message reported.
+        self._call_error_handler(error_handler, confidence)
+        # The "Suppressing further..." message counts as an additional
+        # message (but not as an addition to the error count).
+        self.assertEquals(2, self._error_count)
+        self.assertEquals(3, len(self._error_messages))
+        self.assertEquals(self._error_messages[-2],
+                          "foo.h(100):  message  [whitespace/tab] [5]\n")
+        self.assertEquals(self._error_messages[-1],
+                          "Suppressing further [whitespace/tab] reports "
+                          "for this file.\n")
+
+        # Third call: no report.
+        self._call_error_handler(error_handler, confidence)
+        self.assertEquals(3, self._error_count)
+        self.assertEquals(3, len(self._error_messages))
+
+    def test_line_numbers(self):
+        """Test the line_numbers parameter."""
+        self._check_initialized()
+        configuration = self._style_checker_configuration()
+        error_handler = self._error_handler(configuration,
+                                            line_numbers=[50])
+        confidence = 5
+
+        # Error on non-modified line: no error.
+        self._call_error_handler(error_handler, confidence, line_number=60)
+        self.assertEquals(0, self._error_count)
+        self.assertEquals([], self._error_messages)
+
+        # Error on modified line: error.
+        self._call_error_handler(error_handler, confidence, line_number=50)
+        self.assertEquals(1, self._error_count)
+        self.assertEquals(self._error_messages,
+                          ["foo.h(50):  message  [whitespace/tab] [5]\n"])
+
+        # Error on non-modified line after turning off line filtering: error.
+        error_handler.turn_off_line_filtering()
+        self._call_error_handler(error_handler, confidence, line_number=60)
+        self.assertEquals(2, self._error_count)
+        self.assertEquals(self._error_messages,
+                          ['foo.h(50):  message  [whitespace/tab] [5]\n',
+                           'foo.h(60):  message  [whitespace/tab] [5]\n',
+                           'Suppressing further [whitespace/tab] reports for this file.\n'])
diff --git a/Tools/Scripts/webkitpy/style/filereader.py b/Tools/Scripts/webkitpy/style/filereader.py
new file mode 100644
index 0000000..1181ad4
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/filereader.py
@@ -0,0 +1,154 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+# Copyright (C) 2010 ProFUSION embedded systems
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Supports reading and processing text files."""
+
+import codecs
+import logging
+import os
+import sys
+
+
+_log = logging.getLogger(__name__)
+
+
+class TextFileReader(object):
+
+    """Supports reading and processing text files.
+
+       Attributes:
+         file_count: The total number of files passed to this instance
+                     for processing, including non-text files and files
+                     that should be skipped.
+         delete_only_file_count: The total number of files that this
+                                 instance does not actually process
+                                 because they do not have any modified
+                                 lines, but that should still be treated
+                                 as processed.
+
+    """
+
+    def __init__(self, filesystem, processor):
+        """Create an instance.
+
+        Arguments:
+          filesystem: A FileSystem instance.
+          processor: A ProcessorBase instance.
+
+        """
+        # FIXME: Although TextFileReader requires a FileSystem it circumvents it in two places!
+        self.filesystem = filesystem
+        self._processor = processor
+        self.file_count = 0
+        self.delete_only_file_count = 0
+
+    def _read_lines(self, file_path):
+        """Read the file at a path, and return its lines.
+
+        Raises:
+          IOError: If the file does not exist or cannot be read.
+
+        """
+        # Support the UNIX convention of using "-" for stdin.
+        if file_path == '-':
+            file = codecs.StreamReaderWriter(sys.stdin,
+                                             codecs.getreader('utf8'),
+                                             codecs.getwriter('utf8'),
+                                             'replace')
+        else:
+            # We do not open the file with universal newline support
+            # (codecs does not support it anyway), so the resulting
+            # lines contain trailing "\r" characters if we are reading
+            # a file with CRLF endings.
+            # FIXME: This should use self.filesystem
+            file = codecs.open(file_path, 'r', 'utf8', 'replace')
+
+        try:
+            contents = file.read()
+        finally:
+            file.close()
+
+        lines = contents.split('\n')
+        return lines
+
+    def process_file(self, file_path, **kwargs):
+        """Process the given file by calling the processor's process() method.
+
+        Args:
+          file_path: The path of the file to process.
+          **kwargs: Any additional keyword parameters that should be passed
+                    to the processor's process() method.  The process()
+                    method should support these keyword arguments.
+
+        Raises:
+          SystemExit: If no file at file_path exists.
+
+        """
+        self.file_count += 1
+
+        if not self.filesystem.exists(file_path) and file_path != "-":
+            _log.error("File does not exist: '%s'" % file_path)
+            sys.exit(1)  # FIXME: This should throw or return instead of exiting directly.
+
+        if not self._processor.should_process(file_path):
+            _log.debug("Skipping file: '%s'" % file_path)
+            return
+        _log.debug("Processing file: '%s'" % file_path)
+
+        try:
+            lines = self._read_lines(file_path)
+        except IOError, err:
+            message = ("Could not read file. Skipping: '%s'\n  %s" % (file_path, err))
+            _log.warn(message)
+            return
+
+        self._processor.process(lines, file_path, **kwargs)
+
+    def _process_directory(self, directory):
+        """Process all files in the given directory, recursively."""
+        # FIXME: We should consider moving to self.filesystem.files_under() (or adding walk() to FileSystem)
+        for dir_path, dir_names, file_names in os.walk(directory):
+            for file_name in file_names:
+                file_path = self.filesystem.join(dir_path, file_name)
+                self.process_file(file_path)
+
+    def process_paths(self, paths):
+        for path in paths:
+            if self.filesystem.isdir(path):
+                self._process_directory(directory=path)
+            else:
+                self.process_file(path)
+
+    def count_delete_only_file(self):
+        """Count up files that contains only deleted lines.
+
+        Files which has no modified or newly-added lines don't need
+        to check style, but should be treated as checked. For that
+        purpose, we just count up the number of such files.
+        """
+        self.delete_only_file_count += 1
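+
+
+# Illustrative sketch, not part of webkitpy: a minimal processor and how it is
+# fed by a TextFileReader.  ProcessorBase lives in webkitpy.style.checker (see
+# filereader_unittest.py); the processor class and the path below are
+# hypothetical.
+#
+#   from webkitpy.common.system.filesystem import FileSystem
+#   from webkitpy.style.checker import ProcessorBase
+#
+#   class LineCountingProcessor(ProcessorBase):
+#       def __init__(self):
+#           self.line_count = 0
+#
+#       def should_process(self, file_path):
+#           return file_path.endswith('.txt')
+#
+#       def process(self, lines, file_path, **kwargs):
+#           self.line_count += len(lines)
+#
+#   processor = LineCountingProcessor()
+#   reader = TextFileReader(FileSystem(), processor)
+#   reader.process_paths(['Tools/Scripts'])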
diff --git a/Tools/Scripts/webkitpy/style/filereader_unittest.py b/Tools/Scripts/webkitpy/style/filereader_unittest.py
new file mode 100644
index 0000000..bcf94f3
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/filereader_unittest.py
@@ -0,0 +1,155 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.filesystem import FileSystem
+from webkitpy.common.system.logtesting import LoggingTestCase
+from webkitpy.style.checker import ProcessorBase
+from webkitpy.style.filereader import TextFileReader
+
+
+class TextFileReaderTest(LoggingTestCase):
+
+    class MockProcessor(ProcessorBase):
+
+        """A processor for test purposes.
+
+        This processor simply records the parameters passed to its process()
+        method for later checking by the unittest test methods.
+
+        """
+
+        def __init__(self):
+            self.processed = []
+            """The parameters passed for all calls to the process() method."""
+
+        def should_process(self, file_path):
+            return not file_path.endswith('should_not_process.txt')
+
+        def process(self, lines, file_path, test_kwarg=None):
+            self.processed.append((lines, file_path, test_kwarg))
+
+    def setUp(self):
+        LoggingTestCase.setUp(self)
+        # FIXME: This should be a MockFileSystem once TextFileReader is moved entirely on top of FileSystem.
+        self.filesystem = FileSystem()
+        self._temp_dir = str(self.filesystem.mkdtemp())
+        self._processor = TextFileReaderTest.MockProcessor()
+        self._file_reader = TextFileReader(self.filesystem, self._processor)
+
+    def tearDown(self):
+        LoggingTestCase.tearDown(self)
+        self.filesystem.rmtree(self._temp_dir)
+
+    def _create_file(self, rel_path, text):
+        """Create a file with given text and return the path to the file."""
+        # FIXME: There are better/more secure APIs for creating tmp file paths.
+        file_path = self.filesystem.join(self._temp_dir, rel_path)
+        self.filesystem.write_text_file(file_path, text)
+        return file_path
+
+    def _passed_to_processor(self):
+        """Return the parameters passed to MockProcessor.process()."""
+        return self._processor.processed
+
+    def _assert_file_reader(self, passed_to_processor, file_count):
+        """Assert the state of the file reader."""
+        self.assertEquals(passed_to_processor, self._passed_to_processor())
+        self.assertEquals(file_count, self._file_reader.file_count)
+
+    def test_process_file__does_not_exist(self):
+        try:
+            self._file_reader.process_file('does_not_exist.txt')
+        except SystemExit, err:
+            self.assertEquals(str(err), '1')
+        else:
+            self.fail('No Exception raised.')
+        self._assert_file_reader([], 1)
+        self.assertLog(["ERROR: File does not exist: 'does_not_exist.txt'\n"])
+
+    def test_process_file__is_dir(self):
+        temp_dir = self.filesystem.join(self._temp_dir, 'test_dir')
+        self.filesystem.maybe_make_directory(temp_dir)
+
+        self._file_reader.process_file(temp_dir)
+
+        # Because the log message below contains exception text, it is
+        # possible that the text varies across platforms.  For this reason,
+        # we check only the portion of the log message that we control,
+        # namely the text at the beginning.
+        log_messages = self.logMessages()
+        # We remove the message we are looking at to prevent the tearDown()
+        # from raising an exception when it asserts that no log messages
+        # remain.
+        message = log_messages.pop()
+
+        self.assertTrue(message.startswith("WARNING: Could not read file. Skipping: '%s'\n  " % temp_dir))
+
+        self._assert_file_reader([], 1)
+
+    def test_process_file__should_not_process(self):
+        file_path = self._create_file('should_not_process.txt', 'contents')
+
+        self._file_reader.process_file(file_path)
+        self._assert_file_reader([], 1)
+
+    def test_process_file__multiple_lines(self):
+        file_path = self._create_file('foo.txt', 'line one\r\nline two\n')
+
+        self._file_reader.process_file(file_path)
+        processed = [(['line one\r', 'line two', ''], file_path, None)]
+        self._assert_file_reader(processed, 1)
+
+    def test_process_file__file_stdin(self):
+        file_path = self._create_file('-', 'file contents')
+
+        self._file_reader.process_file(file_path=file_path, test_kwarg='foo')
+        processed = [(['file contents'], file_path, 'foo')]
+        self._assert_file_reader(processed, 1)
+
+    def test_process_file__with_kwarg(self):
+        file_path = self._create_file('foo.txt', 'file contents')
+
+        self._file_reader.process_file(file_path=file_path, test_kwarg='foo')
+        processed = [(['file contents'], file_path, 'foo')]
+        self._assert_file_reader(processed, 1)
+
+    def test_process_paths(self):
+        # We test a list of paths that contains both a file and a directory.
+        dir = self.filesystem.join(self._temp_dir, 'foo_dir')
+        self.filesystem.maybe_make_directory(dir)
+
+        file_path1 = self._create_file('file1.txt', 'foo')
+
+        rel_path = self.filesystem.join('foo_dir', 'file2.txt')
+        file_path2 = self._create_file(rel_path, 'bar')
+
+        self._file_reader.process_paths([dir, file_path1])
+        processed = [(['bar'], file_path2, None),
+                     (['foo'], file_path1, None)]
+        self._assert_file_reader(processed, 2)
+
+    def test_count_delete_only_file(self):
+        self._file_reader.count_delete_only_file()
+        delete_only_file_count = self._file_reader.delete_only_file_count
+        self.assertEquals(delete_only_file_count, 1)
diff --git a/Tools/Scripts/webkitpy/style/filter.py b/Tools/Scripts/webkitpy/style/filter.py
new file mode 100644
index 0000000..608a9e6
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/filter.py
@@ -0,0 +1,278 @@
+# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Contains filter-related code."""
+
+
+def validate_filter_rules(filter_rules, all_categories):
+    """Validate the given filter rules, and raise a ValueError if not valid.
+
+    Args:
+      filter_rules: A list of boolean filter rules, for example--
+                    ["-whitespace", "+whitespace/braces"]
+      all_categories: A list of all available category names, for example--
+                      ["whitespace/tabs", "whitespace/braces"]
+
+    Raises:
+      ValueError: An error occurs if a filter rule does not begin
+                  with "+" or "-" or if a filter rule does not match
+                  the beginning of some category name in the list
+                  of all available categories.
+
+    """
+    for rule in filter_rules:
+        if not (rule.startswith('+') or rule.startswith('-')):
+            raise ValueError('Invalid filter rule "%s": every rule '
+                             "must start with + or -." % rule)
+
+        for category in all_categories:
+            if category.startswith(rule[1:]):
+                break
+        else:
+            raise ValueError('Suspected incorrect filter rule "%s": '
+                             "the rule does not match the beginning "
+                             "of any category name." % rule)
+
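+# Example (illustrative sketch; these calls are not executed by this
+# module): with all_categories = ["whitespace/tabs", "whitespace/braces"],
+# the rules ["-whitespace", "+whitespace/braces"] validate cleanly, while
+# a rule with no leading "+"/"-" (e.g. "whitespace") or one matching no
+# category (e.g. "+xxx") raises ValueError:
+#
+#   validate_filter_rules(["-whitespace", "+whitespace/braces"],
+#                         ["whitespace/tabs", "whitespace/braces"])
+#   validate_filter_rules(["+xxx"], ["whitespace/tabs"])  # ValueError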
+
+class _CategoryFilter(object):
+
+    """Filters whether to check style categories."""
+
+    def __init__(self, filter_rules=None):
+        """Create a category filter.
+
+        Args:
+          filter_rules: A list of strings that are filter rules, which
+                        are strings beginning with the plus or minus
+                        symbol (+/-).  The list should include any
+                        default filter rules at the beginning.
+                        Defaults to the empty list.
+
+        Raises:
+          ValueError: Invalid filter rule if a rule does not start with
+                      plus ("+") or minus ("-").
+
+        """
+        if filter_rules is None:
+            filter_rules = []
+
+        self._filter_rules = filter_rules
+        self._should_check_category = {} # Cached dictionary of category to True/False
+
+    def __str__(self):
+        return ",".join(self._filter_rules)
+
+    # Useful for unit testing.
+    def __eq__(self, other):
+        """Return whether this CategoryFilter instance is equal to another."""
+        return self._filter_rules == other._filter_rules
+
+    # Useful for unit testing.
+    def __ne__(self, other):
+        # Python does not automatically deduce this from __eq__().
+        return not (self == other)
+
+    def should_check(self, category):
+        """Return whether the category should be checked.
+
+        The rules for determining whether a category should be checked
+        are as follows.  By default all categories should be checked.
+        Then apply the filter rules in order from first to last, with
+        later flags taking precedence.
+
+        A filter rule applies to a category if the string after the
+        leading plus/minus (+/-) matches the beginning of the category
+        name.  A plus (+) means the category should be checked, while a
+        minus (-) means the category should not be checked.
+
+        """
+        if category in self._should_check_category:
+            return self._should_check_category[category]
+
+        should_check = True # All categories checked by default.
+        for rule in self._filter_rules:
+            if not category.startswith(rule[1:]):
+                continue
+            should_check = rule.startswith('+')
+        self._should_check_category[category] = should_check # Update cache.
+        return should_check
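+
+    # For illustration (a sketch of the should_check() precedence described
+    # above; the rules here are example values):
+    #
+    #   filter = _CategoryFilter(["-", "+whitespace/braces"])
+    #   filter.should_check("whitespace/braces")  # True: re-enabled by the
+    #                                             # later "+..." rule.
+    #   filter.should_check("whitespace/tabs")    # False: disabled by "-".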
+
+
+class FilterConfiguration(object):
+
+    """Supports filtering with path-specific and user-specified rules."""
+
+    def __init__(self, base_rules=None, path_specific=None, user_rules=None):
+        """Create a FilterConfiguration instance.
+
+        Args:
+          base_rules: The starting list of filter rules to use for
+                      processing.  The default is the empty list, which
+                      by itself would mean that all categories should be
+                      checked.
+
+          path_specific: A list of (sub_paths, path_rules) pairs
+                         that stores the path-specific filter rules for
+                         appending to the base rules.
+                             The "sub_paths" value is a list of path
+                         substrings.  If a file path contains one of the
+                         substrings, then the corresponding path rules
+                         are appended.  The first substring match takes
+                         precedence, i.e. only the first match triggers
+                         an append.
+                             The "path_rules" value is a list of filter
+                         rules that can be appended to the base rules.
+
+          user_rules: A list of filter rules that is always appended
+                      to the base rules and any path rules.  In other
+                      words, the user rules take precedence over
+                      everything else.  In practice, the user rules are
+                      provided by the user from the command line.
+
+        """
+        if base_rules is None:
+            base_rules = []
+        if path_specific is None:
+            path_specific = []
+        if user_rules is None:
+            user_rules = []
+
+        self._base_rules = base_rules
+        self._path_specific = path_specific
+        self._path_specific_lower = None
+        """The backing store for self._get_path_specific_lower()."""
+
+        self._user_rules = user_rules
+
+        self._path_rules_to_filter = {}
+        """Cached dictionary of path rules to CategoryFilter instance."""
+
+        # The same CategoryFilter instance can be shared across
+        # multiple keys in this dictionary.  This allows us to take
+        # greater advantage of the caching done by
+        # CategoryFilter.should_check().
+        self._path_to_filter = {}
+        """Cached dictionary of file path to CategoryFilter instance."""
+
+    # Useful for unit testing.
+    def __eq__(self, other):
+        """Return whether this FilterConfiguration is equal to another."""
+        if self._base_rules != other._base_rules:
+            return False
+        if self._path_specific != other._path_specific:
+            return False
+        if self._user_rules != other._user_rules:
+            return False
+
+        return True
+
+    # Useful for unit testing.
+    def __ne__(self, other):
+        # Python does not automatically deduce this from __eq__().
+        return not self.__eq__(other)
+
+    # We use the prefix "_get" since the name "_path_specific_lower"
+    # is already taken up by the data attribute backing store.
+    def _get_path_specific_lower(self):
+        """Return a copy of self._path_specific with the paths lower-cased."""
+        if self._path_specific_lower is None:
+            self._path_specific_lower = []
+            for (sub_paths, path_rules) in self._path_specific:
+                sub_paths = map(str.lower, sub_paths)
+                self._path_specific_lower.append((sub_paths, path_rules))
+        return self._path_specific_lower
+
+    def _path_rules_from_path(self, path):
+        """Determine the path-specific rules to use, and return as a tuple.
+
+        This method returns a tuple rather than a list so the return
+        value can be passed to _filter_from_path_rules() without change.
+
+        """
+        path = path.lower()
+        for (sub_paths, path_rules) in self._get_path_specific_lower():
+            for sub_path in sub_paths:
+                if path.find(sub_path) > -1:
+                    return tuple(path_rules)
+        return () # Default to the empty tuple.
+
+    def _filter_from_path_rules(self, path_rules):
+        """Return the CategoryFilter associated to the given path rules.
+
+        Args:
+          path_rules: A tuple of path rules.  We require a tuple rather
+                      than a list so the value can be used as a dictionary
+                      key in self._path_rules_to_filter.
+
+        """
+        # We reuse the same CategoryFilter where possible to take
+        # advantage of the caching they do.
+        if path_rules not in self._path_rules_to_filter:
+            rules = list(self._base_rules) # Make a copy
+            rules.extend(path_rules)
+            rules.extend(self._user_rules)
+            self._path_rules_to_filter[path_rules] = _CategoryFilter(rules)
+
+        return self._path_rules_to_filter[path_rules]
+
+    def _filter_from_path(self, path):
+        """Return the CategoryFilter associated to a path."""
+        if path not in self._path_to_filter:
+            path_rules = self._path_rules_from_path(path)
+            filter = self._filter_from_path_rules(path_rules)
+            self._path_to_filter[path] = filter
+
+        return self._path_to_filter[path]
+
+    def should_check(self, category, path):
+        """Return whether the given category should be checked.
+
+        This method determines whether a category should be checked
+        by checking the category name against the filter rules for
+        the given path.
+
+        For a given path, the filter rules are the combination of
+        the base rules, the path-specific rules, and the user-provided
+        rules -- in that order.  As we will describe below, later rules
+        in the list take precedence.  The path-specific rules are the
+        rules corresponding to the first element of the "path_specific"
+        parameter that contains a string case-insensitively matching
+        some substring of the path.  If there is no such element,
+        there are no path-specific rules for that path.
+
+        Given a list of filter rules, the logic for determining whether
+        a category should be checked is as follows.  By default all
+        categories should be checked.  Then apply the filter rules in
+        order from first to last, with later flags taking precedence.
+
+        A filter rule applies to a category if the string after the
+        leading plus/minus (+/-) matches the beginning of the category
+        name.  A plus (+) means the category should be checked, while a
+        minus (-) means the category should not be checked.
+
+        Args:
+          category: The category name.
+          path: The path of the file being checked.
+
+        """
+        return self._filter_from_path(path).should_check(category)
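+
+    # For illustration (a sketch; the rules and path below are made-up
+    # example values):
+    #
+    #   config = FilterConfiguration(
+    #       base_rules=["-"],
+    #       path_specific=[(["webkit/gtk/"], ["+readability"])],
+    #       user_rules=["-readability/naming"])
+    #   config.should_check("readability/naming", "WebKit/gtk/foo.cpp")
+    #   # False: the user rule is appended last, so it overrides the
+    #   # path-specific "+readability" rule for this path.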
+
diff --git a/Tools/Scripts/webkitpy/style/filter_unittest.py b/Tools/Scripts/webkitpy/style/filter_unittest.py
new file mode 100644
index 0000000..7b8a540
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/filter_unittest.py
@@ -0,0 +1,256 @@
+# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for filter.py."""
+
+import unittest
+
+from filter import _CategoryFilter as CategoryFilter
+from filter import validate_filter_rules
+from filter import FilterConfiguration
+
+# On Testing __eq__() and __ne__():
+#
+# In the tests below, we deliberately do not use assertEquals() or
+# assertNotEquals() to test __eq__() or __ne__().  We do this to be
+# very explicit about what we are testing, especially in the case
+# of assertNotEquals().
+#
+# Part of the reason is that it is not immediately clear what
+# expression the unittest module uses to assert "not equals" -- the
+# negation of __eq__() or __ne__(), which are not necessarily
+# equivalent expressions in Python.  For example, from Python's "Data
+# Model" documentation--
+#
+#   "There are no implied relationships among the comparison
+#    operators. The truth of x==y does not imply that x!=y is
+#    false.  Accordingly, when defining __eq__(), one should
+#    also define __ne__() so that the operators will behave as
+#    expected."
+#
+#   (from http://docs.python.org/reference/datamodel.html#object.__ne__ )
+
+class ValidateFilterRulesTest(unittest.TestCase):
+
+    """Tests validate_filter_rules() function."""
+
+    def test_validate_filter_rules(self):
+        all_categories = ["tabs", "whitespace", "build/include"]
+
+        bad_rules = [
+            "tabs",
+            "*tabs",
+            " tabs",
+            " +tabs",
+            "+whitespace/newline",
+            "+xxx",
+            ]
+
+        good_rules = [
+            "+tabs",
+            "-tabs",
+            "+build"
+            ]
+
+        for rule in bad_rules:
+            self.assertRaises(ValueError, validate_filter_rules,
+                              [rule], all_categories)
+
+        for rule in good_rules:
+            # This works: no error.
+            validate_filter_rules([rule], all_categories)
+
+
+class CategoryFilterTest(unittest.TestCase):
+
+    """Tests CategoryFilter class."""
+
+    def test_init(self):
+        """Test __init__ method."""
+        # Test that the attributes are getting set correctly.
+        filter = CategoryFilter(["+"])
+        self.assertEquals(["+"], filter._filter_rules)
+
+    def test_init_default_arguments(self):
+        """Test __init__ method default arguments."""
+        filter = CategoryFilter()
+        self.assertEquals([], filter._filter_rules)
+
+    def test_str(self):
+        """Test __str__ "to string" operator."""
+        filter = CategoryFilter(["+a", "-b"])
+        self.assertEquals(str(filter), "+a,-b")
+
+    def test_eq(self):
+        """Test __eq__ equality function."""
+        filter1 = CategoryFilter(["+a", "+b"])
+        filter2 = CategoryFilter(["+a", "+b"])
+        filter3 = CategoryFilter(["+b", "+a"])
+
+        # See the notes at the top of this module about testing
+        # __eq__() and __ne__().
+        self.assertTrue(filter1.__eq__(filter2))
+        self.assertFalse(filter1.__eq__(filter3))
+
+    def test_ne(self):
+        """Test __ne__ inequality function."""
+        # By default, __ne__ always returns true on different objects.
+        # Thus, just check the distinguishing case to verify that the
+        # code defines __ne__.
+        #
+        # Also, see the notes at the top of this module about testing
+        # __eq__() and __ne__().
+        self.assertFalse(CategoryFilter().__ne__(CategoryFilter()))
+
+    def test_should_check(self):
+        """Test should_check() method."""
+        filter = CategoryFilter()
+        self.assertTrue(filter.should_check("everything"))
+        # Check a second time to exercise cache.
+        self.assertTrue(filter.should_check("everything"))
+
+        filter = CategoryFilter(["-"])
+        self.assertFalse(filter.should_check("anything"))
+        # Check a second time to exercise cache.
+        self.assertFalse(filter.should_check("anything"))
+
+        filter = CategoryFilter(["-", "+ab"])
+        self.assertTrue(filter.should_check("abc"))
+        self.assertFalse(filter.should_check("a"))
+
+        filter = CategoryFilter(["+", "-ab"])
+        self.assertFalse(filter.should_check("abc"))
+        self.assertTrue(filter.should_check("a"))
+
+
+class FilterConfigurationTest(unittest.TestCase):
+
+    """Tests FilterConfiguration class."""
+
+    def _config(self, base_rules, path_specific, user_rules):
+        """Return a FilterConfiguration instance."""
+        return FilterConfiguration(base_rules=base_rules,
+                                   path_specific=path_specific,
+                                   user_rules=user_rules)
+
+    def test_init(self):
+        """Test __init__ method."""
+        # Test that the attributes are getting set correctly.
+        # We use parameter values that are different from the defaults.
+        base_rules = ["-"]
+        path_specific = [(["path"], ["+a"])]
+        user_rules = ["+"]
+
+        config = self._config(base_rules, path_specific, user_rules)
+
+        self.assertEquals(base_rules, config._base_rules)
+        self.assertEquals(path_specific, config._path_specific)
+        self.assertEquals(user_rules, config._user_rules)
+
+    def test_default_arguments(self):
+        # Test that the attributes are getting set correctly to the defaults.
+        config = FilterConfiguration()
+
+        self.assertEquals([], config._base_rules)
+        self.assertEquals([], config._path_specific)
+        self.assertEquals([], config._user_rules)
+
+    def test_eq(self):
+        """Test __eq__ method."""
+        # See the notes at the top of this module about testing
+        # __eq__() and __ne__().
+        self.assertTrue(FilterConfiguration().__eq__(FilterConfiguration()))
+
+        # Verify that a difference in any argument causes equality to fail.
+        config = FilterConfiguration()
+
+        # These parameter values are different from the defaults.
+        base_rules = ["-"]
+        path_specific = [(["path"], ["+a"])]
+        user_rules = ["+"]
+
+        self.assertFalse(config.__eq__(FilterConfiguration(
+                                           base_rules=base_rules)))
+        self.assertFalse(config.__eq__(FilterConfiguration(
+                                           path_specific=path_specific)))
+        self.assertFalse(config.__eq__(FilterConfiguration(
+                                           user_rules=user_rules)))
+
+    def test_ne(self):
+        """Test __ne__ method."""
+        # By default, __ne__ always returns true on different objects.
+        # Thus, just check the distinguishing case to verify that the
+        # code defines __ne__.
+        #
+        # Also, see the notes at the top of this module about testing
+        # __eq__() and __ne__().
+        self.assertFalse(FilterConfiguration().__ne__(FilterConfiguration()))
+
+    def test_base_rules(self):
+        """Test effect of base_rules on should_check()."""
+        base_rules = ["-b"]
+        path_specific = []
+        user_rules = []
+
+        config = self._config(base_rules, path_specific, user_rules)
+
+        self.assertTrue(config.should_check("a", "path"))
+        self.assertFalse(config.should_check("b", "path"))
+
+    def test_path_specific(self):
+        """Test effect of path_rules_specifier on should_check()."""
+        base_rules = ["-"]
+        path_specific = [(["path1"], ["+b"]),
+                         (["path2"], ["+c"])]
+        user_rules = []
+
+        config = self._config(base_rules, path_specific, user_rules)
+
+        self.assertFalse(config.should_check("c", "path1"))
+        self.assertTrue(config.should_check("c", "path2"))
+        # Test that first match takes precedence.
+        self.assertFalse(config.should_check("c", "path2/path1"))
+
+    def test_path_with_different_case(self):
+        """Test a path that differs only in case."""
+        base_rules = ["-"]
+        path_specific = [(["Foo/"], ["+whitespace"])]
+        user_rules = []
+
+        config = self._config(base_rules, path_specific, user_rules)
+
+        self.assertFalse(config.should_check("whitespace", "Fooo/bar.txt"))
+        self.assertTrue(config.should_check("whitespace", "Foo/bar.txt"))
+        # Test different case.
+        self.assertTrue(config.should_check("whitespace", "FOO/bar.txt"))
+
+    def test_user_rules(self):
+        """Test effect of user_rules on should_check()."""
+        base_rules = ["-"]
+        path_specific = []
+        user_rules = ["+b"]
+
+        config = self._config(base_rules, path_specific, user_rules)
+
+        self.assertFalse(config.should_check("a", "path"))
+        self.assertTrue(config.should_check("b", "path"))
+
diff --git a/Tools/Scripts/webkitpy/style/main.py b/Tools/Scripts/webkitpy/style/main.py
new file mode 100644
index 0000000..574368a
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/main.py
@@ -0,0 +1,162 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import codecs
+import logging
+import sys
+
+import webkitpy.style.checker as checker
+from webkitpy.style.patchreader import PatchReader
+from webkitpy.style.checker import StyleProcessor
+from webkitpy.style.filereader import TextFileReader
+from webkitpy.common.host import Host
+
+
+_log = logging.getLogger(__name__)
+
+
+def change_directory(filesystem, checkout_root, paths):
+    """Change the working directory to the WebKit checkout root, if possible.
+
+    If every path in the paths parameter is below the checkout root (or if
+    the paths parameter is empty or None), this method changes the current
+    working directory to the checkout root and converts the paths parameter
+    as described below.
+        This allows the paths being checked to be displayed relative to the
+    checkout root, and for path-specific style checks to work as expected.
+    Path-specific checks include whether files should be skipped, whether
+    custom style rules should apply to certain files, etc.
+
+    Returns:
+      paths: A copy of the paths parameter -- possibly converted, as follows.
+             If this method changed the current working directory to the
+             checkout root, then the list is the paths parameter converted to
+             normalized paths relative to the checkout root.
+
+    Args:
+      paths: A list of paths to the files that should be checked for style.
+             This argument can be None or the empty list if a git commit
+             or all changes under the checkout root should be checked.
+      checkout_root: The path to the root of the WebKit checkout.
+
+    """
+    if paths is not None:
+        paths = list(paths)
+
+    if paths:
+        # Then try converting all of the paths to paths relative to
+        # the checkout root.
+        rel_paths = []
+        for path in paths:
+            rel_path = filesystem.relpath(path, checkout_root)
+            if rel_path.startswith(filesystem.pardir):
+                # Then the path is not below the checkout root.  Since all
+                # paths should be interpreted relative to the same root,
+                # do not interpret any of the paths as relative to the
+                # checkout root.  Interpret all of them relative to the
+                # current working directory, and do not change the current
+                # working directory.
+                _log.warn(
+"""Path-dependent style checks may not work correctly:
+
+  One of the given paths is outside the WebKit checkout of the current
+  working directory:
+
+    Path: %s
+    Checkout root: %s
+
+  Pass only files below the checkout root to ensure correct results.
+  See the help documentation for more info.
+"""
+                          % (path, checkout_root))
+
+                return paths
+            rel_paths.append(rel_path)
+        # If we got here, the conversion was successful.
+        paths = rel_paths
+
+    _log.debug("Changing to checkout root: " + checkout_root)
+    filesystem.chdir(checkout_root)
+
+    return paths
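+
+# For illustration (a sketch, not executed here): with a checkout root of
+# "/WebKit", paths ["/WebKit/foo1.txt", "/WebKit/foo2.txt"] become
+# ["foo1.txt", "foo2.txt"] and the working directory changes to "/WebKit";
+# if any path lies outside the checkout (e.g. "/outside/foo2.txt"), the
+# function logs a warning, leaves the working directory alone, and returns
+# the paths unchanged.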
+
+
+class CheckWebKitStyle(object):
+    def _engage_awesome_stderr_hacks(self):
+        # Change stderr to write with replacement characters so we don't die
+        # if we try to print something containing non-ASCII characters.
+        stderr = codecs.StreamReaderWriter(sys.stderr,
+                                           codecs.getreader('utf8'),
+                                           codecs.getwriter('utf8'),
+                                           'replace')
+        # Setting an "encoding" attribute on the stream is necessary to
+        # prevent the logging module from raising an error.  See
+        # the checker.configure_logging() function for more information.
+        stderr.encoding = "UTF-8"
+
+        # FIXME: Change webkitpy.style so that we do not need to overwrite
+        #        the global sys.stderr.  This involves updating the code to
+        #        accept a stream parameter where necessary, and not calling
+        #        sys.stderr explicitly anywhere.
+        sys.stderr = stderr
+        return stderr
+
+    def main(self):
+        args = sys.argv[1:]
+
+        host = Host()
+        host.initialize_scm()
+
+        stderr = self._engage_awesome_stderr_hacks()
+
+        # Checking for the verbose flag before calling check_webkit_style_parser()
+        # lets us enable verbose logging earlier.
+        is_verbose = "-v" in args or "--verbose" in args
+
+        checker.configure_logging(stream=stderr, is_verbose=is_verbose)
+        _log.debug("Verbose logging enabled.")
+
+        parser = checker.check_webkit_style_parser()
+        (paths, options) = parser.parse(args)
+
+        configuration = checker.check_webkit_style_configuration(options)
+
+        paths = change_directory(host.filesystem, checkout_root=host.scm().checkout_root, paths=paths)
+
+        style_processor = StyleProcessor(configuration)
+        file_reader = TextFileReader(host.filesystem, style_processor)
+
+        if paths and not options.diff_files:
+            file_reader.process_paths(paths)
+        else:
+            changed_files = paths if options.diff_files else None
+            patch = host.scm().create_patch(options.git_commit, changed_files=changed_files)
+            patch_checker = PatchReader(file_reader)
+            patch_checker.check(patch)
+
+        error_count = style_processor.error_count
+        file_count = file_reader.file_count
+        delete_only_file_count = file_reader.delete_only_file_count
+
+        _log.info("Total errors found: %d in %d files" % (error_count, file_count))
+        # We fail when style errors are found or there are no checked files.
+        return error_count > 0 or (file_count == 0 and delete_only_file_count == 0)
diff --git a/Tools/Scripts/webkitpy/style/main_unittest.py b/Tools/Scripts/webkitpy/style/main_unittest.py
new file mode 100644
index 0000000..5457833
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/main_unittest.py
@@ -0,0 +1,75 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from main import change_directory
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.logtesting import LogTesting
+
+
+class ChangeDirectoryTest(unittest.TestCase):
+    _original_directory = "/original"
+    _checkout_root = "/WebKit"
+
+    def setUp(self):
+        self._log = LogTesting.setUp(self)
+        self.filesystem = MockFileSystem(dirs=[self._original_directory, self._checkout_root], cwd=self._original_directory)
+
+    def tearDown(self):
+        self._log.tearDown()
+
+    def _change_directory(self, paths, checkout_root):
+        return change_directory(self.filesystem, paths=paths, checkout_root=checkout_root)
+
+    def _assert_result(self, actual_return_value, expected_return_value,
+                       expected_log_messages, expected_current_directory):
+        self.assertEquals(actual_return_value, expected_return_value)
+        self._log.assertMessages(expected_log_messages)
+        self.assertEquals(self.filesystem.getcwd(), expected_current_directory)
+
+    def test_paths_none(self):
+        paths = self._change_directory(checkout_root=self._checkout_root, paths=None)
+        self._assert_result(paths, None, [], self._checkout_root)
+
+    def test_paths_convertible(self):
+        paths = ["/WebKit/foo1.txt", "/WebKit/foo2.txt"]
+        paths = self._change_directory(checkout_root=self._checkout_root, paths=paths)
+        self._assert_result(paths, ["foo1.txt", "foo2.txt"], [], self._checkout_root)
+
+    def test_with_scm_paths_unconvertible(self):
+        paths = ["/WebKit/foo1.txt", "/outside/foo2.txt"]
+        paths = self._change_directory(checkout_root=self._checkout_root, paths=paths)
+        log_messages = [
+"""WARNING: Path-dependent style checks may not work correctly:
+
+  One of the given paths is outside the WebKit checkout of the current
+  working directory:
+
+    Path: /outside/foo2.txt
+    Checkout root: /WebKit
+
+  Pass only files below the checkout root to ensure correct results.
+  See the help documentation for more info.
+
+"""]
+        self._assert_result(paths, paths, log_messages, self._original_directory)
diff --git a/Tools/Scripts/webkitpy/style/optparser.py b/Tools/Scripts/webkitpy/style/optparser.py
new file mode 100644
index 0000000..f4e9923
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/optparser.py
@@ -0,0 +1,457 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Supports the parsing of command-line options for check-webkit-style."""
+
+import logging
+from optparse import OptionParser
+import os.path
+import sys
+
+from filter import validate_filter_rules
+# This module should not import anything from checker.py.
+
+_log = logging.getLogger(__name__)
+
+_USAGE = """usage: %prog [--help] [options] [path1] [path2] ...
+
+Overview:
+  Check coding style according to WebKit style guidelines:
+
+      http://webkit.org/coding/coding-style.html
+
+  Path arguments can be files and directories.  If neither a git commit nor
+  paths are passed, then all changes in your source control working directory
+  are checked.
+
+Style errors:
+  This script assigns to every style error a confidence score from 1-5 and
+  a category name.  A confidence score of 5 means the error is certainly
+  a problem, and 1 means it could be fine.
+
+  Category names appear in error messages in brackets, for example
+  [whitespace/indent].  See the options section below for an option that
+  displays all available categories and which are reported by default.
+
+Filters:
+  Use filters to configure what errors to report.  Filters are specified using
+  a comma-separated list of boolean filter rules.  The script reports errors
+  in a category if the category passes the filter, as described below.
+
+  All categories start out passing.  Boolean filter rules are then evaluated
+  from left to right, with later rules taking precedence.  For example, the
+  rule "+foo" passes any category that starts with "foo", and "-foo" fails
+  any such category.  The filter input "-whitespace,+whitespace/braces" fails
+  the category "whitespace/tab" and passes "whitespace/braces".
+
+  Examples: --filter=-whitespace,+whitespace/braces
+            --filter=-whitespace,-runtime/printf,+runtime/printf_format
+            --filter=-,+build/include_what_you_use
+
+Paths:
+  Certain style-checking behavior depends on the paths relative to
+  the WebKit source root of the files being checked.  For example,
+  certain types of errors may be handled differently for files in
+  WebKit/gtk/webkit/ (e.g. by suppressing "readability/naming" errors
+  for files in this directory).
+
+  Consequently, if the path relative to the source root cannot be
+  determined for a file being checked, then style checking may not
+  work correctly for that file.  This can occur, for example, if no
+  WebKit checkout can be found, or if the source root can be detected,
+  but one of the files being checked lies outside the source tree.
+
+  If a WebKit checkout can be detected and all files being checked
+  are in the source tree, then all paths will automatically be
+  converted to paths relative to the source root prior to checking.
+  This is also useful for display purposes.
+
+  Currently, this command can detect the source root only if the
+  command is run from within a WebKit checkout (i.e. if the current
+  working directory is below the root of a checkout).  In particular,
+  it is not recommended to run this script from a directory outside
+  a checkout.
+
+  Running this script from a top-level WebKit source directory and
+  checking only files in the source tree will ensure that all style
+  checking behaves correctly -- whether or not a checkout can be
+  detected.  This is because all file paths will already be relative
+  to the source root and so will not need to be converted."""
+
+_EPILOG = ("This script can miss errors and does not substitute for "
+           "code review.")
+
+
+# This class should not have knowledge of the flag key names.
+class DefaultCommandOptionValues(object):
+
+    """Stores the default check-webkit-style command-line options.
+
+    Attributes:
+      output_format: A string that is the default output format.
+      min_confidence: An integer that is the default minimum confidence level.
+
+    """
+
+    def __init__(self, min_confidence, output_format):
+        self.min_confidence = min_confidence
+        self.output_format = output_format
+
+
+# This class should not have knowledge of the flag key names.
+class CommandOptionValues(object):
+
+    """Stores the option values passed by the user via the command line.
+
+    Attributes:
+      is_verbose: A boolean value of whether verbose logging is enabled.
+
+      filter_rules: The list of filter rules provided by the user.
+                    These rules are appended to the base rules and
+                    path-specific rules and so take precedence over
+                    the base filter rules, etc.
+
+      git_commit: A string representing the git commit to check.
+                  The default is None.
+
+      min_confidence: An integer between 1 and 5 inclusive that is the
+                      minimum confidence level of style errors to report.
+                      The default is 1, which reports all errors.
+
+      output_format: A string that is the output format.  The supported
+                     output formats are "emacs" which emacs can parse
+                     and "vs7" which Microsoft Visual Studio 7 can parse.
+
+    """
+    def __init__(self,
+                 filter_rules=None,
+                 git_commit=None,
+                 diff_files=None,
+                 is_verbose=False,
+                 min_confidence=1,
+                 output_format="emacs"):
+        if filter_rules is None:
+            filter_rules = []
+
+        if (min_confidence < 1) or (min_confidence > 5):
+            raise ValueError('Invalid "min_confidence" parameter: value '
+                             "must be an integer between 1 and 5 inclusive. "
+                             'Value given: "%s".' % min_confidence)
+
+        if output_format not in ("emacs", "vs7"):
+            raise ValueError('Invalid "output_format" parameter: '
+                             'value must be "emacs" or "vs7". '
+                             'Value given: "%s".' % output_format)
+
+        self.filter_rules = filter_rules
+        self.git_commit = git_commit
+        self.diff_files = diff_files
+        self.is_verbose = is_verbose
+        self.min_confidence = min_confidence
+        self.output_format = output_format
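+
+    # For illustration: CommandOptionValues(min_confidence=0) and
+    # CommandOptionValues(output_format="xml") both raise ValueError;
+    # the defaults (min_confidence=1, output_format="emacs") are accepted.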
+
+    # Useful for unit testing.
+    def __eq__(self, other):
+        """Return whether this instance is equal to another."""
+        if self.filter_rules != other.filter_rules:
+            return False
+        if self.git_commit != other.git_commit:
+            return False
+        if self.diff_files != other.diff_files:
+            return False
+        if self.is_verbose != other.is_verbose:
+            return False
+        if self.min_confidence != other.min_confidence:
+            return False
+        if self.output_format != other.output_format:
+            return False
+
+        return True
+
+    # Useful for unit testing.
+    def __ne__(self, other):
+        # Python does not automatically deduce this from __eq__().
+        return not self.__eq__(other)
+
+
+class ArgumentPrinter(object):
+
+    """Supports the printing of check-webkit-style command arguments."""
+
+    def _flag_pair_to_string(self, flag_key, flag_value):
+        return '--%(key)s=%(val)s' % {'key': flag_key, 'val': flag_value }
+
+    def to_flag_string(self, options):
+        """Return a flag string of the given CommandOptionValues instance.
+
+        This method orders the flag values alphabetically by the flag key.
+
+        Args:
+          options: A CommandOptionValues instance.
+
+        """
+        flags = {}
+        flags['min-confidence'] = options.min_confidence
+        flags['output'] = options.output_format
+        # Only include the filter flag if user-provided rules are present.
+        filter_rules = options.filter_rules
+        if filter_rules:
+            flags['filter'] = ",".join(filter_rules)
+        if options.git_commit:
+            flags['git-commit'] = options.git_commit
+        if options.diff_files:
+            flags['diff_files'] = options.diff_files
+
+        flag_string = ''
+        # Alphabetizing lets us unit test this method.
+        for key in sorted(flags.keys()):
+            flag_string += self._flag_pair_to_string(key, flags[key]) + ' '
+
+        return flag_string.strip()
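+
+    # For illustration (a sketch; assumes the default "emacs" output format):
+    #
+    #   printer = ArgumentPrinter()
+    #   options = CommandOptionValues(filter_rules=["-whitespace"],
+    #                                 min_confidence=3)
+    #   printer.to_flag_string(options)
+    #   # => '--filter=-whitespace --min-confidence=3 --output=emacs'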
+
+
+class ArgumentParser(object):
+
+    # FIXME: Move the documentation of the attributes to the __init__
+    #        docstring after making the attributes internal.
+    """Supports the parsing of check-webkit-style command arguments.
+
+    Attributes:
+      create_usage: A function that accepts a DefaultCommandOptionValues
+                    instance and returns a string of usage instructions.
+                    Defaults to the function that generates the usage
+                    string for check-webkit-style.
+      default_options: A DefaultCommandOptionValues instance that provides
+                       the default values for options not explicitly
+                       provided by the user.
+      stderr_write: A function that takes a string as a parameter and
+                    serves as stderr.write.  Defaults to sys.stderr.write.
+                    This parameter should be specified only for unit tests.
+
+    """
+
+    def __init__(self,
+                 all_categories,
+                 default_options,
+                 base_filter_rules=None,
+                 mock_stderr=None,
+                 usage=None):
+        """Create an ArgumentParser instance.
+
+        Args:
+          all_categories: The set of all available style categories.
+          default_options: See the corresponding attribute in the class
+                           docstring.
+        Keyword Args:
+          base_filter_rules: The list of filter rules at the beginning of
+                             the list of rules used to check style.  This
+                             list has the least precedence when checking
+                             style and precedes any user-provided rules.
+                             The class uses this parameter only for display
+                             purposes to the user.  Defaults to the empty list.
+          mock_stderr: A stream to use in place of sys.stderr when
+                       writing errors and help output.  This parameter
+                       should be specified only for unit tests.
+          usage: The usage string to pass to the option parser.
+                 Defaults to the usage string for check-webkit-style.
+
+        """
+        if base_filter_rules is None:
+            base_filter_rules = []
+        stderr = sys.stderr if mock_stderr is None else mock_stderr
+        if usage is None:
+            usage = _USAGE
+
+        self._all_categories = all_categories
+        self._base_filter_rules = base_filter_rules
+
+        # FIXME: Rename these to reflect that they are internal.
+        self.default_options = default_options
+        self.stderr_write = stderr.write
+
+        self._parser = self._create_option_parser(stderr=stderr,
+            usage=usage,
+            default_min_confidence=self.default_options.min_confidence,
+            default_output_format=self.default_options.output_format)
+
+    def _create_option_parser(self, stderr, usage,
+                              default_min_confidence, default_output_format):
+        # Since the epilog string is short, it is not necessary to replace
+        # the epilog string with a mock epilog string when testing.
+        # For this reason, we use _EPILOG directly rather than passing it
+        # as an argument like we do for the usage string.
+        parser = OptionParser(usage=usage, epilog=_EPILOG)
+
+        filter_help = ('set a filter to control what categories of style '
+                       'errors to report.  Specify a filter using a comma-'
+                       'delimited list of boolean filter rules, for example '
+                       '"--filter -whitespace,+whitespace/braces".  To display '
+                       'all categories and which are enabled by default, pass '
+                       """no value (e.g. '-f ""' or '--filter=').""")
+        parser.add_option("-f", "--filter-rules", metavar="RULES",
+                          dest="filter_value", help=filter_help)
+
+        git_commit_help = ("check all changes in the given commit. "
+                           "Use 'commit_id..' to check all changes after commmit_id")
+        parser.add_option("-g", "--git-diff", "--git-commit",
+                          metavar="COMMIT", dest="git_commit", help=git_commit_help,)
+
+        diff_files_help = "diff the files passed on the command line rather than checking the style of every line"
+        parser.add_option("--diff-files", action="store_true", dest="diff_files", default=False, help=diff_files_help)
+
+        min_confidence_help = ("set the minimum confidence of style errors "
+                               "to report.  Can be an integer 1-5, with 1 "
+                               "displaying all errors.  Defaults to %default.")
+        parser.add_option("-m", "--min-confidence", metavar="INT",
+                          type="int", dest="min_confidence",
+                          default=default_min_confidence,
+                          help=min_confidence_help)
+
+        output_format_help = ('set the output format, which can be "emacs" '
+                              'or "vs7" (for Visual Studio).  '
+                              'Defaults to "%default".')
+        parser.add_option("-o", "--output-format", metavar="FORMAT",
+                          choices=["emacs", "vs7"],
+                          dest="output_format", default=default_output_format,
+                          help=output_format_help)
+
+        verbose_help = "enable verbose logging."
+        parser.add_option("-v", "--verbose", dest="is_verbose", default=False,
+                          action="store_true", help=verbose_help)
+
+        # Override OptionParser's error() method so that option help will
+        # also display when an error occurs.  Normally, just the usage
+        # string displays and not option help.
+        parser.error = self._parse_error
+
+        # Override OptionParser's print_help() method so that help output
+        # does not render to the screen while running unit tests.
+        print_help = parser.print_help
+        parser.print_help = lambda: print_help(file=stderr)
+
+        return parser
+
+    def _parse_error(self, error_message):
+        """Print the help string and an error message, and exit."""
+        # The method format_help() includes both the usage string and
+        # the flag options.
+        help = self._parser.format_help()
+        # Separate help from the error message with a single blank line.
+        self.stderr_write(help + "\n")
+        if error_message:
+            _log.error(error_message)
+
+        # Since we are using this method to replace/override the Python
+        # module optparse's OptionParser.error() method, we match its
+        # behavior and exit with status code 2.
+        #
+        # As additional background, Python documentation says--
+        #
+        # "Unix programs generally use 2 for command line syntax errors
+        #  and 1 for all other kind of errors."
+        #
+        # (from http://docs.python.org/library/sys.html#sys.exit )
+        sys.exit(2)
+
+    def _exit_with_categories(self):
+        """Exit and print the style categories and default filter rules."""
+        self.stderr_write('\nAll categories:\n')
+        for category in sorted(self._all_categories):
+            self.stderr_write('    ' + category + '\n')
+
+        self.stderr_write('\nDefault filter rules**:\n')
+        for filter_rule in sorted(self._base_filter_rules):
+            self.stderr_write('    ' + filter_rule + '\n')
+        self.stderr_write('\n**The command always evaluates the above rules, '
+                          'and before any --filter flag.\n\n')
+
+        sys.exit(0)
+
+    def _parse_filter_flag(self, flag_value):
+        """Parse the --filter flag, and return a list of filter rules.
+
+        Args:
+          flag_value: A string of comma-separated filter rules, for
+                      example "-whitespace,+whitespace/indent".
+
+        """
+        filters = []
+        for uncleaned_filter in flag_value.split(','):
+            filter = uncleaned_filter.strip()
+            if not filter:
+                continue
+            filters.append(filter)
+        return filters
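+    # _parse_filter_flag() illustration: "-whitespace, +whitespace/indent,"
+    # parses to ["-whitespace", "+whitespace/indent"]; surrounding
+    # whitespace is stripped and empty entries are dropped.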
+
+    def parse(self, args):
+        """Parse the command line arguments to check-webkit-style.
+
+        Args:
+          args: A list of command-line arguments as returned by sys.argv[1:].
+
+        Returns:
+          A tuple of (paths, options)
+
+          paths: The list of paths to check.
+          options: A CommandOptionValues instance.
+
+        """
+        (options, paths) = self._parser.parse_args(args=args)
+
+        filter_value = options.filter_value
+        git_commit = options.git_commit
+        diff_files = options.diff_files
+        is_verbose = options.is_verbose
+        min_confidence = options.min_confidence
+        output_format = options.output_format
+
+        if filter_value is not None and not filter_value:
+            # Then the user explicitly passed no filter, for
+            # example "-f ''" or "--filter=".
+            self._exit_with_categories()
+
+        # Validate user-provided values.
+
+        min_confidence = int(min_confidence)
+        if (min_confidence < 1) or (min_confidence > 5):
+            self._parse_error('option --min-confidence: invalid integer: '
+                              '%s: value must be between 1 and 5'
+                              % min_confidence)
+
+        if filter_value:
+            filter_rules = self._parse_filter_flag(filter_value)
+        else:
+            filter_rules = []
+
+        try:
+            validate_filter_rules(filter_rules, self._all_categories)
+        except ValueError, err:
+            self._parse_error(err)
+
+        options = CommandOptionValues(filter_rules=filter_rules,
+                                      git_commit=git_commit,
+                                      diff_files=diff_files,
+                                      is_verbose=is_verbose,
+                                      min_confidence=min_confidence,
+                                      output_format=output_format)
+
+        return (paths, options)
+
diff --git a/Tools/Scripts/webkitpy/style/optparser_unittest.py b/Tools/Scripts/webkitpy/style/optparser_unittest.py
new file mode 100644
index 0000000..a6b64da
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/optparser_unittest.py
@@ -0,0 +1,258 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for parser.py."""
+
+import unittest
+
+from webkitpy.common.system.logtesting import LoggingTestCase
+from webkitpy.style.optparser import ArgumentParser
+from webkitpy.style.optparser import ArgumentPrinter
+from webkitpy.style.optparser import CommandOptionValues as ProcessorOptions
+from webkitpy.style.optparser import DefaultCommandOptionValues
+
+
+class ArgumentPrinterTest(unittest.TestCase):
+
+    """Tests the ArgumentPrinter class."""
+
+    _printer = ArgumentPrinter()
+
+    def _create_options(self,
+                        output_format='emacs',
+                        min_confidence=3,
+                        filter_rules=[],
+                        git_commit=None):
+        return ProcessorOptions(filter_rules=filter_rules,
+                                git_commit=git_commit,
+                                min_confidence=min_confidence,
+                                output_format=output_format)
+
+    def test_to_flag_string(self):
+        options = self._create_options('vs7', 5, ['+foo', '-bar'], 'git')
+        self.assertEquals('--filter=+foo,-bar --git-commit=git '
+                          '--min-confidence=5 --output=vs7',
+                          self._printer.to_flag_string(options))
+
+        # This is to check that --filter and --git-commit do not
+        # show up when not user-specified.
+        options = self._create_options()
+        self.assertEquals('--min-confidence=3 --output=emacs',
+                          self._printer.to_flag_string(options))
+
+
+class ArgumentParserTest(LoggingTestCase):
+
+    """Test the ArgumentParser class."""
+
+    class _MockStdErr(object):
+
+        def write(self, message):
+            # We do not want the usage string or style categories
+            # to print during unit tests, so print nothing.
+            return
+
+    def _parse(self, args):
+        """Call a test parser.parse()."""
+        parser = self._create_parser()
+        return parser.parse(args)
+
+    def _create_defaults(self):
+        """Return a DefaultCommandOptionValues instance for testing."""
+        base_filter_rules = ["-", "+whitespace"]
+        return DefaultCommandOptionValues(min_confidence=3,
+                                          output_format="vs7")
+
+    def _create_parser(self):
+        """Return an ArgumentParser instance for testing."""
+        default_options = self._create_defaults()
+
+        all_categories = ["build", "whitespace"]
+
+        mock_stderr = self._MockStdErr()
+
+        return ArgumentParser(all_categories=all_categories,
+                              base_filter_rules=[],
+                              default_options=default_options,
+                              mock_stderr=mock_stderr,
+                              usage="test usage")
+
+    def test_parse_documentation(self):
+        parse = self._parse
+
+        # FIXME: Test both the printing of the usage string and the
+        #        filter categories help.
+
+        # Request the usage string.
+        self.assertRaises(SystemExit, parse, ['--help'])
+        # Request default filter rules and available style categories.
+        self.assertRaises(SystemExit, parse, ['--filter='])
+
+    def test_parse_bad_values(self):
+        parse = self._parse
+
+        # Pass an unsupported argument.
+        self.assertRaises(SystemExit, parse, ['--bad'])
+        self.assertLog(['ERROR: no such option: --bad\n'])
+
+        self.assertRaises(SystemExit, parse, ['--min-confidence=bad'])
+        self.assertLog(['ERROR: option --min-confidence: '
+                        "invalid integer value: 'bad'\n"])
+        self.assertRaises(SystemExit, parse, ['--min-confidence=0'])
+        self.assertLog(['ERROR: option --min-confidence: invalid integer: 0: '
+                        'value must be between 1 and 5\n'])
+        self.assertRaises(SystemExit, parse, ['--min-confidence=6'])
+        self.assertLog(['ERROR: option --min-confidence: invalid integer: 6: '
+                        'value must be between 1 and 5\n'])
+        parse(['--min-confidence=1']) # works
+        parse(['--min-confidence=5']) # works
+
+        self.assertRaises(SystemExit, parse, ['--output=bad'])
+        self.assertLog(['ERROR: option --output-format: invalid choice: '
+                        "'bad' (choose from 'emacs', 'vs7')\n"])
+        parse(['--output=vs7']) # works
+
+        # Pass a filter rule not beginning with + or -.
+        self.assertRaises(SystemExit, parse, ['--filter=build'])
+        self.assertLog(['ERROR: Invalid filter rule "build": '
+                        'every rule must start with + or -.\n'])
+        parse(['--filter=+build']) # works
+
+    def test_parse_default_arguments(self):
+        parse = self._parse
+
+        (files, options) = parse([])
+
+        self.assertEquals(files, [])
+
+        self.assertEquals(options.filter_rules, [])
+        self.assertEquals(options.git_commit, None)
+        self.assertEquals(options.diff_files, False)
+        self.assertEquals(options.is_verbose, False)
+        self.assertEquals(options.min_confidence, 3)
+        self.assertEquals(options.output_format, 'vs7')
+
+    def test_parse_explicit_arguments(self):
+        parse = self._parse
+
+        # Pass non-default explicit values.
+        (files, options) = parse(['--min-confidence=4'])
+        self.assertEquals(options.min_confidence, 4)
+        (files, options) = parse(['--output=emacs'])
+        self.assertEquals(options.output_format, 'emacs')
+        (files, options) = parse(['-g', 'commit'])
+        self.assertEquals(options.git_commit, 'commit')
+        (files, options) = parse(['--git-commit=commit'])
+        self.assertEquals(options.git_commit, 'commit')
+        (files, options) = parse(['--git-diff=commit'])
+        self.assertEquals(options.git_commit, 'commit')
+        (files, options) = parse(['--verbose'])
+        self.assertEquals(options.is_verbose, True)
+        (files, options) = parse(['--diff-files', 'file.txt'])
+        self.assertEquals(options.diff_files, True)
+
+        # Pass user_rules.
+        (files, options) = parse(['--filter=+build,-whitespace'])
+        self.assertEquals(options.filter_rules,
+                          ["+build", "-whitespace"])
+
+        # Pass spurious white space in user rules.
+        (files, options) = parse(['--filter=+build, -whitespace'])
+        self.assertEquals(options.filter_rules,
+                          ["+build", "-whitespace"])
+
+    def test_parse_files(self):
+        parse = self._parse
+
+        (files, options) = parse(['foo.cpp'])
+        self.assertEquals(files, ['foo.cpp'])
+
+        # Pass multiple files.
+        (files, options) = parse(['--output=emacs', 'foo.cpp', 'bar.cpp'])
+        self.assertEquals(files, ['foo.cpp', 'bar.cpp'])
+
+
+class CommandOptionValuesTest(unittest.TestCase):
+
+    """Tests CommandOptionValues class."""
+
+    def test_init(self):
+        """Test __init__ constructor."""
+        # Check default parameters.
+        options = ProcessorOptions()
+        self.assertEquals(options.filter_rules, [])
+        self.assertEquals(options.git_commit, None)
+        self.assertEquals(options.is_verbose, False)
+        self.assertEquals(options.min_confidence, 1)
+        self.assertEquals(options.output_format, "emacs")
+
+        # Check argument validation.
+        self.assertRaises(ValueError, ProcessorOptions, output_format="bad")
+        ProcessorOptions(output_format="emacs") # No ValueError: works
+        ProcessorOptions(output_format="vs7") # works
+        self.assertRaises(ValueError, ProcessorOptions, min_confidence=0)
+        self.assertRaises(ValueError, ProcessorOptions, min_confidence=6)
+        ProcessorOptions(min_confidence=1) # works
+        ProcessorOptions(min_confidence=5) # works
+
+        # Check attributes.
+        options = ProcessorOptions(filter_rules=["+"],
+                                   git_commit="commit",
+                                   is_verbose=True,
+                                   min_confidence=3,
+                                   output_format="vs7")
+        self.assertEquals(options.filter_rules, ["+"])
+        self.assertEquals(options.git_commit, "commit")
+        self.assertEquals(options.is_verbose, True)
+        self.assertEquals(options.min_confidence, 3)
+        self.assertEquals(options.output_format, "vs7")
+
+    def test_eq(self):
+        """Test __eq__ equality function."""
+        self.assertTrue(ProcessorOptions().__eq__(ProcessorOptions()))
+
+        # Also verify that a difference in any argument causes equality to fail.
+
+        # Explicitly create a ProcessorOptions instance with all default
+        # values.  We do this to be sure we are assuming the right default
+        # values in our self.assertFalse() calls below.
+        options = ProcessorOptions(filter_rules=[],
+                                   git_commit=None,
+                                   is_verbose=False,
+                                   min_confidence=1,
+                                   output_format="emacs")
+        # Verify that we created options correctly.
+        self.assertTrue(options.__eq__(ProcessorOptions()))
+
+        self.assertFalse(options.__eq__(ProcessorOptions(filter_rules=["+"])))
+        self.assertFalse(options.__eq__(ProcessorOptions(git_commit="commit")))
+        self.assertFalse(options.__eq__(ProcessorOptions(is_verbose=True)))
+        self.assertFalse(options.__eq__(ProcessorOptions(min_confidence=2)))
+        self.assertFalse(options.__eq__(ProcessorOptions(output_format="vs7")))
+
+    def test_ne(self):
+        """Test __ne__ inequality function."""
+        # By default, __ne__ always returns true on different objects.
+        # Thus, just check the distinguishing case to verify that the
+        # code defines __ne__.
+        self.assertFalse(ProcessorOptions().__ne__(ProcessorOptions()))
+
diff --git a/Tools/Scripts/webkitpy/style/patchreader.py b/Tools/Scripts/webkitpy/style/patchreader.py
new file mode 100644
index 0000000..8495cd0
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/patchreader.py
@@ -0,0 +1,83 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+# Copyright (C) 2010 ProFUSION embedded systems
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import re
+
+from webkitpy.common.checkout.diff_parser import DiffParser
+from webkitpy.common.system.executive import Executive
+from webkitpy.common.system.filesystem import FileSystem
+from webkitpy.common.checkout.scm.detection import SCMDetector
+
+
+_log = logging.getLogger(__name__)
+
+
+class PatchReader(object):
+    """Supports checking style in patches."""
+
+    def __init__(self, text_file_reader):
+        """Create a PatchReader instance.
+
+        Args:
+          text_file_reader: A TextFileReader instance.
+
+        """
+        self._text_file_reader = text_file_reader
+
+    def check(self, patch_string, fs=None):
+        """Check style in the given patch."""
+        fs = fs or FileSystem()
+        patch_files = DiffParser(patch_string.splitlines()).files
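+        # patch_files maps each changed path in the patch to its parsed diff,
+        # from which the added or modified line numbers are read below.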
+
+        # If the user uses git, checking the Subversion config file only once is enough.
+        call_only_once = True
+
+        for path, diff_file in patch_files.iteritems():
+            line_numbers = diff_file.added_or_modified_line_numbers()
+            _log.debug('Found %s new or modified lines in: %s' % (len(line_numbers), path))
+
+            if not line_numbers:
+                match = re.search("\s*png$", path)
+                if match and fs.exists(path):
+                    if call_only_once:
+                        self._text_file_reader.process_file(file_path=path, line_numbers=None)
+                        cwd = fs.getcwd()
+                        detection = SCMDetector(fs, Executive()).detect_scm_system(cwd)
+                        if detection.display_name() == "git":
+                            call_only_once = False
+                    continue
+                # Don't check files that contain only deleted lines,
+                # as they can never add style errors. However, mark them as
+                # processed so that we can count the number of such files.
+                self._text_file_reader.count_delete_only_file()
+                continue
+
+            self._text_file_reader.process_file(file_path=path, line_numbers=line_numbers)
diff --git a/Tools/Scripts/webkitpy/style/patchreader_unittest.py b/Tools/Scripts/webkitpy/style/patchreader_unittest.py
new file mode 100644
index 0000000..eb26d47
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/patchreader_unittest.py
@@ -0,0 +1,103 @@
+#!/usr/bin/python
+#
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2009 Torch Mobile Inc.
+# Copyright (C) 2009 Apple Inc. All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.style.patchreader import PatchReader
+
+
+class PatchReaderTest(unittest.TestCase):
+
+    """Test the PatchReader class."""
+
+    class MockTextFileReader(object):
+
+        def __init__(self):
+            self.passed_to_process_file = []
+            """A list of (file_path, line_numbers) pairs."""
+            self.delete_only_file_count = 0
+            """A number of times count_delete_only_file() called"""
+
+        def process_file(self, file_path, line_numbers):
+            self.passed_to_process_file.append((file_path, line_numbers))
+
+        def count_delete_only_file(self):
+            self.delete_only_file_count += 1
+
+    def setUp(self):
+        file_reader = self.MockTextFileReader()
+        self._file_reader = file_reader
+        self._patch_checker = PatchReader(file_reader)
+
+    def _call_check_patch(self, patch_string):
+        self._patch_checker.check(patch_string)
+
+    def _assert_checked(self, passed_to_process_file, delete_only_file_count):
+        self.assertEquals(self._file_reader.passed_to_process_file,
+                          passed_to_process_file)
+        self.assertEquals(self._file_reader.delete_only_file_count,
+                          delete_only_file_count)
+
+    def test_check_patch(self):
+        # The modified line_numbers array for this patch is: [2].
+        self._call_check_patch("""diff --git a/__init__.py b/__init__.py
+index ef65bee..e3db70e 100644
+--- a/__init__.py
++++ b/__init__.py
+@@ -1,1 +1,2 @@
+ # Required for Python to search this directory for module files
++# New line
+""")
+        self._assert_checked([("__init__.py", [2])], 0)
+
+    def test_check_patch_with_deletion(self):
+        self._call_check_patch("""Index: __init__.py
+===================================================================
+--- __init__.py  (revision 3593)
++++ __init__.py  (working copy)
+@@ -1 +0,0 @@
+-foobar
+""")
+        # process_file() should not be called for the deletion patch.
+        self._assert_checked([], 1)
+
+    def test_check_patch_with_png_deletion(self):
+        fs = MockFileSystem()
+        diff_text = """Index: LayoutTests/platform/mac/foo-expected.png
+===================================================================
+Cannot display: file marked as a binary type.
+svn:mime-type = image/png
+"""
+        self._patch_checker.check(diff_text, fs)
+        self._assert_checked([], 1)
diff --git a/Tools/Scripts/webkitpy/test/__init__.py b/Tools/Scripts/webkitpy/test/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/Tools/Scripts/webkitpy/test/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/Tools/Scripts/webkitpy/test/finder.py b/Tools/Scripts/webkitpy/test/finder.py
new file mode 100644
index 0000000..21eceac
--- /dev/null
+++ b/Tools/Scripts/webkitpy/test/finder.py
@@ -0,0 +1,173 @@
+# Copyright (C) 2012 Google, Inc.
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""this module is responsible for finding python tests."""
+
+import logging
+import re
+
+
+_log = logging.getLogger(__name__)
+
+
+class _DirectoryTree(object):
+    def __init__(self, filesystem, top_directory, starting_subdirectory):
+        self.filesystem = filesystem
+        self.top_directory = filesystem.realpath(top_directory)
+        self.search_directory = self.top_directory
+        self.top_package = ''
+        if starting_subdirectory:
+            self.top_package = starting_subdirectory.replace(filesystem.sep, '.') + '.'
+            self.search_directory = filesystem.join(self.top_directory, starting_subdirectory)
+
+    def find_modules(self, suffixes, sub_directory=None):
+        if sub_directory:
+            search_directory = self.filesystem.join(self.top_directory, sub_directory)
+        else:
+            search_directory = self.search_directory
+
+        def file_filter(filesystem, dirname, basename):
+            return any(basename.endswith(suffix) for suffix in suffixes)
+
+        filenames = self.filesystem.files_under(search_directory, file_filter=file_filter)
+        return [self.to_module(filename) for filename in filenames]
+
+    def to_module(self, path):
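+        # e.g. with a top directory of '/foo', '/foo/bar/baz_unittest.py'
+        # becomes the module name 'bar.baz_unittest'.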
+        return path.replace(self.top_directory + self.filesystem.sep, '').replace(self.filesystem.sep, '.')[:-3]
+
+    def subpath(self, path):
+        """Returns the relative path from the top of the tree to the path, or None if the path is not under the top of the tree."""
+        realpath = self.filesystem.realpath(self.filesystem.join(self.top_directory, path))
+        if realpath.startswith(self.top_directory + self.filesystem.sep):
+            return realpath.replace(self.top_directory + self.filesystem.sep, '')
+        return None
+
+    def clean(self):
+        """Delete all .pyc files in the tree that have no matching .py file."""
+        _log.debug("Cleaning orphaned *.pyc files from: %s" % self.search_directory)
+        filenames = self.filesystem.files_under(self.search_directory)
+        for filename in filenames:
+            if filename.endswith(".pyc") and filename[:-1] not in filenames:
+                _log.info("Deleting orphan *.pyc file: %s" % filename)
+                self.filesystem.remove(filename)
+
+
+class Finder(object):
+    def __init__(self, filesystem):
+        self.filesystem = filesystem
+        self.trees = []
+        self._names_to_skip = []
+
+    def add_tree(self, top_directory, starting_subdirectory=None):
+        self.trees.append(_DirectoryTree(self.filesystem, top_directory, starting_subdirectory))
+
+    def skip(self, names, reason, bugid):
+        self._names_to_skip.append((names, reason, bugid))
+
+    def additional_paths(self, paths):
+        return [tree.top_directory for tree in self.trees if tree.top_directory not in paths]
+
+    def clean_trees(self):
+        for tree in self.trees:
+            tree.clean()
+
+    def is_module(self, name):
+        relpath = name.replace('.', self.filesystem.sep) + '.py'
+        return any(self.filesystem.exists(self.filesystem.join(tree.top_directory, relpath)) for tree in self.trees)
+
+    def is_dotted_name(self, name):
+        return re.match(r'[a-zA-Z.][a-zA-Z0-9_.]*', name)
+
+    def to_module(self, path):
+        for tree in self.trees:
+            if path.startswith(tree.top_directory):
+                return tree.to_module(path)
+        return None
+
+    def find_names(self, args, find_all):
+        suffixes = ['_unittest.py', '_integrationtest.py']
+        if args:
+            names = []
+            for arg in args:
+                names.extend(self._find_names_for_arg(arg, suffixes))
+            return names
+
+        return self._default_names(suffixes, find_all)
+
+    def _find_names_for_arg(self, arg, suffixes):
+        realpath = self.filesystem.realpath(arg)
+        if self.filesystem.exists(realpath):
+            names = self._find_in_trees(realpath, suffixes)
+            if not names:
+                _log.error("%s is not in one of the test trees." % arg)
+            return names
+
+        # See if it's a Python package in a tree (or a relative path from the top of a tree).
+        names = self._find_in_trees(arg.replace('.', self.filesystem.sep), suffixes)
+        if names:
+            return names
+
+        if self.is_dotted_name(arg):
+            # The name may not exist, but that's okay; we'll find out later.
+            return [arg]
+
+        _log.error("%s is not a python name or an existing file or directory." % arg)
+        return []
+
+    def _find_in_trees(self, path, suffixes):
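+        # Return test modules from the first tree that contains the path:
+        # a single module if the path is a file, otherwise every module under
+        # that directory whose filename ends with one of the suffixes.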
+        for tree in self.trees:
+            relpath = tree.subpath(path)
+            if not relpath:
+                continue
+            if self.filesystem.isfile(path):
+                return [tree.to_module(path)]
+            else:
+                return tree.find_modules(suffixes, path)
+        return []
+
+    def _default_names(self, suffixes, find_all):
+        modules = []
+        for tree in self.trees:
+            modules.extend(tree.find_modules(suffixes))
+        modules.sort()
+
+        for module in modules:
+            _log.debug("Found: %s" % module)
+
+        if not find_all:
+            for (names, reason, bugid) in self._names_to_skip:
+                self._exclude(modules, names, reason, bugid)
+
+        return modules
+
+    def _exclude(self, modules, module_prefixes, reason, bugid):
+        _log.info('Skipping tests in the following modules or packages because they %s:' % reason)
+        for prefix in module_prefixes:
+            _log.info('    %s' % prefix)
+            modules_to_exclude = filter(lambda m: m.startswith(prefix), modules)
+            for m in modules_to_exclude:
+                if len(modules_to_exclude) > 1:
+                    _log.debug('        %s' % m)
+                modules.remove(m)
+        _log.info('    (https://bugs.webkit.org/show_bug.cgi?id=%d; use --all to include)' % bugid)
+        _log.info('')
diff --git a/Tools/Scripts/webkitpy/test/finder_unittest.py b/Tools/Scripts/webkitpy/test/finder_unittest.py
new file mode 100644
index 0000000..5c808a1
--- /dev/null
+++ b/Tools/Scripts/webkitpy/test/finder_unittest.py
@@ -0,0 +1,129 @@
+# Copyright (C) 2012 Google, Inc.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import unittest
+
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.test.finder import Finder
+
+
+class FinderTest(unittest.TestCase):
+    def setUp(self):
+        files = {
+          '/foo/bar/baz.py': '',
+          '/foo/bar/baz_unittest.py': '',
+          '/foo2/bar2/baz2.py': '',
+          '/foo2/bar2/baz2.pyc': '',
+          '/foo2/bar2/baz2_integrationtest.py': '',
+          '/foo2/bar2/missing.pyc': '',
+          '/tmp/another_unittest.py': '',
+        }
+        self.fs = MockFileSystem(files)
+        self.finder = Finder(self.fs)
+        self.finder.add_tree('/foo', 'bar')
+        self.finder.add_tree('/foo2')
+
+        # Here we have to jump through a hoop to make sure test-webkitpy doesn't log
+        # any messages from these tests :(.
+        self.root_logger = logging.getLogger()
+        self.log_levels = []
+        self.log_handlers = self.root_logger.handlers[:]
+        for handler in self.log_handlers:
+            self.log_levels.append(handler.level)
+            handler.level = logging.CRITICAL
+
+    def tearDown(self):
+        for handler in self.log_handlers:
+            handler.level = self.log_levels.pop(0)
+
+    def test_additional_system_paths(self):
+        self.assertEquals(self.finder.additional_paths(['/usr']),
+                          ['/foo', '/foo2'])
+
+    def test_is_module(self):
+        self.assertTrue(self.finder.is_module('bar.baz'))
+        self.assertTrue(self.finder.is_module('bar2.baz2'))
+        self.assertTrue(self.finder.is_module('bar2.baz2_integrationtest'))
+
+        # Missing the proper namespace.
+        self.assertFalse(self.finder.is_module('baz'))
+
+    def test_to_module(self):
+        self.assertEquals(self.finder.to_module('/foo/test.py'), 'test')
+        self.assertEquals(self.finder.to_module('/foo/bar/test.py'), 'bar.test')
+        self.assertEquals(self.finder.to_module('/foo/bar/pytest.py'), 'bar.pytest')
+
+    def test_clean(self):
+        self.assertTrue(self.fs.exists('/foo2/bar2/missing.pyc'))
+        self.finder.clean_trees()
+        self.assertFalse(self.fs.exists('/foo2/bar2/missing.pyc'))
+
+    def check_names(self, names, expected_names, find_all=True):
+        self.assertEquals(self.finder.find_names(names, find_all), expected_names)
+
+    def test_default_names(self):
+        self.check_names([], ['bar.baz_unittest', 'bar2.baz2_integrationtest'], find_all=True)
+        self.check_names([], ['bar.baz_unittest', 'bar2.baz2_integrationtest'], find_all=False)
+
+        # Should return the names given it, even if they don't exist.
+        self.check_names(['foobar'], ['foobar'], find_all=False)
+
+    def test_paths(self):
+        self.fs.chdir('/foo/bar')
+        self.check_names(['baz_unittest.py'], ['bar.baz_unittest'])
+        self.check_names(['./baz_unittest.py'], ['bar.baz_unittest'])
+        self.check_names(['/foo/bar/baz_unittest.py'], ['bar.baz_unittest'])
+        self.check_names(['.'], ['bar.baz_unittest'])
+        self.check_names(['../../foo2/bar2'], ['bar2.baz2_integrationtest'])
+
+        self.fs.chdir('/')
+        self.check_names(['bar'], ['bar.baz_unittest'])
+        self.check_names(['/foo/bar/'], ['bar.baz_unittest'])
+
+        # This works 'by accident' since it maps onto a package.
+        self.check_names(['bar/'], ['bar.baz_unittest'])
+
+        # This should log an error, since it's outside the trees.
+        oc = OutputCapture()
+        oc.set_log_level(logging.ERROR)
+        oc.capture_output()
+        try:
+            self.check_names(['/tmp/another_unittest.py'], [])
+        finally:
+            _, _, logs = oc.restore_output()
+            self.assertTrue('another_unittest.py' in logs)
+
+        # Paths that don't exist are errors.
+        oc.capture_output()
+        try:
+            self.check_names(['/foo/bar/notexist_unittest.py'], [])
+        finally:
+            _, _, logs = oc.restore_output()
+            self.assertTrue('notexist_unittest.py' in logs)
+
+        # Names that don't exist are caught later, at load time.
+        self.check_names(['bar.notexist_unittest'], ['bar.notexist_unittest'])
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/test/main.py b/Tools/Scripts/webkitpy/test/main.py
new file mode 100644
index 0000000..e639a45
--- /dev/null
+++ b/Tools/Scripts/webkitpy/test/main.py
@@ -0,0 +1,234 @@
+# Copyright (C) 2012 Google, Inc.
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""unit testing code for webkitpy."""
+
+import logging
+import multiprocessing
+import optparse
+import os
+import StringIO
+import sys
+import time
+import traceback
+import unittest
+
+from webkitpy.common.system.filesystem import FileSystem
+from webkitpy.test.finder import Finder
+from webkitpy.test.printer import Printer
+from webkitpy.test.runner import Runner, unit_test_name
+
+_log = logging.getLogger(__name__)
+
+
+def main():
+    up = os.path.dirname
+    webkit_root = up(up(up(up(up(os.path.abspath(__file__))))))
+
+    tester = Tester()
+    tester.add_tree(os.path.join(webkit_root, 'Tools', 'Scripts'), 'webkitpy')
+    tester.add_tree(os.path.join(webkit_root, 'Source', 'WebKit2', 'Scripts'), 'webkit2')
+
+    tester.skip(('webkitpy.common.checkout.scm.scm_unittest',), 'are really, really slow', 31818)
+    if sys.platform == 'win32':
+        tester.skip(('webkitpy.common.checkout', 'webkitpy.common.config', 'webkitpy.tool'), 'fail horribly on win32', 54526)
+
+    # This only needs to run on Unix, so don't worry about win32 for now.
+    appengine_sdk_path = '/usr/local/google_appengine'
+    if os.path.exists(appengine_sdk_path):
+        if appengine_sdk_path not in sys.path:
+            sys.path.append(appengine_sdk_path)
+        import dev_appserver
+        from google.appengine.dist import use_library
+        use_library('django', '1.2')
+        dev_appserver.fix_sys_path()
+        tester.add_tree(os.path.join(webkit_root, 'Tools', 'QueueStatusServer'))
+    else:
+        _log.info('Skipping QueueStatusServer tests; the Google AppEngine Python SDK is not installed.')
+
+    return not tester.run()
+
+
+class Tester(object):
+    def __init__(self, filesystem=None):
+        self.finder = Finder(filesystem or FileSystem())
+        self.printer = Printer(sys.stderr)
+        self._options = None
+
+    def add_tree(self, top_directory, starting_subdirectory=None):
+        self.finder.add_tree(top_directory, starting_subdirectory)
+
+    def skip(self, names, reason, bugid):
+        self.finder.skip(names, reason, bugid)
+
+    def _parse_args(self, argv=None):
+        parser = optparse.OptionParser(usage='usage: %prog [options] [args...]')
+        parser.add_option('-a', '--all', action='store_true', default=False,
+                          help='run all the tests')
+        parser.add_option('-c', '--coverage', action='store_true', default=False,
+                          help='generate code coverage info (requires http://pypi.python.org/pypi/coverage)')
+        parser.add_option('-i', '--integration-tests', action='store_true', default=False,
+                          help='run integration tests as well as unit tests')
+        parser.add_option('-j', '--child-processes', action='store', type='int', default=(1 if sys.platform == 'win32' else multiprocessing.cpu_count()),
+                          help='number of tests to run in parallel (default=%default)')
+        parser.add_option('-p', '--pass-through', action='store_true', default=False,
+                          help='be debugger friendly by passing captured output through to the system')
+        parser.add_option('-q', '--quiet', action='store_true', default=False,
+                          help='run quietly (errors, warnings, and progress only)')
+        parser.add_option('-t', '--timing', action='store_true', default=False,
+                          help='display per-test execution time (implies --verbose)')
+        parser.add_option('-v', '--verbose', action='count', default=0,
+                          help='verbose output (specify once for individual test results, twice for debug messages)')
+
+        parser.epilog = ('[args...] is an optional list of modules, test_classes, or individual tests. '
+                         'If no args are given, all the tests will be run.')
+
+        return parser.parse_args(argv)
+
+    def run(self):
+        self._options, args = self._parse_args()
+        self.printer.configure(self._options)
+
+        self.finder.clean_trees()
+
+        names = self.finder.find_names(args, self._options.all)
+        if not names:
+            _log.error('No tests to run')
+            return False
+
+        return self._run_tests(names)
+
+    def _run_tests(self, names):
+        # Make sure PYTHONPATH is set up properly.
+        sys.path = self.finder.additional_paths(sys.path) + sys.path
+
+        # We autoinstall everything up front so that we can run tests concurrently
+        # and not have to worry about autoinstalling packages concurrently.
+        self.printer.write_update("Checking autoinstalled packages ...")
+        from webkitpy.thirdparty import autoinstall_everything
+        installed_something = autoinstall_everything()
+
+        # FIXME: There appears to be a bug in Python 2.6.1 that is causing multiprocessing
+        # to hang after we install the packages in a clean checkout.
+        if installed_something:
+            _log.warning("We installed new packages, so running things serially at first")
+            self._options.child_processes = 1
+
+        if self._options.coverage:
+            import webkitpy.thirdparty.autoinstalled.coverage as coverage
+            cov = coverage.coverage()
+            cov.start()
+
+        self.printer.write_update("Checking imports ...")
+        if not self._check_imports(names):
+            return False
+
+        self.printer.write_update("Finding the individual test methods ...")
+        loader = _Loader()
+        parallel_tests, serial_tests = self._test_names(loader, names)
+
+        self.printer.write_update("Running the tests ...")
+        self.printer.num_tests = len(parallel_tests) + len(serial_tests)
+        start = time.time()
+        test_runner = Runner(self.printer, loader)
+        test_runner.run(parallel_tests, self._options.child_processes)
+        test_runner.run(serial_tests, 1)
+
+        self.printer.print_result(time.time() - start)
+
+        if self._options.coverage:
+            cov.stop()
+            cov.save()
+            cov.report(show_missing=False)
+
+        return not self.printer.num_errors and not self.printer.num_failures
+
+    def _check_imports(self, names):
+        for name in names:
+            if self.finder.is_module(name):
+                # If the name looks like a module, try importing it directly
+                # first, because loadTestsFromName() produces lousy error
+                # messages for modules that fail to import.
+                try:
+                    __import__(name)
+                except ImportError:
+                    _log.fatal('Failed to import %s:' % name)
+                    self._log_exception()
+                    return False
+        return True
+
+    def _test_names(self, loader, names):
+        parallel_test_method_prefixes = ['test_']
+        serial_test_method_prefixes = ['serial_test_']
+        if self._options.integration_tests:
+            parallel_test_method_prefixes.append('integration_test_')
+            serial_test_method_prefixes.append('serial_integration_test_')
+
+        parallel_tests = []
+        loader.test_method_prefixes = parallel_test_method_prefixes
+        for name in names:
+            parallel_tests.extend(self._all_test_names(loader.loadTestsFromName(name, None)))
+
+        serial_tests = []
+        loader.test_method_prefixes = serial_test_method_prefixes
+        for name in names:
+            serial_tests.extend(self._all_test_names(loader.loadTestsFromName(name, None)))
+
+        # loader.loadTestsFromName() will not verify that names begin with one of the test_method_prefixes
+        # if the names were explicitly provided (e.g., MainTest.test_basic), so this means that any individual
+        # tests will be included in both parallel_tests and serial_tests, and we need to de-dup them.
+        serial_tests = list(set(serial_tests).difference(set(parallel_tests)))
+
+        return (parallel_tests, serial_tests)
+
+    def _all_test_names(self, suite):
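+        # Recursively flatten a TestSuite into the names of its individual tests.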
+        names = []
+        if hasattr(suite, '_tests'):
+            for t in suite._tests:
+                names.extend(self._all_test_names(t))
+        else:
+            names.append(unit_test_name(suite))
+        return names
+
+    def _log_exception(self):
+        s = StringIO.StringIO()
+        traceback.print_exc(file=s)
+        for l in s.buflist:
+            _log.error('  ' + l.rstrip())
+
+
+class _Loader(unittest.TestLoader):
+    test_method_prefixes = []
+
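+    # Limit test discovery to methods whose names start with one of the
+    # prefixes in test_method_prefixes (e.g. 'test_' or 'serial_test_').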
+    def getTestCaseNames(self, testCaseClass):
+        def isTestMethod(attrname, testCaseClass=testCaseClass):
+            if not hasattr(getattr(testCaseClass, attrname), '__call__'):
+                return False
+            return (any(attrname.startswith(prefix) for prefix in self.test_method_prefixes))
+        testFnNames = filter(isTestMethod, dir(testCaseClass))
+        testFnNames.sort()
+        return testFnNames
+
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/Tools/Scripts/webkitpy/test/main_unittest.py b/Tools/Scripts/webkitpy/test/main_unittest.py
new file mode 100644
index 0000000..4fa6ef3
--- /dev/null
+++ b/Tools/Scripts/webkitpy/test/main_unittest.py
@@ -0,0 +1,117 @@
+# Copyright (C) 2012 Google, Inc.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import sys
+import unittest
+import StringIO
+
+from webkitpy.common.system.filesystem import FileSystem
+from webkitpy.common.system.executive import Executive
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.test.main import Tester, _Loader
+
+
+STUBS_CLASS = __name__ + ".TestStubs"
+
+
+class TestStubs(unittest.TestCase):
+    def test_empty(self):
+        pass
+
+    def integration_test_empty(self):
+        pass
+
+    def serial_test_empty(self):
+        pass
+
+    def serial_integration_test_empty(self):
+        pass
+
+
+class TesterTest(unittest.TestCase):
+
+    def test_no_tests_found(self):
+        tester = Tester()
+        errors = StringIO.StringIO()
+
+        # Here we need to remove any existing log handlers so that they
+        # don't log the messages from webkitpy.test while we're testing it.
+        root_logger = logging.getLogger()
+        root_handlers = root_logger.handlers
+        root_logger.handlers = []
+
+        tester.printer.stream = errors
+        tester.finder.find_names = lambda args, run_all: []
+        oc = OutputCapture()
+        try:
+            oc.capture_output()
+            self.assertFalse(tester.run())
+        finally:
+            _, _, logs = oc.restore_output()
+            root_logger.handlers = root_handlers
+
+        self.assertTrue('No tests to run' in errors.getvalue())
+        self.assertTrue('No tests to run' in logs)
+
+    def _find_test_names(self, args):
+        tester = Tester()
+        tester._options, args = tester._parse_args(args)
+        return tester._test_names(_Loader(), args)
+
+    def test_individual_names_are_not_run_twice(self):
+        args = [STUBS_CLASS + '.test_empty']
+        parallel_tests, serial_tests = self._find_test_names(args)
+        self.assertEquals(parallel_tests, args)
+        self.assertEquals(serial_tests, [])
+
+    def test_integration_tests_are_not_found_by_default(self):
+        parallel_tests, serial_tests = self._find_test_names([STUBS_CLASS])
+        self.assertEquals(parallel_tests, [
+            STUBS_CLASS + '.test_empty',
+            ])
+        self.assertEquals(serial_tests, [
+            STUBS_CLASS + '.serial_test_empty',
+            ])
+
+    def test_integration_tests_are_found(self):
+        parallel_tests, serial_tests = self._find_test_names(['--integration-tests', STUBS_CLASS])
+        self.assertEquals(parallel_tests, [
+            STUBS_CLASS + '.integration_test_empty',
+            STUBS_CLASS + '.test_empty',
+            ])
+        self.assertEquals(serial_tests, [
+            STUBS_CLASS + '.serial_integration_test_empty',
+            STUBS_CLASS + '.serial_test_empty',
+            ])
+
+    def integration_test_coverage_works(self):
+        filesystem = FileSystem()
+        executive = Executive()
+        module_path = filesystem.path_to_module(self.__module__)
+        script_dir = module_path[0:module_path.find('webkitpy') - 1]
+        proc = executive.popen([sys.executable, filesystem.join(script_dir, 'test-webkitpy'), '-c', STUBS_CLASS + '.test_empty'],
+                               stdout=executive.PIPE, stderr=executive.PIPE)
+        out, _ = proc.communicate()
+        retcode = proc.returncode
+        self.assertEquals(retcode, 0)
+        self.assertTrue('Cover' in out)
diff --git a/Tools/Scripts/webkitpy/test/printer.py b/Tools/Scripts/webkitpy/test/printer.py
new file mode 100644
index 0000000..0ec3035
--- /dev/null
+++ b/Tools/Scripts/webkitpy/test/printer.py
@@ -0,0 +1,199 @@
+# Copyright (C) 2012 Google, Inc.
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import StringIO
+
+from webkitpy.common.system import outputcapture
+from webkitpy.layout_tests.views.metered_stream import MeteredStream
+
+_log = logging.getLogger(__name__)
+
+
+class Printer(object):
+    def __init__(self, stream, options=None):
+        self.stream = stream
+        self.meter = None
+        self.options = options
+        self.num_tests = 0
+        self.num_completed = 0
+        self.num_errors = 0
+        self.num_failures = 0
+        self.running_tests = []
+        self.completed_tests = []
+        if options:
+            self.configure(options)
+
+    def configure(self, options):
+        self.options = options
+
+        if options.timing:
+            # --timing implies --verbose
+            options.verbose = max(options.verbose, 1)
+
+        log_level = logging.INFO
+        if options.quiet:
+            log_level = logging.WARNING
+        elif options.verbose == 2:
+            log_level = logging.DEBUG
+
+        self.meter = MeteredStream(self.stream, (options.verbose == 2))
+
+        handler = logging.StreamHandler(self.stream)
+        # We constrain the level on the handler rather than on the root
+        # logger itself.  This is probably better because the handler is
+        # configured and known only to this module, whereas the root logger
+        # is an object shared (and potentially modified) by many modules.
+        # Modifying the handler, then, is less intrusive and less likely to
+        # interfere with modifications made by other modules (e.g. in unit
+        # tests).
+        handler.name = __name__
+        handler.setLevel(log_level)
+        formatter = logging.Formatter("%(message)s")
+        handler.setFormatter(formatter)
+
+        logger = logging.getLogger()
+        logger.addHandler(handler)
+        logger.setLevel(logging.NOTSET)
+
+        # Filter out most webkitpy messages.
+        #
+        # Messages can be selectively re-enabled for this script by updating
+        # this method accordingly.
+        def filter_records(record):
+            """Filter out autoinstall and non-third-party webkitpy messages."""
+            # FIXME: Figure out a way not to use strings here, for example by
+            #        using syntax like webkitpy.test.__name__.  We want to be
+            #        sure not to import any non-Python 2.4 code, though, until
+            #        after the version-checking code has executed.
+            if (record.name.startswith("webkitpy.common.system.autoinstall") or
+                record.name.startswith("webkitpy.test")):
+                return True
+            if record.name.startswith("webkitpy"):
+                return False
+            return True
+
+        testing_filter = logging.Filter()
+        testing_filter.filter = filter_records
+
+        # Display a message so developers are not mystified as to why
+        # logging does not work in the unit tests.
+        _log.info("Suppressing most webkitpy logging while running unit tests.")
+        handler.addFilter(testing_filter)
+
+        if self.options.pass_through:
+            outputcapture.OutputCapture.stream_wrapper = _CaptureAndPassThroughStream
+
+    def write_update(self, msg):
+        self.meter.write_update(msg)
+
+    def print_started_test(self, source, test_name):
+        self.running_tests.append(test_name)
+        if len(self.running_tests) > 1:
+            suffix = ' (+%d)' % (len(self.running_tests) - 1)
+        else:
+            suffix = ''
+
+        if self.options.verbose:
+            write = self.meter.write_update
+        else:
+            write = self.meter.write_throttled_update
+
+        write(self._test_line(self.running_tests[0], suffix))
+
+    def print_finished_test(self, source, test_name, test_time, failures, errors):
+        write = self.meter.writeln
+        if failures:
+            lines = failures[0].splitlines() + ['']
+            suffix = ' failed:'
+            self.num_failures += 1
+        elif errors:
+            lines = errors[0].splitlines() + ['']
+            suffix = ' erred:'
+            self.num_errors += 1
+        else:
+            suffix = ' passed'
+            lines = []
+            if self.options.verbose:
+                write = self.meter.writeln
+            else:
+                write = self.meter.write_throttled_update
+        if self.options.timing:
+            suffix += ' %.4fs' % test_time
+
+        self.num_completed += 1
+
+        if test_name == self.running_tests[0]:
+            self.completed_tests.insert(0, [test_name, suffix, lines])
+        else:
+            self.completed_tests.append([test_name, suffix, lines])
+        self.running_tests.remove(test_name)
+
+        for test_name, msg, lines in self.completed_tests:
+            if lines:
+                self.meter.writeln(self._test_line(test_name, msg))
+                for line in lines:
+                    self.meter.writeln('  ' + line)
+            else:
+                write(self._test_line(test_name, msg))
+        self.completed_tests = []
+
+    def _test_line(self, test_name, suffix):
+        return '[%d/%d] %s%s' % (self.num_completed, self.num_tests, test_name, suffix)
+
+    def print_result(self, run_time):
+        write = self.meter.writeln
+        write('Ran %d test%s in %.3fs' % (self.num_completed, self.num_completed != 1 and "s" or "", run_time))
+        if self.num_failures or self.num_errors:
+            write('FAILED (failures=%d, errors=%d)\n' % (self.num_failures, self.num_errors))
+        else:
+            write('\nOK\n')
+
+
+class _CaptureAndPassThroughStream(object):
+    def __init__(self, stream):
+        self._buffer = StringIO.StringIO()
+        self._stream = stream
+
+    def write(self, msg):
+        self._stream.write(msg)
+
+        # Note that we don't want to capture any output generated by the debugger
+        # because that could cause the results of capture_output() to be invalid.
+        if not self._message_is_from_pdb():
+            self._buffer.write(msg)
+
+    def _message_is_from_pdb(self):
+        # We will assume that if the pdb module is in the stack then the output
+        # is being generated by the Python debugger (or the user calling something
+        # from inside the debugger).
+        import inspect
+        import pdb
+        stack = inspect.stack()
+        return any(frame[1] == pdb.__file__.replace('.pyc', '.py') for frame in stack)
+
+    def flush(self):
+        self._stream.flush()
+
+    def getvalue(self):
+        return self._buffer.getvalue()
diff --git a/Tools/Scripts/webkitpy/test/runner.py b/Tools/Scripts/webkitpy/test/runner.py
new file mode 100644
index 0000000..d3f5764
--- /dev/null
+++ b/Tools/Scripts/webkitpy/test/runner.py
@@ -0,0 +1,82 @@
+# Copyright (C) 2012 Google, Inc.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""code to actually run a list of python tests."""
+
+import re
+import time
+import unittest
+
+from webkitpy.common import message_pool
+
+_test_description = re.compile(r"(\w+) \(([\w.]+)\)")
+
+
+def unit_test_name(test):
+    m = _test_description.match(str(test))
+    return "%s.%s" % (m.group(2), m.group(1))
+
+
+class Runner(object):
+    def __init__(self, printer, loader):
+        self.printer = printer
+        self.loader = loader
+        self.tests_run = 0
+        self.errors = []
+        self.failures = []
+        self.worker_factory = lambda caller: _Worker(caller, self.loader)
+
+    def run(self, test_names, num_workers):
+        if not test_names:
+            return
+        num_workers = min(num_workers, len(test_names))
+        with message_pool.get(self, self.worker_factory, num_workers) as pool:
+            pool.run(('test', test_name) for test_name in test_names)
+
+    def handle(self, message_name, source, test_name, delay=None, failures=None, errors=None):
+        if message_name == 'started_test':
+            self.printer.print_started_test(source, test_name)
+            return
+
+        self.tests_run += 1
+        if failures:
+            self.failures.append((test_name, failures))
+        if errors:
+            self.errors.append((test_name, errors))
+        self.printer.print_finished_test(source, test_name, delay, failures, errors)
+
+
+class _Worker(object):
+    def __init__(self, caller, loader):
+        self._caller = caller
+        self._loader = loader
+
+    def handle(self, message_name, source, test_name):
+        assert message_name == 'test'
+        result = unittest.TestResult()
+        start = time.time()
+        self._caller.post('started_test', test_name)
+
+        # We will need to rework this if a test_name results in multiple tests.
+        self._loader.loadTestsFromName(test_name, None).run(result)
+        self._caller.post('finished_test', test_name, time.time() - start,
+            [failure[1] for failure in result.failures], [error[1] for error in result.errors])
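+
+# Message flow, roughly: Runner.run() feeds ('test', name) items into the
+# message pool; each _Worker replies with 'started_test' and then
+# 'finished_test' carrying the elapsed time plus any failure and error
+# tracebacks, which Runner.handle() records and forwards to the printer.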
diff --git a/Tools/Scripts/webkitpy/test/runner_unittest.py b/Tools/Scripts/webkitpy/test/runner_unittest.py
new file mode 100644
index 0000000..8fe1b06
--- /dev/null
+++ b/Tools/Scripts/webkitpy/test/runner_unittest.py
@@ -0,0 +1,101 @@
+# Copyright (C) 2012 Google, Inc.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import re
+import StringIO
+import unittest
+
+from webkitpy.tool.mocktool import MockOptions
+from webkitpy.test.printer import Printer
+from webkitpy.test.runner import Runner
+
+
+class FakeModuleSuite(object):
+    def __init__(self, name, result, msg):
+        self.name = name
+        self.result = result
+        self.msg = msg
+
+    def __str__(self):
+        return self.name
+
+    def run(self, result):
+        result.testsRun += 1
+        if self.result == 'F':
+            result.failures.append((self.name, self.msg))
+        elif self.result == 'E':
+            result.errors.append((self.name, self.msg))
+
+
+class FakeTopSuite(object):
+    def __init__(self, tests):
+        self._tests = tests
+
+
+class FakeLoader(object):
+    def __init__(self, *test_triples):
+        self.triples = test_triples
+        self._tests = []
+        self._results = {}
+        for test_name, result, msg in self.triples:
+            self._tests.append(test_name)
+            m = re.match(r"(\w+) \(([\w.]+)\)", test_name)
+            self._results['%s.%s' % (m.group(2), m.group(1))] = (test_name, result, msg)
+
+    def top_suite(self):
+        return FakeTopSuite(self._tests)
+
+    def loadTestsFromName(self, name, _):
+        return FakeModuleSuite(*self._results[name])
+
+
+class RunnerTest(unittest.TestCase):
+    def setUp(self):
+        # Here we have to jump through a hoop to make sure test-webkitpy doesn't log
+        # any messages from these tests :(.
+        self.root_logger = logging.getLogger()
+        self.log_levels = []
+        self.log_handlers = self.root_logger.handlers[:]
+        for handler in self.log_handlers:
+            self.log_levels.append(handler.level)
+            handler.level = logging.CRITICAL
+
+    def tearDown(self):
+        for handler in self.log_handlers:
+            handler.level = self.log_levels.pop(0)
+
+    def test_run(self, verbose=0, timing=False, child_processes=1, quiet=False):
+        options = MockOptions(verbose=verbose, timing=timing, child_processes=child_processes, quiet=quiet, pass_through=False)
+        stream = StringIO.StringIO()
+        loader = FakeLoader(('test1 (Foo)', '.', ''),
+                            ('test2 (Foo)', 'F', 'test2\nfailed'),
+                            ('test3 (Foo)', 'E', 'test3\nerred'))
+        runner = Runner(Printer(stream, options), loader)
+        runner.run(['Foo.test1', 'Foo.test2', 'Foo.test3'], 1)
+        self.assertEqual(runner.tests_run, 3)
+        self.assertEqual(len(runner.failures), 1)
+        self.assertEqual(len(runner.errors), 1)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/test/skip.py b/Tools/Scripts/webkitpy/test/skip.py
new file mode 100644
index 0000000..8587d56
--- /dev/null
+++ b/Tools/Scripts/webkitpy/test/skip.py
@@ -0,0 +1,52 @@
+# Copyright (C) 2010 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+
+_log = logging.getLogger(__name__)
+
+
+def skip_if(klass, condition, message=None, logger=None):
+    """Makes all test_* methods in a given class no-ops if the given condition
+    is False. Backported from Python 3.1+'s unittest.skipIf decorator."""
+    if not logger:
+        logger = _log
+    if not condition:
+        return klass
+    for name in dir(klass):
+        attr = getattr(klass, name)
+        if not callable(attr):
+            continue
+        if not name.startswith('test_'):
+            continue
+        setattr(klass, name, _skipped_method(attr, message, logger))
+    klass._printed_skipped_message = False
+    return klass
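+# Rough usage sketch (hypothetical test class; assumes unittest and sys are
+# imported by the caller):
+#
+#   class PosixOnlyTest(unittest.TestCase):
+#       def test_symlinks(self):
+#           ...
+#
+#   PosixOnlyTest = skip_if(PosixOnlyTest, sys.platform == 'win32',
+#                           'symlinks are not supported on Windows')
+#
+# When the condition holds, every test_* method is replaced by a stub that
+# logs the message once for the class instead of running the test body.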
+
+
+def _skipped_method(method, message, logger):
+    def _skip(*args):
+        if method.im_class._printed_skipped_message:
+            return
+        method.im_class._printed_skipped_message = True
+        logger.info('Skipping %s.%s: %s' % (method.__module__, method.im_class.__name__, message))
+    return _skip
diff --git a/Tools/Scripts/webkitpy/test/skip_unittest.py b/Tools/Scripts/webkitpy/test/skip_unittest.py
new file mode 100644
index 0000000..f61a1bb
--- /dev/null
+++ b/Tools/Scripts/webkitpy/test/skip_unittest.py
@@ -0,0 +1,77 @@
+# Copyright (C) 2010 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import StringIO
+import logging
+import unittest
+
+from webkitpy.test.skip import skip_if
+
+
+class SkipTest(unittest.TestCase):
+    def setUp(self):
+        self.logger = logging.getLogger(__name__)
+
+        self.old_level = self.logger.level
+        self.logger.setLevel(logging.INFO)
+
+        self.old_propagate = self.logger.propagate
+        self.logger.propagate = False
+
+        self.log_stream = StringIO.StringIO()
+        self.handler = logging.StreamHandler(self.log_stream)
+        self.logger.addHandler(self.handler)
+
+        self.foo_was_called = False
+
+    def tearDown(self):
+        self.logger.removeHandler(self.handler)
+        self.logger.propagate = self.old_propagate
+        self.logger.setLevel(self.old_level)
+
+    def create_fixture_class(self):
+        class TestSkipFixture(object):
+            def __init__(self, callback):
+                self.callback = callback
+
+            def test_foo(self):
+                self.callback()
+
+        return TestSkipFixture
+
+    def foo_callback(self):
+        self.foo_was_called = True
+
+    def test_skip_if_false(self):
+        klass = skip_if(self.create_fixture_class(), False, 'Should not see this message.', logger=self.logger)
+        klass(self.foo_callback).test_foo()
+        self.assertEqual(self.log_stream.getvalue(), '')
+        self.assertTrue(self.foo_was_called)
+
+    def test_skip_if_true(self):
+        klass = skip_if(self.create_fixture_class(), True, 'Should see this message.', logger=self.logger)
+        klass(self.foo_callback).test_foo()
+        self.assertEqual(self.log_stream.getvalue(), 'Skipping webkitpy.test.skip_unittest.TestSkipFixture: Should see this message.\n')
+        self.assertFalse(self.foo_was_called)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/thirdparty/BeautifulSoup.py b/Tools/Scripts/webkitpy/thirdparty/BeautifulSoup.py
new file mode 100644
index 0000000..4b17b85
--- /dev/null
+++ b/Tools/Scripts/webkitpy/thirdparty/BeautifulSoup.py
@@ -0,0 +1,2014 @@
+"""Beautiful Soup
+Elixir and Tonic
+"The Screen-Scraper's Friend"
+http://www.crummy.com/software/BeautifulSoup/
+
+Beautiful Soup parses a (possibly invalid) XML or HTML document into a
+tree representation. It provides methods and Pythonic idioms that make
+it easy to navigate, search, and modify the tree.
+
+A well-formed XML/HTML document yields a well-formed data
+structure. An ill-formed XML/HTML document yields a correspondingly
+ill-formed data structure. If your document is only locally
+well-formed, you can use this library to find and process the
+well-formed part of it.
+
+Beautiful Soup works with Python 2.2 and up. It has no external
+dependencies, but you'll have more success at converting data to UTF-8
+if you also install these three packages:
+
+* chardet, for auto-detecting character encodings
+  http://chardet.feedparser.org/
+* cjkcodecs and iconv_codec, which add more encodings to the ones supported
+  by stock Python.
+  http://cjkpython.i18n.org/
+
+Beautiful Soup defines classes for two main parsing strategies:
+
+ * BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
+   language that kind of looks like XML.
+
+ * BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
+   or invalid. This class has web browser-like heuristics for
+   obtaining a sensible parse tree in the face of common HTML errors.
+
+Beautiful Soup also defines a class (UnicodeDammit) for autodetecting
+the encoding of an HTML or XML document, and converting it to
+Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser.
+
+For more than you ever wanted to know about Beautiful Soup, see the
+documentation:
+http://www.crummy.com/software/BeautifulSoup/documentation.html
+
+Here, have some legalese:
+
+Copyright (c) 2004-2010, Leonard Richardson
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+  * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+  * Redistributions in binary form must reproduce the above
+    copyright notice, this list of conditions and the following
+    disclaimer in the documentation and/or other materials provided
+    with the distribution.
+
+  * Neither the name of the the Beautiful Soup Consortium and All
+    Night Kosher Bakery nor the names of its contributors may be
+    used to endorse or promote products derived from this software
+    without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT.
+
+"""
+from __future__ import generators
+
+__author__ = "Leonard Richardson (leonardr@segfault.org)"
+__version__ = "3.2.0"
+__copyright__ = "Copyright (c) 2004-2010 Leonard Richardson"
+__license__ = "New-style BSD"
+
+from sgmllib import SGMLParser, SGMLParseError
+import codecs
+import markupbase
+import types
+import re
+import sgmllib
+try:
+  from htmlentitydefs import name2codepoint
+except ImportError:
+  name2codepoint = {}
+try:
+    set
+except NameError:
+    from sets import Set as set
+
+#These hacks make Beautiful Soup able to parse XML with namespaces
+sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
+markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match
+
+DEFAULT_OUTPUT_ENCODING = "utf-8"
+
+def _match_css_class(str):
+    """Build a RE to match the given CSS class."""
+    return re.compile(r"(^|.*\s)%s($|\s)" % str)
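+# e.g. (illustrative class names): _match_css_class("nav").search(s) matches
+# s == "nav", "top nav", or "nav bar", but not "navbar".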
+
+# First, the classes that represent markup elements.
+
+class PageElement(object):
+    """Contains the navigational information for some part of the page
+    (either a tag or a piece of text)"""
+
+    def setup(self, parent=None, previous=None):
+        """Sets up the initial relations between this element and
+        other elements."""
+        self.parent = parent
+        self.previous = previous
+        self.next = None
+        self.previousSibling = None
+        self.nextSibling = None
+        if self.parent and self.parent.contents:
+            self.previousSibling = self.parent.contents[-1]
+            self.previousSibling.nextSibling = self
+
+    def replaceWith(self, replaceWith):
+        oldParent = self.parent
+        myIndex = self.parent.index(self)
+        if hasattr(replaceWith, "parent")\
+                  and replaceWith.parent is self.parent:
+            # We're replacing this element with one of its siblings.
+            index = replaceWith.parent.index(replaceWith)
+            if index and index < myIndex:
+                # Furthermore, it comes before this element. That
+                # means that when we extract it, the index of this
+                # element will change.
+                myIndex = myIndex - 1
+        self.extract()
+        oldParent.insert(myIndex, replaceWith)
+
+    def replaceWithChildren(self):
+        myParent = self.parent
+        myIndex = self.parent.index(self)
+        self.extract()
+        reversedChildren = list(self.contents)
+        reversedChildren.reverse()
+        for child in reversedChildren:
+            myParent.insert(myIndex, child)
+
+    def extract(self):
+        """Destructively rips this element out of the tree."""
+        if self.parent:
+            try:
+                del self.parent.contents[self.parent.index(self)]
+            except ValueError:
+                pass
+
+        #Find the two elements that would be next to each other if
+        #this element (and any children) hadn't been parsed. Connect
+        #the two.
+        lastChild = self._lastRecursiveChild()
+        nextElement = lastChild.next
+
+        if self.previous:
+            self.previous.next = nextElement
+        if nextElement:
+            nextElement.previous = self.previous
+        self.previous = None
+        lastChild.next = None
+
+        self.parent = None
+        if self.previousSibling:
+            self.previousSibling.nextSibling = self.nextSibling
+        if self.nextSibling:
+            self.nextSibling.previousSibling = self.previousSibling
+        self.previousSibling = self.nextSibling = None
+        return self
+
+    def _lastRecursiveChild(self):
+        "Finds the last element beneath this object to be parsed."
+        lastChild = self
+        while hasattr(lastChild, 'contents') and lastChild.contents:
+            lastChild = lastChild.contents[-1]
+        return lastChild
+
+    def insert(self, position, newChild):
+        if isinstance(newChild, basestring) \
+            and not isinstance(newChild, NavigableString):
+            newChild = NavigableString(newChild)
+
+        position =  min(position, len(self.contents))
+        if hasattr(newChild, 'parent') and newChild.parent is not None:
+            # We're 'inserting' an element that's already one
+            # of this object's children.
+            if newChild.parent is self:
+                index = self.index(newChild)
+                if index > position:
+                    # Furthermore we're moving it further down the
+                    # list of this object's children. That means that
+                    # when we extract this element, our target index
+                    # will jump down one.
+                    position = position - 1
+            newChild.extract()
+
+        newChild.parent = self
+        previousChild = None
+        if position == 0:
+            newChild.previousSibling = None
+            newChild.previous = self
+        else:
+            previousChild = self.contents[position-1]
+            newChild.previousSibling = previousChild
+            newChild.previousSibling.nextSibling = newChild
+            newChild.previous = previousChild._lastRecursiveChild()
+        if newChild.previous:
+            newChild.previous.next = newChild
+
+        newChildsLastElement = newChild._lastRecursiveChild()
+
+        if position >= len(self.contents):
+            newChild.nextSibling = None
+
+            parent = self
+            parentsNextSibling = None
+            while not parentsNextSibling:
+                parentsNextSibling = parent.nextSibling
+                parent = parent.parent
+                if not parent: # This is the last element in the document.
+                    break
+            if parentsNextSibling:
+                newChildsLastElement.next = parentsNextSibling
+            else:
+                newChildsLastElement.next = None
+        else:
+            nextChild = self.contents[position]
+            newChild.nextSibling = nextChild
+            if newChild.nextSibling:
+                newChild.nextSibling.previousSibling = newChild
+            newChildsLastElement.next = nextChild
+
+        if newChildsLastElement.next:
+            newChildsLastElement.next.previous = newChildsLastElement
+        self.contents.insert(position, newChild)
+
+    def append(self, tag):
+        """Appends the given tag to the contents of this tag."""
+        self.insert(len(self.contents), tag)
+
+    def findNext(self, name=None, attrs={}, text=None, **kwargs):
+        """Returns the first item that matches the given criteria and
+        appears after this Tag in the document."""
+        return self._findOne(self.findAllNext, name, attrs, text, **kwargs)
+
+    def findAllNext(self, name=None, attrs={}, text=None, limit=None,
+                    **kwargs):
+        """Returns all items that match the given criteria and appear
+        after this Tag in the document."""
+        return self._findAll(name, attrs, text, limit, self.nextGenerator,
+                             **kwargs)
+
+    def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
+        """Returns the closest sibling to this Tag that matches the
+        given criteria and appears after this Tag in the document."""
+        return self._findOne(self.findNextSiblings, name, attrs, text,
+                             **kwargs)
+
+    def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
+                         **kwargs):
+        """Returns the siblings of this Tag that match the given
+        criteria and appear after this Tag in the document."""
+        return self._findAll(name, attrs, text, limit,
+                             self.nextSiblingGenerator, **kwargs)
+    fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x
+
+    def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
+        """Returns the first item that matches the given criteria and
+        appears before this Tag in the document."""
+        return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)
+
+    def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
+                        **kwargs):
+        """Returns all items that match the given criteria and appear
+        before this Tag in the document."""
+        return self._findAll(name, attrs, text, limit, self.previousGenerator,
+                           **kwargs)
+    fetchPrevious = findAllPrevious # Compatibility with pre-3.x
+
+    def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
+        """Returns the closest sibling to this Tag that matches the
+        given criteria and appears before this Tag in the document."""
+        return self._findOne(self.findPreviousSiblings, name, attrs, text,
+                             **kwargs)
+
+    def findPreviousSiblings(self, name=None, attrs={}, text=None,
+                             limit=None, **kwargs):
+        """Returns the siblings of this Tag that match the given
+        criteria and appear before this Tag in the document."""
+        return self._findAll(name, attrs, text, limit,
+                             self.previousSiblingGenerator, **kwargs)
+    fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x
+
+    def findParent(self, name=None, attrs={}, **kwargs):
+        """Returns the closest parent of this Tag that matches the given
+        criteria."""
+        # NOTE: We can't use _findOne because findParents takes a different
+        # set of arguments.
+        r = None
+        l = self.findParents(name, attrs, 1)
+        if l:
+            r = l[0]
+        return r
+
+    def findParents(self, name=None, attrs={}, limit=None, **kwargs):
+        """Returns the parents of this Tag that match the given
+        criteria."""
+
+        return self._findAll(name, attrs, None, limit, self.parentGenerator,
+                             **kwargs)
+    fetchParents = findParents # Compatibility with pre-3.x
+
+    #These methods do the real heavy lifting.
+
+    def _findOne(self, method, name, attrs, text, **kwargs):
+        r = None
+        l = method(name, attrs, text, 1, **kwargs)
+        if l:
+            r = l[0]
+        return r
+
+    def _findAll(self, name, attrs, text, limit, generator, **kwargs):
+        "Iterates over a generator looking for things that match."
+
+        if isinstance(name, SoupStrainer):
+            strainer = name
+        # (Possibly) special case some findAll*(...) searches
+        elif text is None and not limit and not attrs and not kwargs:
+            # findAll*(True)
+            if name is True:
+                return [element for element in generator()
+                        if isinstance(element, Tag)]
+            # findAll*('tag-name')
+            elif isinstance(name, basestring):
+                return [element for element in generator()
+                        if isinstance(element, Tag) and
+                        element.name == name]
+            else:
+                strainer = SoupStrainer(name, attrs, text, **kwargs)
+        # Build a SoupStrainer
+        else:
+            strainer = SoupStrainer(name, attrs, text, **kwargs)
+        results = ResultSet(strainer)
+        g = generator()
+        while True:
+            try:
+                i = g.next()
+            except StopIteration:
+                break
+            if i:
+                found = strainer.search(i)
+                if found:
+                    results.append(found)
+                    if limit and len(results) >= limit:
+                        break
+        return results
+
+    #These Generators can be used to navigate starting from both
+    #NavigableStrings and Tags.
+    def nextGenerator(self):
+        i = self
+        while i is not None:
+            i = i.next
+            yield i
+
+    def nextSiblingGenerator(self):
+        i = self
+        while i is not None:
+            i = i.nextSibling
+            yield i
+
+    def previousGenerator(self):
+        i = self
+        while i is not None:
+            i = i.previous
+            yield i
+
+    def previousSiblingGenerator(self):
+        i = self
+        while i is not None:
+            i = i.previousSibling
+            yield i
+
+    def parentGenerator(self):
+        i = self
+        while i is not None:
+            i = i.parent
+            yield i
+
+    # Utility methods
+    def substituteEncoding(self, str, encoding=None):
+        encoding = encoding or "utf-8"
+        return str.replace("%SOUP-ENCODING%", encoding)
+
+    def toEncoding(self, s, encoding=None):
+        """Encodes an object to a string in some encoding, or to Unicode.
+        ."""
+        if isinstance(s, unicode):
+            if encoding:
+                s = s.encode(encoding)
+        elif isinstance(s, str):
+            if encoding:
+                s = s.encode(encoding)
+            else:
+                s = unicode(s)
+        else:
+            if encoding:
+                s  = self.toEncoding(str(s), encoding)
+            else:
+                s = unicode(s)
+        return s
+
+class NavigableString(unicode, PageElement):
+
+    def __new__(cls, value):
+        """Create a new NavigableString.
+
+        When unpickling a NavigableString, this method is called with
+        the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
+        passed in to the superclass's __new__ or the superclass won't know
+        how to handle non-ASCII characters.
+        """
+        if isinstance(value, unicode):
+            return unicode.__new__(cls, value)
+        return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
+
+    def __getnewargs__(self):
+        return (NavigableString.__str__(self),)
+
+    def __getattr__(self, attr):
+        """text.string gives you text. This is for backwards
+        compatibility for Navigable*String, but for CData* it lets you
+        get the string without the CData wrapper."""
+        if attr == 'string':
+            return self
+        else:
+            raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)
+
+    def __unicode__(self):
+        return str(self).decode(DEFAULT_OUTPUT_ENCODING)
+
+    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
+        if encoding:
+            return self.encode(encoding)
+        else:
+            return self
+
+class CData(NavigableString):
+
+    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
+        return "<![CDATA[%s]]>" % NavigableString.__str__(self, encoding)
+
+class ProcessingInstruction(NavigableString):
+    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
+        output = self
+        if "%SOUP-ENCODING%" in output:
+            output = self.substituteEncoding(output, encoding)
+        return "<?%s?>" % self.toEncoding(output, encoding)
+
+class Comment(NavigableString):
+    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
+        return "<!--%s-->" % NavigableString.__str__(self, encoding)
+
+class Declaration(NavigableString):
+    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
+        return "<!%s>" % NavigableString.__str__(self, encoding)
+
+class Tag(PageElement):
+
+    """Represents a found HTML tag with its attributes and contents."""
+
+    def _invert(h):
+        "Cheap function to invert a hash."
+        i = {}
+        for k,v in h.items():
+            i[v] = k
+        return i
+
+    XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'",
+                                      "quot" : '"',
+                                      "amp" : "&",
+                                      "lt" : "<",
+                                      "gt" : ">" }
+
+    XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS)
+
+    def _convertEntities(self, match):
+        """Used in a call to re.sub to replace HTML, XML, and numeric
+        entities with the appropriate Unicode characters. If HTML
+        entities are being converted, any unrecognized entities are
+        escaped."""
+        x = match.group(1)
+        if self.convertHTMLEntities and x in name2codepoint:
+            return unichr(name2codepoint[x])
+        elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
+            if self.convertXMLEntities:
+                return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
+            else:
+                return u'&%s;' % x
+        elif len(x) > 0 and x[0] == '#':
+            # Handle numeric entities
+            if len(x) > 1 and x[1] == 'x':
+                return unichr(int(x[2:], 16))
+            else:
+                return unichr(int(x[1:]))
+
+        elif self.escapeUnrecognizedEntities:
+            return u'&amp;%s;' % x
+        else:
+            return u'&%s;' % x
+
+    def __init__(self, parser, name, attrs=None, parent=None,
+                 previous=None):
+        "Basic constructor."
+
+        # We don't actually store the parser object: that lets extracted
+        # chunks be garbage-collected
+        self.parserClass = parser.__class__
+        self.isSelfClosing = parser.isSelfClosingTag(name)
+        self.name = name
+        if attrs is None:
+            attrs = []
+        elif isinstance(attrs, dict):
+            attrs = attrs.items()
+        self.attrs = attrs
+        self.contents = []
+        self.setup(parent, previous)
+        self.hidden = False
+        self.containsSubstitutions = False
+        self.convertHTMLEntities = parser.convertHTMLEntities
+        self.convertXMLEntities = parser.convertXMLEntities
+        self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities
+
+        # Convert any HTML, XML, or numeric entities in the attribute values.
+        convert = lambda(k, val): (k,
+                                   re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);",
+                                          self._convertEntities,
+                                          val))
+        self.attrs = map(convert, self.attrs)
+
+    def getString(self):
+        if (len(self.contents) == 1
+            and isinstance(self.contents[0], NavigableString)):
+            return self.contents[0]
+
+    def setString(self, string):
+        """Replace the contents of the tag with a string"""
+        self.clear()
+        self.append(string)
+
+    string = property(getString, setString)
+
+    def getText(self, separator=u""):
+        if not len(self.contents):
+            return u""
+        stopNode = self._lastRecursiveChild().next
+        strings = []
+        current = self.contents[0]
+        while current is not stopNode:
+            if isinstance(current, NavigableString):
+                strings.append(current.strip())
+            current = current.next
+        return separator.join(strings)
+
+    text = property(getText)
+
+    def get(self, key, default=None):
+        """Returns the value of the 'key' attribute for the tag, or
+        the value given for 'default' if it doesn't have that
+        attribute."""
+        return self._getAttrMap().get(key, default)
+
+    def clear(self):
+        """Extract all children."""
+        for child in self.contents[:]:
+            child.extract()
+
+    def index(self, element):
+        for i, child in enumerate(self.contents):
+            if child is element:
+                return i
+        raise ValueError("Tag.index: element not in tag")
+
+    def has_key(self, key):
+        return self._getAttrMap().has_key(key)
+
+    def __getitem__(self, key):
+        """tag[key] returns the value of the 'key' attribute for the tag,
+        and throws an exception if it's not there."""
+        return self._getAttrMap()[key]
+
+    def __iter__(self):
+        "Iterating over a tag iterates over its contents."
+        return iter(self.contents)
+
+    def __len__(self):
+        "The length of a tag is the length of its list of contents."
+        return len(self.contents)
+
+    def __contains__(self, x):
+        return x in self.contents
+
+    def __nonzero__(self):
+        "A tag is non-None even if it has no contents."
+        return True
+
+    def __setitem__(self, key, value):
+        """Setting tag[key] sets the value of the 'key' attribute for the
+        tag."""
+        self._getAttrMap()
+        self.attrMap[key] = value
+        found = False
+        for i in range(0, len(self.attrs)):
+            if self.attrs[i][0] == key:
+                self.attrs[i] = (key, value)
+                found = True
+        if not found:
+            self.attrs.append((key, value))
+        self._getAttrMap()[key] = value
+
+    def __delitem__(self, key):
+        "Deleting tag[key] deletes all 'key' attributes for the tag."
+        for item in self.attrs:
+            if item[0] == key:
+                self.attrs.remove(item)
+                #We don't break because bad HTML can define the same
+                #attribute multiple times.
+            self._getAttrMap()
+            if self.attrMap.has_key(key):
+                del self.attrMap[key]
+
+    def __call__(self, *args, **kwargs):
+        """Calling a tag like a function is the same as calling its
+        findAll() method. Eg. tag('a') returns a list of all the A tags
+        found within this tag."""
+        return apply(self.findAll, args, kwargs)
+
+    def __getattr__(self, tag):
+        #print "Getattr %s.%s" % (self.__class__, tag)
+        if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
+            return self.find(tag[:-3])
+        elif tag.find('__') != 0:
+            return self.find(tag)
+        raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag)
+
+    def __eq__(self, other):
+        """Returns true iff this tag has the same name, the same attributes,
+        and the same contents (recursively) as the given tag.
+
+        NOTE: right now this will return false if two tags have the
+        same attributes in a different order. Should this be fixed?"""
+        if other is self:
+            return True
+        if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):
+            return False
+        for i in range(0, len(self.contents)):
+            if self.contents[i] != other.contents[i]:
+                return False
+        return True
+
+    def __ne__(self, other):
+        """Returns true iff this tag is not identical to the other tag,
+        as defined in __eq__."""
+        return not self == other
+
+    def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
+        """Renders this tag as a string."""
+        return self.__str__(encoding)
+
+    def __unicode__(self):
+        return self.__str__(None)
+
+    BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
+                                           + "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
+                                           + ")")
+
+    def _sub_entity(self, x):
+        """Used with a regular expression to substitute the
+        appropriate XML entity for an XML special character."""
+        return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";"
+
+    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,
+                prettyPrint=False, indentLevel=0):
+        """Returns a string or Unicode representation of this tag and
+        its contents. To get Unicode, pass None for encoding.
+
+        NOTE: since Python's HTML parser consumes whitespace, this
+        method is not certain to reproduce the whitespace present in
+        the original string."""
+
+        encodedName = self.toEncoding(self.name, encoding)
+
+        attrs = []
+        if self.attrs:
+            for key, val in self.attrs:
+                fmt = '%s="%s"'
+                if isinstance(val, basestring):
+                    if self.containsSubstitutions and '%SOUP-ENCODING%' in val:
+                        val = self.substituteEncoding(val, encoding)
+
+                    # The attribute value either:
+                    #
+                    # * Contains no embedded double quotes or single quotes.
+                    #   No problem: we enclose it in double quotes.
+                    # * Contains embedded single quotes. No problem:
+                    #   double quotes work here too.
+                    # * Contains embedded double quotes. No problem:
+                    #   we enclose it in single quotes.
+                    # * Embeds both single _and_ double quotes. This
+                    #   can't happen naturally, but it can happen if
+                    #   you modify an attribute value after parsing
+                    #   the document. Now we have a bit of a
+                    #   problem. We solve it by enclosing the
+                    #   attribute in single quotes, and escaping any
+                    #   embedded single quotes to XML entities.
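+                    #
+                    # e.g. (illustrative values): 'say "hi"' is emitted in
+                    # single quotes, and a value containing both quote styles
+                    # is emitted single-quoted with its single quotes rewritten
+                    # to &squot;.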
+                    if '"' in val:
+                        fmt = "%s='%s'"
+                        if "'" in val:
+                            # TODO: replace with apos when
+                            # appropriate.
+                            val = val.replace("'", "&squot;")
+
+                    # Now we're okay w/r/t quotes. But the attribute
+                    # value might also contain angle brackets, or
+                    # ampersands that aren't part of entities. We need
+                    # to escape those to XML entities too.
+                    val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)
+
+                attrs.append(fmt % (self.toEncoding(key, encoding),
+                                    self.toEncoding(val, encoding)))
+        close = ''
+        closeTag = ''
+        if self.isSelfClosing:
+            close = ' /'
+        else:
+            closeTag = '</%s>' % encodedName
+
+        indentTag, indentContents = 0, 0
+        if prettyPrint:
+            indentTag = indentLevel
+            space = (' ' * (indentTag-1))
+            indentContents = indentTag + 1
+        contents = self.renderContents(encoding, prettyPrint, indentContents)
+        if self.hidden:
+            s = contents
+        else:
+            s = []
+            attributeString = ''
+            if attrs:
+                attributeString = ' ' + ' '.join(attrs)
+            if prettyPrint:
+                s.append(space)
+            s.append('<%s%s%s>' % (encodedName, attributeString, close))
+            if prettyPrint:
+                s.append("\n")
+            s.append(contents)
+            if prettyPrint and contents and contents[-1] != "\n":
+                s.append("\n")
+            if prettyPrint and closeTag:
+                s.append(space)
+            s.append(closeTag)
+            if prettyPrint and closeTag and self.nextSibling:
+                s.append("\n")
+            s = ''.join(s)
+        return s
+
+    def decompose(self):
+        """Recursively destroys the contents of this tree."""
+        self.extract()
+        if len(self.contents) == 0:
+            return
+        current = self.contents[0]
+        while current is not None:
+            next = current.next
+            if isinstance(current, Tag):
+                del current.contents[:]
+            current.parent = None
+            current.previous = None
+            current.previousSibling = None
+            current.next = None
+            current.nextSibling = None
+            current = next
+
+    def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):
+        return self.__str__(encoding, True)
+
+    def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
+                       prettyPrint=False, indentLevel=0):
+        """Renders the contents of this tag as a string in the given
+        encoding. If encoding is None, returns a Unicode string."""
+        s=[]
+        for c in self:
+            text = None
+            if isinstance(c, NavigableString):
+                text = c.__str__(encoding)
+            elif isinstance(c, Tag):
+                s.append(c.__str__(encoding, prettyPrint, indentLevel))
+            if text and prettyPrint:
+                text = text.strip()
+            if text:
+                if prettyPrint:
+                    s.append(" " * (indentLevel-1))
+                s.append(text)
+                if prettyPrint:
+                    s.append("\n")
+        return ''.join(s)
+
+    #Soup methods
+
+    def find(self, name=None, attrs={}, recursive=True, text=None,
+             **kwargs):
+        """Return only the first child of this Tag matching the given
+        criteria."""
+        r = None
+        l = self.findAll(name, attrs, recursive, text, 1, **kwargs)
+        if l:
+            r = l[0]
+        return r
+    findChild = find
+
+    def findAll(self, name=None, attrs={}, recursive=True, text=None,
+                limit=None, **kwargs):
+        """Extracts a list of Tag objects that match the given
+        criteria.  You can specify the name of the Tag and any
+        attributes you want the Tag to have.
+
+        The value of a key-value pair in the 'attrs' map can be a
+        string, a list of strings, a regular expression object, or a
+        callable that takes a string and returns whether or not the
+        string matches for some custom definition of 'matches'. The
+        same is true of the tag name."""
+        generator = self.recursiveChildGenerator
+        if not recursive:
+            generator = self.childGenerator
+        return self._findAll(name, attrs, text, limit, generator, **kwargs)
+    findChildren = findAll
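+    # e.g. (illustrative): tag.findAll('a') returns every descendant <a> Tag;
+    # tag.findAll(True) returns all descendant Tags; and
+    # tag.findAll('div', {'class': _match_css_class('entry')}) matches a class
+    # attribute against the regular expression.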
+
+    # Pre-3.x compatibility methods
+    first = find
+    fetch = findAll
+
+    def fetchText(self, text=None, recursive=True, limit=None):
+        return self.findAll(text=text, recursive=recursive, limit=limit)
+
+    def firstText(self, text=None, recursive=True):
+        return self.find(text=text, recursive=recursive)
+
+    #Private methods
+
+    def _getAttrMap(self):
+        """Initializes a map representation of this tag's attributes,
+        if not already initialized."""
+        if not getattr(self, 'attrMap'):
+            self.attrMap = {}
+            for (key, value) in self.attrs:
+                self.attrMap[key] = value
+        return self.attrMap
+
+    #Generator methods
+    def childGenerator(self):
+        # Just use the iterator from the contents
+        return iter(self.contents)
+
+    def recursiveChildGenerator(self):
+        if not len(self.contents):
+            raise StopIteration
+        stopNode = self._lastRecursiveChild().next
+        current = self.contents[0]
+        while current is not stopNode:
+            yield current
+            current = current.next
+
+
+# Next, a couple classes to represent queries and their results.
+class SoupStrainer:
+    """Encapsulates a number of ways of matching a markup element (tag or
+    text)."""
+
+    def __init__(self, name=None, attrs={}, text=None, **kwargs):
+        self.name = name
+        if isinstance(attrs, basestring):
+            kwargs['class'] = _match_css_class(attrs)
+            attrs = None
+        if kwargs:
+            if attrs:
+                attrs = attrs.copy()
+                attrs.update(kwargs)
+            else:
+                attrs = kwargs
+        self.attrs = attrs
+        self.text = text
+
+    def __str__(self):
+        if self.text:
+            return self.text
+        else:
+            return "%s|%s" % (self.name, self.attrs)
+
+    def searchTag(self, markupName=None, markupAttrs={}):
+        found = None
+        markup = None
+        if isinstance(markupName, Tag):
+            markup = markupName
+            markupAttrs = markup
+        callFunctionWithTagData = callable(self.name) \
+                                and not isinstance(markupName, Tag)
+
+        if (not self.name) \
+               or callFunctionWithTagData \
+               or (markup and self._matches(markup, self.name)) \
+               or (not markup and self._matches(markupName, self.name)):
+            if callFunctionWithTagData:
+                match = self.name(markupName, markupAttrs)
+            else:
+                match = True
+                markupAttrMap = None
+                for attr, matchAgainst in self.attrs.items():
+                    if not markupAttrMap:
+                         if hasattr(markupAttrs, 'get'):
+                            markupAttrMap = markupAttrs
+                         else:
+                            markupAttrMap = {}
+                            for k,v in markupAttrs:
+                                markupAttrMap[k] = v
+                    attrValue = markupAttrMap.get(attr)
+                    if not self._matches(attrValue, matchAgainst):
+                        match = False
+                        break
+            if match:
+                if markup:
+                    found = markup
+                else:
+                    found = markupName
+        return found
+
+    def search(self, markup):
+        #print 'looking for %s in %s' % (self, markup)
+        found = None
+        # If given a list of items, scan it for a text element that
+        # matches.
+        if hasattr(markup, "__iter__") \
+                and not isinstance(markup, Tag):
+            for element in markup:
+                if isinstance(element, NavigableString) \
+                       and self.search(element):
+                    found = element
+                    break
+        # If it's a Tag, make sure its name or attributes match.
+        # Don't bother with Tags if we're searching for text.
+        elif isinstance(markup, Tag):
+            if not self.text:
+                found = self.searchTag(markup)
+        # If it's text, make sure the text matches.
+        elif isinstance(markup, NavigableString) or \
+                 isinstance(markup, basestring):
+            if self._matches(markup, self.text):
+                found = markup
+        else:
+            raise Exception, "I don't know how to match against a %s" \
+                  % markup.__class__
+        return found
+
+    def _matches(self, markup, matchAgainst):
+        #print "Matching %s against %s" % (markup, matchAgainst)
+        result = False
+        if matchAgainst is True:
+            result = markup is not None
+        elif callable(matchAgainst):
+            result = matchAgainst(markup)
+        else:
+            #Custom match methods take the tag as an argument, but all
+            #other ways of matching match the tag name as a string.
+            if isinstance(markup, Tag):
+                markup = markup.name
+            if markup and not isinstance(markup, basestring):
+                markup = unicode(markup)
+            #Now we know that chunk is either a string, or None.
+            if hasattr(matchAgainst, 'match'):
+                # It's a regexp object.
+                result = markup and matchAgainst.search(markup)
+            elif hasattr(matchAgainst, '__iter__'): # list-like
+                result = markup in matchAgainst
+            elif hasattr(matchAgainst, 'items'):
+                result = markup.has_key(matchAgainst)
+            elif matchAgainst and isinstance(markup, basestring):
+                if isinstance(markup, unicode):
+                    matchAgainst = unicode(matchAgainst)
+                else:
+                    matchAgainst = str(matchAgainst)
+
+            if not result:
+                result = matchAgainst == markup
+        return result
+
+class ResultSet(list):
+    """A ResultSet is just a list that keeps track of the SoupStrainer
+    that created it."""
+    def __init__(self, source):
+        list.__init__([])
+        self.source = source
+
+# Now, some helper functions.
+
+def buildTagMap(default, *args):
+    """Turns a list of maps, lists, or scalars into a single map.
+    Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
+    NESTING_RESET_TAGS maps out of lists and partial maps."""
+    built = {}
+    for portion in args:
+        if hasattr(portion, 'items'):
+            #It's a map. Merge it.
+            for k,v in portion.items():
+                built[k] = v
+        elif hasattr(portion, '__iter__'): # is a list
+            #It's a list. Map each item to the default.
+            for k in portion:
+                built[k] = default
+        else:
+            #It's a scalar. Map it to the default.
+            built[portion] = default
+    return built
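+# e.g. (illustrative call):
+#   buildTagMap([], {'ol': ['ul'], 'ul': []}, ['p', 'blockquote'], 'pre')
+#   -> {'ol': ['ul'], 'ul': [], 'p': [], 'blockquote': [], 'pre': []}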
+
+# Now, the parser classes.
+
+class BeautifulStoneSoup(Tag, SGMLParser):
+
+    """This class contains the basic parser and search code. It defines
+    a parser that knows nothing about tag behavior except for the
+    following:
+
+      You can't close a tag without closing all the tags it encloses.
+      That is, "<foo><bar></foo>" actually means
+      "<foo><bar></bar></foo>".
+
+    [Another possible explanation is "<foo><bar /></foo>", but since
+    this class defines no SELF_CLOSING_TAGS, it will never use that
+    explanation.]
+
+    This class is useful for parsing XML or made-up markup languages,
+    or when BeautifulSoup makes an assumption counter to what you were
+    expecting."""
+
+    SELF_CLOSING_TAGS = {}
+    NESTABLE_TAGS = {}
+    RESET_NESTING_TAGS = {}
+    QUOTE_TAGS = {}
+    PRESERVE_WHITESPACE_TAGS = []
+
+    MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),
+                       lambda x: x.group(1) + ' />'),
+                      (re.compile('<!\s+([^<>]*)>'),
+                       lambda x: '<!' + x.group(1) + '>')
+                      ]
+
+    ROOT_TAG_NAME = u'[document]'
+
+    HTML_ENTITIES = "html"
+    XML_ENTITIES = "xml"
+    XHTML_ENTITIES = "xhtml"
+    # TODO: This only exists for backwards-compatibility
+    ALL_ENTITIES = XHTML_ENTITIES
+
+    # Used when determining whether a text node is all whitespace and
+    # can be replaced with a single space. A text node that contains
+    # fancy Unicode spaces (usually non-breaking) should be left
+    # alone.
+    STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, }
+
+    def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
+                 markupMassage=True, smartQuotesTo=XML_ENTITIES,
+                 convertEntities=None, selfClosingTags=None, isHTML=False):
+        """The Soup object is initialized as the 'root tag', and the
+        provided markup (which can be a string or a file-like object)
+        is fed into the underlying parser.
+
+        sgmllib will process most bad HTML, and the BeautifulSoup
+        class has some tricks for dealing with some HTML that kills
+        sgmllib, but Beautiful Soup can nonetheless choke or lose data
+        if your data uses self-closing tags or declarations
+        incorrectly.
+
+        By default, Beautiful Soup uses regexes to sanitize input,
+        avoiding the vast majority of these problems. If the problems
+        don't apply to you, pass in False for markupMassage, and
+        you'll get better performance.
+
+        The default parser massage techniques fix the two most common
+        instances of invalid HTML that choke sgmllib:
+
+         <br/> (No space between name of closing tag and tag close)
+         <! --Comment--> (Extraneous whitespace in declaration)
+
+        You can pass in a custom list of (RE object, replace method)
+        tuples to get Beautiful Soup to scrub your input the way you
+        want."""
+
+        self.parseOnlyThese = parseOnlyThese
+        self.fromEncoding = fromEncoding
+        self.smartQuotesTo = smartQuotesTo
+        self.convertEntities = convertEntities
+        # Set the rules for how we'll deal with the entities we
+        # encounter
+        if self.convertEntities:
+            # It doesn't make sense to convert encoded characters to
+            # entities even while you're converting entities to Unicode.
+            # Just convert it all to Unicode.
+            self.smartQuotesTo = None
+            if convertEntities == self.HTML_ENTITIES:
+                self.convertXMLEntities = False
+                self.convertHTMLEntities = True
+                self.escapeUnrecognizedEntities = True
+            elif convertEntities == self.XHTML_ENTITIES:
+                self.convertXMLEntities = True
+                self.convertHTMLEntities = True
+                self.escapeUnrecognizedEntities = False
+            elif convertEntities == self.XML_ENTITIES:
+                self.convertXMLEntities = True
+                self.convertHTMLEntities = False
+                self.escapeUnrecognizedEntities = False
+        else:
+            self.convertXMLEntities = False
+            self.convertHTMLEntities = False
+            self.escapeUnrecognizedEntities = False
+
+        self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
+        SGMLParser.__init__(self)
+
+        if hasattr(markup, 'read'):        # It's a file-type object.
+            markup = markup.read()
+        self.markup = markup
+        self.markupMassage = markupMassage
+        try:
+            self._feed(isHTML=isHTML)
+        except StopParsing:
+            pass
+        self.markup = None                 # The markup can now be GCed
+
+    def convert_charref(self, name):
+        """This method fixes a bug in Python's SGMLParser."""
+        try:
+            n = int(name)
+        except ValueError:
+            return
+        if not 0 <= n <= 127:  # ASCII ends at 127, not 255
+            return
+        return self.convert_codepoint(n)
+
+    def _feed(self, inDocumentEncoding=None, isHTML=False):
+        # Convert the document to Unicode.
+        markup = self.markup
+        if isinstance(markup, unicode):
+            if not hasattr(self, 'originalEncoding'):
+                self.originalEncoding = None
+        else:
+            dammit = UnicodeDammit\
+                     (markup, [self.fromEncoding, inDocumentEncoding],
+                      smartQuotesTo=self.smartQuotesTo, isHTML=isHTML)
+            markup = dammit.unicode
+            self.originalEncoding = dammit.originalEncoding
+            self.declaredHTMLEncoding = dammit.declaredHTMLEncoding
+        if markup:
+            if self.markupMassage:
+                if not hasattr(self.markupMassage, "__iter__"):
+                    self.markupMassage = self.MARKUP_MASSAGE
+                for fix, m in self.markupMassage:
+                    markup = fix.sub(m, markup)
+                # TODO: We get rid of markupMassage so that the
+                # soup object can be deepcopied later on. Some
+                # Python installations can't copy regexes. If anyone
+                # was relying on the existence of markupMassage, this
+                # might cause problems.
+                del(self.markupMassage)
+        self.reset()
+
+        SGMLParser.feed(self, markup)
+        # Close out any unfinished strings and close all the open tags.
+        self.endData()
+        while self.currentTag.name != self.ROOT_TAG_NAME:
+            self.popTag()
+
+    def __getattr__(self, methodName):
+        """This method routes method call requests to either the SGMLParser
+        superclass or the Tag superclass, depending on the method name."""
+        #print "__getattr__ called on %s.%s" % (self.__class__, methodName)
+
+        if methodName.startswith('start_') or methodName.startswith('end_') \
+               or methodName.startswith('do_'):
+            return SGMLParser.__getattr__(self, methodName)
+        elif not methodName.startswith('__'):
+            return Tag.__getattr__(self, methodName)
+        else:
+            raise AttributeError
+
+    def isSelfClosingTag(self, name):
+        """Returns true iff the given string is the name of a
+        self-closing tag according to this parser."""
+        return self.SELF_CLOSING_TAGS.has_key(name) \
+               or self.instanceSelfClosingTags.has_key(name)
+
+    def reset(self):
+        Tag.__init__(self, self, self.ROOT_TAG_NAME)
+        self.hidden = 1
+        SGMLParser.reset(self)
+        self.currentData = []
+        self.currentTag = None
+        self.tagStack = []
+        self.quoteStack = []
+        self.pushTag(self)
+
+    def popTag(self):
+        tag = self.tagStack.pop()
+
+        #print "Pop", tag.name
+        if self.tagStack:
+            self.currentTag = self.tagStack[-1]
+        return self.currentTag
+
+    def pushTag(self, tag):
+        #print "Push", tag.name
+        if self.currentTag:
+            self.currentTag.contents.append(tag)
+        self.tagStack.append(tag)
+        self.currentTag = self.tagStack[-1]
+
+    def endData(self, containerClass=NavigableString):
+        if self.currentData:
+            currentData = u''.join(self.currentData)
+            if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
+                not set([tag.name for tag in self.tagStack]).intersection(
+                    self.PRESERVE_WHITESPACE_TAGS)):
+                if '\n' in currentData:
+                    currentData = '\n'
+                else:
+                    currentData = ' '
+            self.currentData = []
+            if self.parseOnlyThese and len(self.tagStack) <= 1 and \
+                   (not self.parseOnlyThese.text or \
+                    not self.parseOnlyThese.search(currentData)):
+                return
+            o = containerClass(currentData)
+            o.setup(self.currentTag, self.previous)
+            if self.previous:
+                self.previous.next = o
+            self.previous = o
+            self.currentTag.contents.append(o)
+
+
+    def _popToTag(self, name, inclusivePop=True):
+        """Pops the tag stack up to and including the most recent
+        instance of the given tag. If inclusivePop is false, pops the tag
+    stack up to but *not* including the most recent instance of
+        the given tag."""
+        #print "Popping to %s" % name
+        if name == self.ROOT_TAG_NAME:
+            return
+
+        numPops = 0
+        mostRecentTag = None
+        for i in range(len(self.tagStack)-1, 0, -1):
+            if name == self.tagStack[i].name:
+                numPops = len(self.tagStack)-i
+                break
+        if not inclusivePop:
+            numPops = numPops - 1
+
+        for i in range(0, numPops):
+            mostRecentTag = self.popTag()
+        return mostRecentTag
+
+    def _smartPop(self, name):
+
+        """We need to pop up to the previous tag of this type, unless
+        one of this tag's nesting reset triggers comes between this
+        tag and the previous tag of this type, OR unless this tag is a
+        generic nesting trigger and another generic nesting trigger
+        comes between this tag and the previous tag of this type.
+
+        Examples:
+         <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.
+         <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.
+         <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.
+
+         <li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
+         <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
+         <td><tr><td> *<td>* should pop to 'tr', not the first 'td'
+        """
+
+        nestingResetTriggers = self.NESTABLE_TAGS.get(name)
+        isNestable = nestingResetTriggers != None
+        isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
+        popTo = None
+        inclusive = True
+        for i in range(len(self.tagStack)-1, 0, -1):
+            p = self.tagStack[i]
+            if (not p or p.name == name) and not isNestable:
+                #Non-nestable tags get popped to the top or to their
+                #last occurrence.
+                popTo = name
+                break
+            if (nestingResetTriggers is not None
+                and p.name in nestingResetTriggers) \
+                or (nestingResetTriggers is None and isResetNesting
+                    and self.RESET_NESTING_TAGS.has_key(p.name)):
+
+                #If we encounter one of the nesting reset triggers
+                #peculiar to this tag, or we encounter another tag
+                #that causes nesting to reset, pop up to but not
+                #including that tag.
+                popTo = p.name
+                inclusive = False
+                break
+            p = p.parent
+        if popTo:
+            self._popToTag(popTo, inclusive)
+
+    def unknown_starttag(self, name, attrs, selfClosing=0):
+        #print "Start tag %s: %s" % (name, attrs)
+        if self.quoteStack:
+            #This is not a real tag.
+            #print "<%s> is not real!" % name
+            attrs = ''.join([' %s="%s"' % (x, y) for x, y in attrs])
+            self.handle_data('<%s%s>' % (name, attrs))
+            return
+        self.endData()
+
+        if not self.isSelfClosingTag(name) and not selfClosing:
+            self._smartPop(name)
+
+        if self.parseOnlyThese and len(self.tagStack) <= 1 \
+               and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):
+            return
+
+        tag = Tag(self, name, attrs, self.currentTag, self.previous)
+        if self.previous:
+            self.previous.next = tag
+        self.previous = tag
+        self.pushTag(tag)
+        if selfClosing or self.isSelfClosingTag(name):
+            self.popTag()
+        if name in self.QUOTE_TAGS:
+            #print "Beginning quote (%s)" % name
+            self.quoteStack.append(name)
+            self.literal = 1
+        return tag
+
+    def unknown_endtag(self, name):
+        #print "End tag %s" % name
+        if self.quoteStack and self.quoteStack[-1] != name:
+            #This is not a real end tag.
+            #print "</%s> is not real!" % name
+            self.handle_data('</%s>' % name)
+            return
+        self.endData()
+        self._popToTag(name)
+        if self.quoteStack and self.quoteStack[-1] == name:
+            self.quoteStack.pop()
+            self.literal = (len(self.quoteStack) > 0)
+
+    def handle_data(self, data):
+        self.currentData.append(data)
+
+    def _toStringSubclass(self, text, subclass):
+        """Adds a certain piece of text to the tree as a NavigableString
+        subclass."""
+        self.endData()
+        self.handle_data(text)
+        self.endData(subclass)
+
+    def handle_pi(self, text):
+        """Handle a processing instruction as a ProcessingInstruction
+        object, possibly one with a %SOUP-ENCODING% slot into which an
+        encoding will be plugged later."""
+        if text[:3] == "xml":
+            text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
+        self._toStringSubclass(text, ProcessingInstruction)
+
+    def handle_comment(self, text):
+        "Handle comments as Comment objects."
+        self._toStringSubclass(text, Comment)
+
+    def handle_charref(self, ref):
+        "Handle character references as data."
+        if self.convertEntities:
+            data = unichr(int(ref))
+        else:
+            data = '&#%s;' % ref
+        self.handle_data(data)
+
+    def handle_entityref(self, ref):
+        """Handle entity references as data, possibly converting known
+        HTML and/or XML entity references to the corresponding Unicode
+        characters."""
+        data = None
+        if self.convertHTMLEntities:
+            try:
+                data = unichr(name2codepoint[ref])
+            except KeyError:
+                pass
+
+        if not data and self.convertXMLEntities:
+                data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)
+
+        if not data and self.convertHTMLEntities and \
+            not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):
+                # TODO: We've got a problem here. We're told this is
+                # an entity reference, but it's not an XML entity
+                # reference or an HTML entity reference. Nonetheless,
+                # the logical thing to do is to pass it through as an
+                # unrecognized entity reference.
+                #
+                # Except: when the input is "&carol;" this function
+                # will be called with input "carol". When the input is
+                # "AT&T", this function will be called with input
+                # "T". We have no way of knowing whether a semicolon
+                # was present originally, so we don't know whether
+                # this is an unknown entity or just a misplaced
+                # ampersand.
+                #
+                # The more common case is a misplaced ampersand, so I
+                # escape the ampersand and omit the trailing semicolon.
+                data = "&amp;%s" % ref
+        if not data:
+            # This case is different from the one above, because we
+            # haven't already gone through a supposedly comprehensive
+            # mapping of entities to Unicode characters. We might not
+            # have gone through any mapping at all. So the chances are
+            # very high that this is a real entity, and not a
+            # misplaced ampersand.
+            data = "&%s;" % ref
+        self.handle_data(data)
+
+    def handle_decl(self, data):
+        "Handle DOCTYPEs and the like as Declaration objects."
+        self._toStringSubclass(data, Declaration)
+
+    def parse_declaration(self, i):
+        """Treat a bogus SGML declaration as raw data. Treat a CDATA
+        declaration as a CData object."""
+        j = None
+        if self.rawdata[i:i+9] == '<![CDATA[':
+             k = self.rawdata.find(']]>', i)
+             if k == -1:
+                 k = len(self.rawdata)
+             data = self.rawdata[i+9:k]
+             j = k+3
+             self._toStringSubclass(data, CData)
+        else:
+            try:
+                j = SGMLParser.parse_declaration(self, i)
+            except SGMLParseError:
+                toHandle = self.rawdata[i:]
+                self.handle_data(toHandle)
+                j = i + len(toHandle)
+        return j
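+
+# An illustrative sketch of the closing behaviour described in the class
+# docstring: with no SELF_CLOSING_TAGS defined, "<foo><bar></foo>" is parsed
+# as "<foo><bar></bar></foo>". Not used anywhere in this file.
+def _exampleStoneSoupNesting():
+    soup = BeautifulStoneSoup('<foo><bar></foo>')
+    # <bar> ends up nested inside <foo>.
+    return soup.foo.bar is not None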
+
+class BeautifulSoup(BeautifulStoneSoup):
+
+    """This parser knows the following facts about HTML:
+
+    * Some tags have no closing tag and should be interpreted as being
+      closed as soon as they are encountered.
+
+    * The text inside some tags (i.e. 'script') may contain tags which
+      are not really part of the document and which should be parsed
+      as text, not tags. If you want to parse the text as tags, you can
+      always fetch it and parse it explicitly.
+
+    * Tag nesting rules:
+
+      Most tags can't be nested at all. For instance, the occurrence of
+      a <p> tag should implicitly close the previous <p> tag.
+
+       <p>Para1<p>Para2
+        should be transformed into:
+       <p>Para1</p><p>Para2
+
+      Some tags can be nested arbitrarily. For instance, the occurrence
+      of a <blockquote> tag should _not_ implicitly close the previous
+      <blockquote> tag.
+
+       Alice said: <blockquote>Bob said: <blockquote>Blah
+        should NOT be transformed into:
+       Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah
+
+      Some tags can be nested, but the nesting is reset by the
+      interposition of other tags. For instance, a <tr> tag should
+      implicitly close the previous <tr> tag within the same <table>,
+      but not close a <tr> tag in another table.
+
+       <table><tr>Blah<tr>Blah
+        should be transformed into:
+       <table><tr>Blah</tr><tr>Blah
+        but,
+       <tr>Blah<table><tr>Blah
+        should NOT be transformed into
+       <tr>Blah<table></tr><tr>Blah
+
+    Differing assumptions about tag nesting rules are a major source
+    of problems with the BeautifulSoup class. If BeautifulSoup is not
+    treating as nestable a tag your page author treats as nestable,
+    try ICantBelieveItsBeautifulSoup, MinimalSoup, or
+    BeautifulStoneSoup before writing your own subclass."""
+
+    def __init__(self, *args, **kwargs):
+        if not kwargs.has_key('smartQuotesTo'):
+            kwargs['smartQuotesTo'] = self.HTML_ENTITIES
+        kwargs['isHTML'] = True
+        BeautifulStoneSoup.__init__(self, *args, **kwargs)
+
+    SELF_CLOSING_TAGS = buildTagMap(None,
+                                    ('br' , 'hr', 'input', 'img', 'meta',
+                                    'spacer', 'link', 'frame', 'base', 'col'))
+
+    PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])
+
+    QUOTE_TAGS = {'script' : None, 'textarea' : None}
+
+    #According to the HTML standard, each of these inline tags can
+    #contain another tag of the same type. Furthermore, it's common
+    #to actually use these tags this way.
+    NESTABLE_INLINE_TAGS = ('span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
+                            'center')
+
+    #According to the HTML standard, these block tags can contain
+    #another tag of the same type. Furthermore, it's common
+    #to actually use these tags this way.
+    NESTABLE_BLOCK_TAGS = ('blockquote', 'div', 'fieldset', 'ins', 'del')
+
+    #Lists can contain other lists, but there are restrictions.
+    NESTABLE_LIST_TAGS = { 'ol' : [],
+                           'ul' : [],
+                           'li' : ['ul', 'ol'],
+                           'dl' : [],
+                           'dd' : ['dl'],
+                           'dt' : ['dl'] }
+
+    #Tables can contain other tables, but there are restrictions.
+    NESTABLE_TABLE_TAGS = {'table' : [],
+                           'tr' : ['table', 'tbody', 'tfoot', 'thead'],
+                           'td' : ['tr'],
+                           'th' : ['tr'],
+                           'thead' : ['table'],
+                           'tbody' : ['table'],
+                           'tfoot' : ['table'],
+                           }
+
+    NON_NESTABLE_BLOCK_TAGS = ('address', 'form', 'p', 'pre')
+
+    #If one of these tags is encountered, all tags up to the next tag of
+    #this type are popped.
+    RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
+                                     NON_NESTABLE_BLOCK_TAGS,
+                                     NESTABLE_LIST_TAGS,
+                                     NESTABLE_TABLE_TAGS)
+
+    NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
+                                NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)
+
+    # Used to detect the charset in a META tag; see start_meta
+    CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)
+
+    def start_meta(self, attrs):
+        """Beautiful Soup can detect a charset included in a META tag,
+        try to convert the document to that charset, and re-parse the
+        document from the beginning."""
+        httpEquiv = None
+        contentType = None
+        contentTypeIndex = None
+        tagNeedsEncodingSubstitution = False
+
+        for i in range(0, len(attrs)):
+            key, value = attrs[i]
+            key = key.lower()
+            if key == 'http-equiv':
+                httpEquiv = value
+            elif key == 'content':
+                contentType = value
+                contentTypeIndex = i
+
+        if httpEquiv and contentType: # It's an interesting meta tag.
+            match = self.CHARSET_RE.search(contentType)
+            if match:
+                if (self.declaredHTMLEncoding is not None or
+                    self.originalEncoding == self.fromEncoding):
+                    # An HTML encoding was sniffed while converting
+                    # the document to Unicode, or an HTML encoding was
+                    # sniffed during a previous pass through the
+                    # document, or an encoding was specified
+                    # explicitly and it worked. Rewrite the meta tag.
+                    def rewrite(match):
+                        return match.group(1) + "%SOUP-ENCODING%"
+                    newAttr = self.CHARSET_RE.sub(rewrite, contentType)
+                    attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
+                                               newAttr)
+                    tagNeedsEncodingSubstitution = True
+                else:
+                    # This is our first pass through the document.
+                    # Go through it again with the encoding information.
+                    newCharset = match.group(3)
+                    if newCharset and newCharset != self.originalEncoding:
+                        self.declaredHTMLEncoding = newCharset
+                        self._feed(self.declaredHTMLEncoding)
+                        raise StopParsing
+                    pass
+        tag = self.unknown_starttag("meta", attrs)
+        if tag and tagNeedsEncodingSubstitution:
+            tag.containsSubstitutions = True
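+
+# A small sketch of the nesting rules listed in the class docstring: a second
+# <p> implicitly closes the first, so both paragraphs become siblings. Purely
+# illustrative; nothing in this file calls it.
+def _exampleParagraphNesting():
+    soup = BeautifulSoup('<p>Para1<p>Para2')
+    return len(soup.findAll('p')) == 2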
+
+class StopParsing(Exception):
+    pass
+
+class ICantBelieveItsBeautifulSoup(BeautifulSoup):
+
+    """The BeautifulSoup class is oriented towards skipping over
+    common HTML errors like unclosed tags. However, sometimes it makes
+    errors of its own. For instance, consider this fragment:
+
+     <b>Foo<b>Bar</b></b>
+
+    This is perfectly valid (if bizarre) HTML. However, the
+    BeautifulSoup class will implicitly close the first b tag when it
+    encounters the second 'b'. It will think the author wrote
+    "<b>Foo<b>Bar", and didn't close the first 'b' tag, because
+    there's no real-world reason to bold something that's already
+    bold. When it encounters '</b></b>' it will close two more 'b'
+    tags, for a grand total of three tags closed instead of two. This
+    can throw off the rest of your document structure. The same is
+    true of a number of other tags, listed below.
+
+    It's much more common for someone to forget to close a 'b' tag
+    than to actually use nested 'b' tags, and the BeautifulSoup class
+    handles the common case. This class handles the not-so-common
+    case: where you can't believe someone wrote what they did, but
+    it's valid HTML and BeautifulSoup screwed up by assuming it
+    wouldn't be."""
+
+    I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
+     ('em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
+      'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b',
+      'big')
+
+    I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ('noscript',)
+
+    NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
+                                I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
+                                I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
+
+class MinimalSoup(BeautifulSoup):
+    """The MinimalSoup class is for parsing HTML that contains
+    pathologically bad markup. It makes no assumptions about tag
+    nesting, but it does know which tags are self-closing, that
+    <script> tags contain Javascript and should not be parsed, that
+    META tags may contain encoding information, and so on.
+
+    This also makes it better for subclassing than BeautifulStoneSoup
+    or BeautifulSoup."""
+
+    RESET_NESTING_TAGS = buildTagMap('noscript')
+    NESTABLE_TAGS = {}
+
+class BeautifulSOAP(BeautifulStoneSoup):
+    """This class will push a tag with only a single string child into
+    the tag's parent as an attribute. The attribute's name is the tag
+    name, and the value is the string child. An example should give
+    the flavor of the change:
+
+    <foo><bar>baz</bar></foo>
+     =>
+    <foo bar="baz"><bar>baz</bar></foo>
+
+    You can then access fooTag['bar'] instead of fooTag.barTag.string.
+
+    This is, of course, useful for scraping structures that tend to
+    use subelements instead of attributes, such as SOAP messages. Note
+    that it modifies its input, so don't print the modified version
+    out.
+
+    I'm not sure how many people really want to use this class; let me
+    know if you do. Mainly I like the name."""
+
+    def popTag(self):
+        if len(self.tagStack) > 1:
+            tag = self.tagStack[-1]
+            parent = self.tagStack[-2]
+            parent._getAttrMap()
+            if (isinstance(tag, Tag) and len(tag.contents) == 1 and
+                isinstance(tag.contents[0], NavigableString) and
+                not parent.attrMap.has_key(tag.name)):
+                parent[tag.name] = tag.contents[0]
+        BeautifulStoneSoup.popTag(self)
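+
+# An illustrative sketch of the attribute promotion described in the
+# BeautifulSOAP docstring; it is not exercised anywhere in this file.
+def _exampleBeautifulSOAP():
+    soup = BeautifulSOAP('<foo><bar>baz</bar></foo>')
+    # The single-string child <bar> also shows up as an attribute of <foo>.
+    return soup.foo['bar'] == 'baz'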
+
+#Enterprise class names! It has come to our attention that some people
+#think the names of the Beautiful Soup parser classes are too silly
+#and "unprofessional" for use in enterprise screen-scraping. We feel
+#your pain! For such-minded folk, the Beautiful Soup Consortium And
+#All-Night Kosher Bakery recommends renaming this file to
+#"RobustParser.py" (or, in cases of extreme enterprisiness,
+#"RobustParserBeanInterface.class") and using the following
+#enterprise-friendly class aliases:
+class RobustXMLParser(BeautifulStoneSoup):
+    pass
+class RobustHTMLParser(BeautifulSoup):
+    pass
+class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
+    pass
+class RobustInsanelyWackAssHTMLParser(MinimalSoup):
+    pass
+class SimplifyingSOAPParser(BeautifulSOAP):
+    pass
+
+######################################################
+#
+# Bonus library: Unicode, Dammit
+#
+# This class forces XML data into a standard format (usually to UTF-8
+# or Unicode).  It is heavily based on code from Mark Pilgrim's
+# Universal Feed Parser. It does not rewrite the XML or HTML to
+# reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi
+# (XML) and BeautifulSoup.start_meta (HTML).
+
+# Autodetects character encodings.
+# Download from http://chardet.feedparser.org/
+try:
+    import chardet
+#    import chardet.constants
+#    chardet.constants._debug = 1
+except ImportError:
+    chardet = None
+
+# cjkcodecs and iconv_codec make Python know about more character encodings.
+# Both are available from http://cjkpython.i18n.org/
+# They're built in if you use Python 2.4.
+try:
+    import cjkcodecs.aliases
+except ImportError:
+    pass
+try:
+    import iconv_codec
+except ImportError:
+    pass
+
+class UnicodeDammit:
+    """A class for detecting the encoding of a *ML document and
+    converting it to a Unicode string. If the source encoding is
+    windows-1252, can replace MS smart quotes with their HTML or XML
+    equivalents."""
+
+    # This dictionary maps commonly seen values for "charset" in HTML
+    # meta tags to the corresponding Python codec names. It only covers
+    # values that aren't in Python's aliases and can't be determined
+    # by the heuristics in find_codec.
+    CHARSET_ALIASES = { "macintosh" : "mac-roman",
+                        "x-sjis" : "shift-jis" }
+
+    def __init__(self, markup, overrideEncodings=[],
+                 smartQuotesTo='xml', isHTML=False):
+        self.declaredHTMLEncoding = None
+        self.markup, documentEncoding, sniffedEncoding = \
+                     self._detectEncoding(markup, isHTML)
+        self.smartQuotesTo = smartQuotesTo
+        self.triedEncodings = []
+        if markup == '' or isinstance(markup, unicode):
+            self.originalEncoding = None
+            self.unicode = unicode(markup)
+            return
+
+        u = None
+        for proposedEncoding in overrideEncodings:
+            u = self._convertFrom(proposedEncoding)
+            if u: break
+        if not u:
+            for proposedEncoding in (documentEncoding, sniffedEncoding):
+                u = self._convertFrom(proposedEncoding)
+                if u: break
+
+        # If no luck and we have auto-detection library, try that:
+        if not u and chardet and not isinstance(self.markup, unicode):
+            u = self._convertFrom(chardet.detect(self.markup)['encoding'])
+
+        # As a last resort, try utf-8 and windows-1252:
+        if not u:
+            for proposed_encoding in ("utf-8", "windows-1252"):
+                u = self._convertFrom(proposed_encoding)
+                if u: break
+
+        self.unicode = u
+        if not u: self.originalEncoding = None
+
+    def _subMSChar(self, orig):
+        """Changes a MS smart quote character to an XML or HTML
+        entity."""
+        sub = self.MS_CHARS.get(orig)
+        if isinstance(sub, tuple):
+            if self.smartQuotesTo == 'xml':
+                sub = '&#x%s;' % sub[1]
+            else:
+                sub = '&%s;' % sub[0]
+        return sub
+
+    def _convertFrom(self, proposed):
+        proposed = self.find_codec(proposed)
+        if not proposed or proposed in self.triedEncodings:
+            return None
+        self.triedEncodings.append(proposed)
+        markup = self.markup
+
+        # Convert smart quotes to HTML if coming from an encoding
+        # that might have them.
+        if self.smartQuotesTo and proposed.lower() in("windows-1252",
+                                                      "iso-8859-1",
+                                                      "iso-8859-2"):
+            markup = re.compile("([\x80-\x9f])").sub \
+                     (lambda(x): self._subMSChar(x.group(1)),
+                      markup)
+
+        try:
+            # print "Trying to convert document to %s" % proposed
+            u = self._toUnicode(markup, proposed)
+            self.markup = u
+            self.originalEncoding = proposed
+        except Exception, e:
+            # print "That didn't work!"
+            # print e
+            return None
+        #print "Correct encoding: %s" % proposed
+        return self.markup
+
+    def _toUnicode(self, data, encoding):
+        '''Given a string and its encoding, decodes the string into Unicode.
+        %encoding is a string recognized by encodings.aliases'''
+
+        # strip Byte Order Mark (if present)
+        if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
+               and (data[2:4] != '\x00\x00'):
+            encoding = 'utf-16be'
+            data = data[2:]
+        elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
+                 and (data[2:4] != '\x00\x00'):
+            encoding = 'utf-16le'
+            data = data[2:]
+        elif data[:3] == '\xef\xbb\xbf':
+            encoding = 'utf-8'
+            data = data[3:]
+        elif data[:4] == '\x00\x00\xfe\xff':
+            encoding = 'utf-32be'
+            data = data[4:]
+        elif data[:4] == '\xff\xfe\x00\x00':
+            encoding = 'utf-32le'
+            data = data[4:]
+        newdata = unicode(data, encoding)
+        return newdata
+
+    def _detectEncoding(self, xml_data, isHTML=False):
+        """Given a document, tries to detect its XML encoding."""
+        xml_encoding = sniffed_xml_encoding = None
+        try:
+            if xml_data[:4] == '\x4c\x6f\xa7\x94':
+                # EBCDIC
+                xml_data = self._ebcdic_to_ascii(xml_data)
+            elif xml_data[:4] == '\x00\x3c\x00\x3f':
+                # UTF-16BE
+                sniffed_xml_encoding = 'utf-16be'
+                xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
+            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
+                     and (xml_data[2:4] != '\x00\x00'):
+                # UTF-16BE with BOM
+                sniffed_xml_encoding = 'utf-16be'
+                xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
+            elif xml_data[:4] == '\x3c\x00\x3f\x00':
+                # UTF-16LE
+                sniffed_xml_encoding = 'utf-16le'
+                xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
+            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
+                     (xml_data[2:4] != '\x00\x00'):
+                # UTF-16LE with BOM
+                sniffed_xml_encoding = 'utf-16le'
+                xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
+            elif xml_data[:4] == '\x00\x00\x00\x3c':
+                # UTF-32BE
+                sniffed_xml_encoding = 'utf-32be'
+                xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
+            elif xml_data[:4] == '\x3c\x00\x00\x00':
+                # UTF-32LE
+                sniffed_xml_encoding = 'utf-32le'
+                xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
+            elif xml_data[:4] == '\x00\x00\xfe\xff':
+                # UTF-32BE with BOM
+                sniffed_xml_encoding = 'utf-32be'
+                xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
+            elif xml_data[:4] == '\xff\xfe\x00\x00':
+                # UTF-32LE with BOM
+                sniffed_xml_encoding = 'utf-32le'
+                xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
+            elif xml_data[:3] == '\xef\xbb\xbf':
+                # UTF-8 with BOM
+                sniffed_xml_encoding = 'utf-8'
+                xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
+            else:
+                sniffed_xml_encoding = 'ascii'
+                pass
+        except:
+            xml_encoding_match = None
+        xml_encoding_match = re.compile(
+            '^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
+        if not xml_encoding_match and isHTML:
+            regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I)
+            xml_encoding_match = regexp.search(xml_data)
+        if xml_encoding_match is not None:
+            xml_encoding = xml_encoding_match.groups()[0].lower()
+            if isHTML:
+                self.declaredHTMLEncoding = xml_encoding
+            if sniffed_xml_encoding and \
+               (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
+                                 'iso-10646-ucs-4', 'ucs-4', 'csucs4',
+                                 'utf-16', 'utf-32', 'utf_16', 'utf_32',
+                                 'utf16', 'u16')):
+                xml_encoding = sniffed_xml_encoding
+        return xml_data, xml_encoding, sniffed_xml_encoding
+
+
+    def find_codec(self, charset):
+        return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \
+               or (charset and self._codec(charset.replace("-", ""))) \
+               or (charset and self._codec(charset.replace("-", "_"))) \
+               or charset
+
+    def _codec(self, charset):
+        if not charset: return charset
+        codec = None
+        try:
+            codecs.lookup(charset)
+            codec = charset
+        except (LookupError, ValueError):
+            pass
+        return codec
+
+    EBCDIC_TO_ASCII_MAP = None
+    def _ebcdic_to_ascii(self, s):
+        c = self.__class__
+        if not c.EBCDIC_TO_ASCII_MAP:
+            emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
+                    16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
+                    128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
+                    144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
+                    32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
+                    38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
+                    45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
+                    186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
+                    195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
+                    201,202,106,107,108,109,110,111,112,113,114,203,204,205,
+                    206,207,208,209,126,115,116,117,118,119,120,121,122,210,
+                    211,212,213,214,215,216,217,218,219,220,221,222,223,224,
+                    225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
+                    73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
+                    82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
+                    90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
+                    250,251,252,253,254,255)
+            import string
+            c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
+            ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
+        return s.translate(c.EBCDIC_TO_ASCII_MAP)
+
+    MS_CHARS = { '\x80' : ('euro', '20AC'),
+                 '\x81' : ' ',
+                 '\x82' : ('sbquo', '201A'),
+                 '\x83' : ('fnof', '192'),
+                 '\x84' : ('bdquo', '201E'),
+                 '\x85' : ('hellip', '2026'),
+                 '\x86' : ('dagger', '2020'),
+                 '\x87' : ('Dagger', '2021'),
+                 '\x88' : ('circ', '2C6'),
+                 '\x89' : ('permil', '2030'),
+                 '\x8A' : ('Scaron', '160'),
+                 '\x8B' : ('lsaquo', '2039'),
+                 '\x8C' : ('OElig', '152'),
+                 '\x8D' : '?',
+                 '\x8E' : ('#x17D', '17D'),
+                 '\x8F' : '?',
+                 '\x90' : '?',
+                 '\x91' : ('lsquo', '2018'),
+                 '\x92' : ('rsquo', '2019'),
+                 '\x93' : ('ldquo', '201C'),
+                 '\x94' : ('rdquo', '201D'),
+                 '\x95' : ('bull', '2022'),
+                 '\x96' : ('ndash', '2013'),
+                 '\x97' : ('mdash', '2014'),
+                 '\x98' : ('tilde', '2DC'),
+                 '\x99' : ('trade', '2122'),
+                 '\x9a' : ('scaron', '161'),
+                 '\x9b' : ('rsaquo', '203A'),
+                 '\x9c' : ('oelig', '153'),
+                 '\x9d' : '?',
+                 '\x9e' : ('#x17E', '17E'),
+                 '\x9f' : ('Yuml', ''),}
+
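+# A short usage sketch for UnicodeDammit on its own (illustrative only; never
+# called here): feed it raw bytes, then read .unicode and .originalEncoding.
+def _exampleUnicodeDammit():
+    dammit = UnicodeDammit('<p>caf\xc3\xa9</p>')
+    # dammit.unicode is a Unicode string; dammit.originalEncoding records the
+    # codec that ended up decoding the input (typically 'utf-8' for this
+    # input, or whatever chardet detects if it is installed).
+    return dammit.unicode, dammit.originalEncoding
+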
+#######################################################################
+
+
+#By default, act as an HTML pretty-printer.
+if __name__ == '__main__':
+    import sys
+    soup = BeautifulSoup(sys.stdin)
+    print soup.prettify()
diff --git a/Tools/Scripts/webkitpy/thirdparty/__init__.py b/Tools/Scripts/webkitpy/thirdparty/__init__.py
new file mode 100644
index 0000000..74ea5f6
--- /dev/null
+++ b/Tools/Scripts/webkitpy/thirdparty/__init__.py
@@ -0,0 +1,180 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This module is required for Python to treat this directory as a package.
+
+"""Autoinstalls third-party code required by WebKit."""
+
+
+import codecs
+import os
+import sys
+
+from webkitpy.common.system.autoinstall import AutoInstaller
+from webkitpy.common.system.filesystem import FileSystem
+
+_THIRDPARTY_DIR = os.path.dirname(__file__)
+_AUTOINSTALLED_DIR = os.path.join(_THIRDPARTY_DIR, "autoinstalled")
+
+# Putting the autoinstall code into webkitpy/thirdparty/__init__.py
+# ensures that no autoinstalling occurs until a caller imports from
+# webkitpy.thirdparty.  This is useful if the caller wants to configure
+# logging prior to executing autoinstall code.
+
+# FIXME: If any of these servers is offline, webkit-patch breaks (and maybe
+# other scripts do, too). See <http://webkit.org/b/42080>.
+
+# We put auto-installed third-party modules in this directory--
+#
+#     webkitpy/thirdparty/autoinstalled
+fs = FileSystem()
+fs.maybe_make_directory(_AUTOINSTALLED_DIR)
+
+init_path = fs.join(_AUTOINSTALLED_DIR, "__init__.py")
+if not fs.exists(init_path):
+    fs.write_text_file(init_path, "")
+
+readme_path = fs.join(_AUTOINSTALLED_DIR, "README")
+if not fs.exists(readme_path):
+    fs.write_text_file(readme_path,
+        "This directory is auto-generated by WebKit and is "
+        "safe to delete.\nIt contains needed third-party Python "
+        "packages automatically downloaded from the web.")
+
+
+class AutoinstallImportHook(object):
+    def __init__(self, filesystem=None):
+        self._fs = filesystem or FileSystem()
+
+    def _ensure_autoinstalled_dir_is_in_sys_path(self):
+        # Some packages require that they are placed somewhere under a directory in sys.path.
+        if not _AUTOINSTALLED_DIR in sys.path:
+            sys.path.append(_AUTOINSTALLED_DIR)
+
+    def find_module(self, fullname, _):
+        # This method will run before each import. See http://www.python.org/dev/peps/pep-0302/
+        if '.autoinstalled' not in fullname:
+            return
+
+        # Note: all of the methods must follow the "_install_XXX" convention in
+        # order for autoinstall_everything(), below, to work properly.
+        if '.mechanize' in fullname:
+            self._install_mechanize()
+        elif '.pep8' in fullname:
+            self._install_pep8()
+        elif '.pylint' in fullname:
+            self._install_pylint()
+        elif '.coverage' in fullname:
+            self._install_coverage()
+        elif '.eliza' in fullname:
+            self._install_eliza()
+        elif '.irc' in fullname:
+            self._install_irc()
+        elif '.buildbot' in fullname:
+            self._install_buildbot()
+        elif '.webpagereplay' in fullname:
+            self._install_webpagereplay()
+
+    def _install_mechanize(self):
+        return self._install("http://pypi.python.org/packages/source/m/mechanize/mechanize-0.2.5.tar.gz",
+                             "mechanize-0.2.5/mechanize")
+
+    def _install_pep8(self):
+        return self._install("http://pypi.python.org/packages/source/p/pep8/pep8-0.5.0.tar.gz#md5=512a818af9979290cd619cce8e9c2e2b",
+                             "pep8-0.5.0/pep8.py")
+
+    def _install_pylint(self):
+        self._ensure_autoinstalled_dir_is_in_sys_path()
+        did_install_something = False
+        if not self._fs.exists(self._fs.join(_AUTOINSTALLED_DIR, "pylint")):
+            installer = AutoInstaller(target_dir=_AUTOINSTALLED_DIR)
+            did_install_something = installer.install("http://pypi.python.org/packages/source/l/logilab-common/logilab-common-0.58.1.tar.gz#md5=77298ab2d8bb8b4af9219791e7cee8ce", url_subpath="logilab-common-0.58.1", target_name="logilab/common")
+            did_install_something |= installer.install("http://pypi.python.org/packages/source/l/logilab-astng/logilab-astng-0.24.1.tar.gz#md5=ddaf66e4d85714d9c47a46d4bed406de", url_subpath="logilab-astng-0.24.1", target_name="logilab/astng")
+            did_install_something |= installer.install('http://pypi.python.org/packages/source/p/pylint/pylint-0.25.1.tar.gz#md5=728bbc2b339bc3749af013709a7f87a5', url_subpath="pylint-0.25.1", target_name="pylint")
+        return did_install_something
+
+    # autoinstalled.buildbot is used by BuildSlaveSupport/build.webkit.org-config/mastercfg_unittest.py
+    # and should ideally match the version of BuildBot used at build.webkit.org.
+    def _install_buildbot(self):
+        # The buildbot package uses jinja2, for example, in buildbot/status/web/base.py.
+        # buildbot imports jinja2 directly (as though it were installed on the system),
+        # so the search path needs to include jinja2.  We put jinja2 in
+        # its own directory so that we can include it in the search path
+        # without including other modules as a side effect.
+        jinja_dir = self._fs.join(_AUTOINSTALLED_DIR, "jinja2")
+        installer = AutoInstaller(append_to_search_path=True, target_dir=jinja_dir)
+        did_install_something = installer.install(url="http://pypi.python.org/packages/source/J/Jinja2/Jinja2-2.6.tar.gz#md5=1c49a8825c993bfdcf55bb36897d28a2",
+                                                url_subpath="Jinja2-2.6/jinja2")
+
+        SQLAlchemy_dir = self._fs.join(_AUTOINSTALLED_DIR, "sqlalchemy")
+        installer = AutoInstaller(append_to_search_path=True, target_dir=SQLAlchemy_dir)
+        did_install_something |= installer.install(url="http://pypi.python.org/packages/source/S/SQLAlchemy/SQLAlchemy-0.7.7.tar.gz#md5=ddf6df7e014cea318fa981364f3f93b9",
+                                                 url_subpath="SQLAlchemy-0.7.7/lib/sqlalchemy")
+
+        did_install_something |= self._install("http://pypi.python.org/packages/source/b/buildbot/buildbot-0.8.6p1.tar.gz#md5=b6727d2810c692062c657492bcbeac6a", "buildbot-0.8.6p1/buildbot")
+        return did_install_something
+
+    def _install_coverage(self):
+        self._ensure_autoinstalled_dir_is_in_sys_path()
+        return self._install(url="http://pypi.python.org/packages/source/c/coverage/coverage-3.5.1.tar.gz#md5=410d4c8155a4dab222f2bc51212d4a24", url_subpath="coverage-3.5.1/coverage")
+
+    def _install_eliza(self):
+        return self._install(url="http://www.adambarth.com/webkit/eliza", target_name="eliza.py")
+
+    def _install_irc(self):
+        # Since irclib and ircbot are two top-level packages, we need to import
+        # them separately.  We group them into an irc package for better
+        # organization purposes.
+        irc_dir = self._fs.join(_AUTOINSTALLED_DIR, "irc")
+        installer = AutoInstaller(target_dir=irc_dir)
+        did_install_something = installer.install(url="http://downloads.sourceforge.net/project/python-irclib/python-irclib/0.4.8/python-irclib-0.4.8.zip",
+                                                url_subpath="irclib.py")
+        did_install_something |= installer.install(url="http://downloads.sourceforge.net/project/python-irclib/python-irclib/0.4.8/python-irclib-0.4.8.zip",
+                          url_subpath="ircbot.py")
+        return did_install_something
+
+    def _install_webpagereplay(self):
+        did_install_something = False
+        if not self._fs.exists(self._fs.join(_AUTOINSTALLED_DIR, "webpagereplay")):
+            did_install_something = self._install("http://web-page-replay.googlecode.com/files/webpagereplay-1.1.2.tar.gz", "webpagereplay-1.1.2")
+            self._fs.move(self._fs.join(_AUTOINSTALLED_DIR, "webpagereplay-1.1.2"), self._fs.join(_AUTOINSTALLED_DIR, "webpagereplay"))
+
+        module_init_path = self._fs.join(_AUTOINSTALLED_DIR, "webpagereplay", "__init__.py")
+        if not self._fs.exists(module_init_path):
+            self._fs.write_text_file(module_init_path, "")
+        return did_install_something
+
+    def _install(self, url, url_subpath=None, target_name=None):
+        installer = AutoInstaller(target_dir=_AUTOINSTALLED_DIR)
+        return installer.install(url=url, url_subpath=url_subpath, target_name=target_name)
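+
+    # A minimal sketch of how a new package would be wired into this hook.
+    # The "example" name, URL and subpath below are placeholders, not real
+    # webkitpy dependencies: add an "elif '.example' in fullname:" branch to
+    # find_module() above, plus a method that follows the _install_XXX
+    # convention so autoinstall_everything() picks it up:
+    #
+    #     def _install_example(self):
+    #         return self._install(
+    #             url="http://example.com/example-1.0.tar.gz",
+    #             url_subpath="example-1.0/example")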
+
+
+_hook = AutoinstallImportHook()
+sys.meta_path.append(_hook)
+
+
+def autoinstall_everything():
+    install_methods = [method for method in dir(_hook.__class__) if method.startswith('_install_')]
+    did_install_something = False
+    for method in install_methods:
+        did_install_something |= getattr(_hook, method)()
+    return did_install_something
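+
+# A short note on how the hook above is exercised (illustrative, mirroring the
+# imports in __init___unittest.py): importing any handled module triggers the
+# download on first use, e.g.
+#
+#     from webkitpy.thirdparty.autoinstalled import mechanize
+#     import webkitpy.thirdparty.autoinstalled.pep8
+#
+# while autoinstall_everything() above fetches every package eagerly.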
diff --git a/Tools/Scripts/webkitpy/thirdparty/__init___unittest.py b/Tools/Scripts/webkitpy/thirdparty/__init___unittest.py
new file mode 100644
index 0000000..b3eb75f
--- /dev/null
+++ b/Tools/Scripts/webkitpy/thirdparty/__init___unittest.py
@@ -0,0 +1,74 @@
+#!/usr/bin/python
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+import unittest
+
+from webkitpy.thirdparty import AutoinstallImportHook
+
+
+class ThirdpartyTest(unittest.TestCase):
+    def test_import_hook(self):
+        # Add another import hook and make sure we get called.
+        class MockImportHook(AutoinstallImportHook):
+            def __init__(self):
+                AutoinstallImportHook.__init__(self)
+                self.eliza_installed = False
+
+            def _install_eliza(self):
+                self.eliza_installed = True
+
+        mock_import_hook = MockImportHook()
+        try:
+            # The actual AutoinstallImportHook should be installed before us,
+            # so these modules will get installed before MockImportHook runs.
+            sys.meta_path.append(mock_import_hook)
+            # unused-variable, import failures - pylint: disable-msg=W0612,E0611,F0401
+            from webkitpy.thirdparty.autoinstalled import eliza
+            self.assertTrue(mock_import_hook.eliza_installed)
+
+        finally:
+            sys.meta_path.remove(mock_import_hook)
+
+    def test_imports(self):
+        # This method tests that we can actually import everything.
+        # unused-variable, import failures - pylint: disable-msg=W0612,E0611,F0401
+        import webkitpy.thirdparty.autoinstalled.buildbot
+        import webkitpy.thirdparty.autoinstalled.coverage
+        import webkitpy.thirdparty.autoinstalled.eliza
+        import webkitpy.thirdparty.autoinstalled.irc.ircbot
+        import webkitpy.thirdparty.autoinstalled.irc.irclib
+        import webkitpy.thirdparty.autoinstalled.mechanize
+        import webkitpy.thirdparty.autoinstalled.pylint
+        import webkitpy.thirdparty.autoinstalled.webpagereplay
+        import webkitpy.thirdparty.autoinstalled.pep8
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/thirdparty/mock.py b/Tools/Scripts/webkitpy/thirdparty/mock.py
new file mode 100644
index 0000000..015c19e
--- /dev/null
+++ b/Tools/Scripts/webkitpy/thirdparty/mock.py
@@ -0,0 +1,309 @@
+# mock.py
+# Test tools for mocking and patching.
+# Copyright (C) 2007-2009 Michael Foord
+# E-mail: fuzzyman AT voidspace DOT org DOT uk
+
+# mock 0.6.0
+# http://www.voidspace.org.uk/python/mock/
+
+# Released subject to the BSD License
+# Please see http://www.voidspace.org.uk/python/license.shtml
+
+# 2009-11-25: Licence downloaded from above URL.
+# BEGIN DOWNLOADED LICENSE
+#
+# Copyright (c) 2003-2009, Michael Foord
+# All rights reserved.
+# E-mail : fuzzyman AT voidspace DOT org DOT uk
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#
+#     * Neither the name of Michael Foord nor the name of Voidspace
+#       may be used to endorse or promote products derived from this
+#       software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# END DOWNLOADED LICENSE
+
+# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
+# Comments, suggestions and bug reports welcome.
+
+
+__all__ = (
+    'Mock',
+    'patch',
+    'patch_object',
+    'sentinel',
+    'DEFAULT'
+)
+
+__version__ = '0.6.0'
+
+class SentinelObject(object):
+    def __init__(self, name):
+        self.name = name
+
+    def __repr__(self):
+        return '<SentinelObject "%s">' % self.name
+
+
+class Sentinel(object):
+    def __init__(self):
+        self._sentinels = {}
+
+    def __getattr__(self, name):
+        return self._sentinels.setdefault(name, SentinelObject(name))
+
+
+sentinel = Sentinel()
+
+DEFAULT = sentinel.DEFAULT
+
+class OldStyleClass:
+    pass
+ClassType = type(OldStyleClass)
+
+def _is_magic(name):
+    return '__%s__' % name[2:-2] == name
+
+def _copy(value):
+    if type(value) in (dict, list, tuple, set):
+        return type(value)(value)
+    return value
+
+
+class Mock(object):
+
+    def __init__(self, spec=None, side_effect=None, return_value=DEFAULT,
+                 name=None, parent=None, wraps=None):
+        self._parent = parent
+        self._name = name
+        if spec is not None and not isinstance(spec, list):
+            spec = [member for member in dir(spec) if not _is_magic(member)]
+
+        self._methods = spec
+        self._children = {}
+        self._return_value = return_value
+        self.side_effect = side_effect
+        self._wraps = wraps
+
+        self.reset_mock()
+
+
+    def reset_mock(self):
+        self.called = False
+        self.call_args = None
+        self.call_count = 0
+        self.call_args_list = []
+        self.method_calls = []
+        for child in self._children.itervalues():
+            child.reset_mock()
+        if isinstance(self._return_value, Mock):
+            self._return_value.reset_mock()
+
+
+    def __get_return_value(self):
+        if self._return_value is DEFAULT:
+            self._return_value = Mock()
+        return self._return_value
+
+    def __set_return_value(self, value):
+        self._return_value = value
+
+    return_value = property(__get_return_value, __set_return_value)
+
+
+    def __call__(self, *args, **kwargs):
+        self.called = True
+        self.call_count += 1
+        self.call_args = (args, kwargs)
+        self.call_args_list.append((args, kwargs))
+
+        parent = self._parent
+        name = self._name
+        while parent is not None:
+            parent.method_calls.append((name, args, kwargs))
+            if parent._parent is None:
+                break
+            name = parent._name + '.' + name
+            parent = parent._parent
+
+        ret_val = DEFAULT
+        if self.side_effect is not None:
+            if (isinstance(self.side_effect, Exception) or
+                isinstance(self.side_effect, (type, ClassType)) and
+                issubclass(self.side_effect, Exception)):
+                raise self.side_effect
+
+            ret_val = self.side_effect(*args, **kwargs)
+            if ret_val is DEFAULT:
+                ret_val = self.return_value
+
+        if self._wraps is not None and self._return_value is DEFAULT:
+            return self._wraps(*args, **kwargs)
+        if ret_val is DEFAULT:
+            ret_val = self.return_value
+        return ret_val
+
+
+    def __getattr__(self, name):
+        if self._methods is not None:
+            if name not in self._methods:
+                raise AttributeError("Mock object has no attribute '%s'" % name)
+        elif _is_magic(name):
+            raise AttributeError(name)
+
+        if name not in self._children:
+            wraps = None
+            if self._wraps is not None:
+                wraps = getattr(self._wraps, name)
+            self._children[name] = Mock(parent=self, name=name, wraps=wraps)
+
+        return self._children[name]
+
+
+    def assert_called_with(self, *args, **kwargs):
+        assert self.call_args == (args, kwargs), 'Expected: %s\nCalled with: %s' % ((args, kwargs), self.call_args)
+
+
+def _dot_lookup(thing, comp, import_path):
+    try:
+        return getattr(thing, comp)
+    except AttributeError:
+        __import__(import_path)
+        return getattr(thing, comp)
+
+
+def _importer(target):
+    components = target.split('.')
+    import_path = components.pop(0)
+    thing = __import__(import_path)
+
+    for comp in components:
+        import_path += ".%s" % comp
+        thing = _dot_lookup(thing, comp, import_path)
+    return thing
+
+
+class _patch(object):
+    def __init__(self, target, attribute, new, spec, create):
+        self.target = target
+        self.attribute = attribute
+        self.new = new
+        self.spec = spec
+        self.create = create
+        self.has_local = False
+
+
+    def __call__(self, func):
+        if hasattr(func, 'patchings'):
+            func.patchings.append(self)
+            return func
+
+        def patched(*args, **keywargs):
+            # don't use a with statement here (backwards compatibility with 2.5)
+            extra_args = []
+            for patching in patched.patchings:
+                arg = patching.__enter__()
+                if patching.new is DEFAULT:
+                    extra_args.append(arg)
+            args += tuple(extra_args)
+            try:
+                return func(*args, **keywargs)
+            finally:
+                for patching in getattr(patched, 'patchings', []):
+                    patching.__exit__()
+
+        patched.patchings = [self]
+        patched.__name__ = func.__name__
+        patched.compat_co_firstlineno = getattr(func, "compat_co_firstlineno",
+                                                func.func_code.co_firstlineno)
+        return patched
+
+
+    def get_original(self):
+        target = self.target
+        name = self.attribute
+        create = self.create
+
+        original = DEFAULT
+        if _has_local_attr(target, name):
+            try:
+                original = target.__dict__[name]
+            except AttributeError:
+                # for instances of classes with slots, they have no __dict__
+                original = getattr(target, name)
+        elif not create and not hasattr(target, name):
+            raise AttributeError("%s does not have the attribute %r" % (target, name))
+        return original
+
+
+    def __enter__(self):
+        new, spec, = self.new, self.spec
+        original = self.get_original()
+        if new is DEFAULT:
+            # XXXX what if original is DEFAULT - shouldn't use it as a spec
+            inherit = False
+            if spec == True:
+                # set spec to the object we are replacing
+                spec = original
+                if isinstance(spec, (type, ClassType)):
+                    inherit = True
+            new = Mock(spec=spec)
+            if inherit:
+                new.return_value = Mock(spec=spec)
+        self.temp_original = original
+        setattr(self.target, self.attribute, new)
+        return new
+
+
+    def __exit__(self, *_):
+        if self.temp_original is not DEFAULT:
+            setattr(self.target, self.attribute, self.temp_original)
+        else:
+            delattr(self.target, self.attribute)
+        del self.temp_original
+
+
+def patch_object(target, attribute, new=DEFAULT, spec=None, create=False):
+    return _patch(target, attribute, new, spec, create)
+
+
+def patch(target, new=DEFAULT, spec=None, create=False):
+    try:
+        target, attribute = target.rsplit('.', 1)
+    except (TypeError, ValueError):
+        raise TypeError("Need a valid target to patch. You supplied: %r" % (target,))
+    target = _importer(target)
+    return _patch(target, attribute, new, spec, create)
+
+
+
+def _has_local_attr(obj, name):
+    try:
+        return name in vars(obj)
+    except TypeError:
+        # objects without a __dict__
+        return hasattr(obj, name)
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/COPYING b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/COPYING
new file mode 100644
index 0000000..989d02e
--- /dev/null
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/COPYING
@@ -0,0 +1,28 @@
+Copyright 2012, Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/__init__.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/__init__.py
new file mode 100644
index 0000000..454ae0c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/__init__.py
@@ -0,0 +1,197 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""WebSocket extension for Apache HTTP Server.
+
+mod_pywebsocket is a WebSocket extension for Apache HTTP Server
+intended for testing or experimental purposes. mod_python is required.
+
+
+Installation
+============
+
+0. Prepare an Apache HTTP Server for which mod_python is enabled.
+
+1. Specify the following Apache HTTP Server directives to suit your
+   configuration.
+
+   If mod_pywebsocket is not in the Python path, specify the following.
+   <websock_lib> is the directory where mod_pywebsocket is installed.
+
+       PythonPath "sys.path+['<websock_lib>']"
+
+   Always specify the following. <websock_handlers> is the directory where
+   user-written WebSocket handlers are placed.
+
+       PythonOption mod_pywebsocket.handler_root <websock_handlers>
+       PythonHeaderParserHandler mod_pywebsocket.headerparserhandler
+
+   To limit the search for WebSocket handlers to a directory <scan_dir>
+   under <websock_handlers>, configure as follows:
+
+       PythonOption mod_pywebsocket.handler_scan <scan_dir>
+
+   <scan_dir> is useful in saving scan time when <websock_handlers>
+   contains many non-WebSocket handler files.
+
+   If you want to allow handlers whose canonical path is not under the
+   root directory (i.e., a symbolic link is in the root directory but
+   its target is not), configure as follows:
+
+       PythonOption mod_pywebsocket.allow_handlers_outside_root_dir On
+
+   Example snippet of httpd.conf:
+   (mod_pywebsocket is in /websock_lib, WebSocket handlers are in
+   /websock_handlers, port is 80 for ws, 443 for wss.)
+
+       <IfModule python_module>
+         PythonPath "sys.path+['/websock_lib']"
+         PythonOption mod_pywebsocket.handler_root /websock_handlers
+         PythonHeaderParserHandler mod_pywebsocket.headerparserhandler
+       </IfModule>
+
+2. Tune Apache parameters for serving WebSocket. In particular, at
+   least the TimeOut directive (a core feature) and the
+   RequestReadTimeout directive (from mod_reqtimeout) should be
+   adjusted so that idle connections are not killed after only a few
+   seconds.
+
+3. Verify installation. You can use example/console.html to poke the server.
+
+
+Writing WebSocket handlers
+==========================
+
+When a WebSocket request comes in, the resource name specified in the
+handshake is treated as a file path under <websock_handlers>, and the
+handler defined in <websock_handlers>/<resource_name>_wsh.py is
+invoked.
+
+For example, if the resource name is /example/chat, the handler defined in
+<websock_handlers>/example/chat_wsh.py is invoked.
+
+A WebSocket handler is composed of the following three functions:
+
+    web_socket_do_extra_handshake(request)
+    web_socket_transfer_data(request)
+    web_socket_passive_closing_handshake(request)
+
+where:
+    request: mod_python request.
+
+web_socket_do_extra_handshake is called during the handshake after the
+headers are successfully parsed and WebSocket properties (ws_location,
+ws_origin, and ws_resource) are added to request. A handler
+can reject the request by raising an exception.
+
+A request object has the following properties that you can use during the
+extra handshake (web_socket_do_extra_handshake):
+- ws_resource
+- ws_origin
+- ws_version
+- ws_location (HyBi 00 only)
+- ws_extensions (HyBi 06 and later)
+- ws_deflate (HyBi 06 and later)
+- ws_protocol
+- ws_requested_protocols (HyBi 06 and later)
+
+The last two are a bit tricky. See the next subsection.
+
+
+Subprotocol Negotiation
+-----------------------
+
+For HyBi 06 and later, ws_protocol is always set to None when
+web_socket_do_extra_handshake is called. If ws_requested_protocols is not
+None, you must choose one subprotocol from this list and set it to
+ws_protocol.
+
+For HyBi 00, when web_socket_do_extra_handshake is called, ws_protocol
+is set to the value given by the client in the Sec-WebSocket-Protocol
+header, or to None if no such header was present in the opening
+handshake request. Finish the extra handshake with ws_protocol
+untouched to accept the requested subprotocol; the
+Sec-WebSocket-Protocol header will then be sent back to the client in
+the response with the same value as requested. Raise an exception in
+web_socket_do_extra_handshake to reject the requested subprotocol.
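+
+For example, a handler that only accepts a hypothetical 'chat'
+subprotocol under HyBi 06 and later could look roughly like the
+following sketch:
+
+    def web_socket_do_extra_handshake(request):
+        if request.ws_requested_protocols is not None:
+            if 'chat' not in request.ws_requested_protocols:
+                raise Exception('Unsupported subprotocol')
+            request.ws_protocol = 'chat'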
+
+
+Data Transfer
+-------------
+
+web_socket_transfer_data is called after the handshake has completed
+successfully. A handler can receive messages from and send messages to
+the client using request. The mod_pywebsocket.msgutil module provides
+utilities for data transfer.
+
+You can receive a message with the following statement.
+
+    message = request.ws_stream.receive_message()
+
+This call blocks until a complete text frame arrives, and the payload
+data of the incoming frame is stored in message. With the IETF HyBi 00
+or later protocol, receive_message() returns None when it receives a
+client-initiated closing handshake. If any error occurs,
+receive_message() raises an exception.
+
+You can send a message with the following statement.
+
+    request.ws_stream.send_message(message)
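+
+Putting these together, a minimal echo handler (for example a
+hypothetical <websock_handlers>/echo_wsh.py) could look roughly like
+this sketch (web_socket_passive_closing_handshake is discussed in the
+Closing Connection section below):
+
+    def web_socket_do_extra_handshake(request):
+        pass  # accept the request as is
+
+    def web_socket_transfer_data(request):
+        while True:
+            message = request.ws_stream.receive_message()
+            if message is None:
+                return
+            request.ws_stream.send_message(message)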
+
+
+Closing Connection
+------------------
+
+Executing the following statement, or simply returning from
+web_socket_transfer_data, causes the connection to close.
+
+    request.ws_stream.close_connection()
+
+close_connection waits for a closing handshake acknowledgement from
+the client and raises an exception if it cannot receive a valid
+acknowledgement.
+
+web_socket_passive_closing_handshake is called immediately after the
+server receives an incoming closing frame from the client peer. You
+can specify a code and a reason via its return values; they are sent
+in an outgoing closing frame from the server. A request object has the
+following properties that you can use in
+web_socket_passive_closing_handshake:
+- ws_close_code
+- ws_close_reason
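+
+For example, a sketch of a handler function that acknowledges the
+close with the same code and reason it received:
+
+    def web_socket_passive_closing_handshake(request):
+        return request.ws_close_code, request.ws_close_reason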
+
+
+Threading
+---------
+
+A WebSocket handler must be thread-safe if the server (Apache or
+standalone.py) is configured to use threads.
+"""
+
+
+# vi:sts=4 sw=4 et tw=72
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_base.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_base.py
new file mode 100644
index 0000000..60fb33d
--- /dev/null
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_base.py
@@ -0,0 +1,165 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Base stream class.
+"""
+
+
+# Note: request.connection.write/read are used in this module, even though
+# mod_python document says that they should be used only in connection
+# handlers. Unfortunately, we have no other options. For example,
+# request.write/read are not suitable because they don't allow direct raw bytes
+# writing/reading.
+
+
+from mod_pywebsocket import util
+
+
+# Exceptions
+
+
+class ConnectionTerminatedException(Exception):
+    """This exception will be raised when a connection is terminated
+    unexpectedly.
+    """
+
+    pass
+
+
+class InvalidFrameException(ConnectionTerminatedException):
+    """This exception will be raised when we receive an invalid frame
+    that we cannot parse.
+    """
+
+    pass
+
+
+class BadOperationException(Exception):
+    """This exception will be raised when send_message() is called on a
+    server-terminated connection or receive_message() is called on a
+    client-terminated connection.
+    """
+
+    pass
+
+
+class UnsupportedFrameException(Exception):
+    """This exception will be raised when we receive a frame with a
+    flag or opcode that we cannot handle. Handlers can catch and ignore
+    this exception and call receive_message() again to continue
+    processing the next frame.
+    """
+
+    pass
+
+
+class InvalidUTF8Exception(Exception):
+    """This exception will be raised when we receive a text frame that
+    contains an invalid UTF-8 string.
+    """
+
+    pass
+
+
+class StreamBase(object):
+    """Base stream class."""
+
+    def __init__(self, request):
+        """Construct an instance.
+
+        Args:
+            request: mod_python request.
+        """
+
+        self._logger = util.get_class_logger(self)
+
+        self._request = request
+
+    def _read(self, length):
+        """Reads length bytes from the connection. When the read fails,
+        the peer address is included in the exception message.
+
+        Raises:
+            ConnectionTerminatedException: when read returns empty string.
+        """
+
+        bytes = self._request.connection.read(length)
+        if not bytes:
+            raise ConnectionTerminatedException(
+                'Receiving %d byte failed. Peer (%r) closed connection' %
+                (length, (self._request.connection.remote_addr,)))
+        return bytes
+
+    def _write(self, bytes):
+        """Writes the given bytes to the connection. If an exception is
+        caught, the remote address is prepended to the exception message
+        and the exception is re-raised.
+        """
+
+        try:
+            self._request.connection.write(bytes)
+        except Exception, e:
+            util.prepend_message_to_exception(
+                    'Failed to send message to %r: ' %
+                            (self._request.connection.remote_addr,),
+                    e)
+            raise
+
+    def receive_bytes(self, length):
+        """Receives multiple bytes. Retries the read until the specified
+        amount of data has been received.
+
+        Raises:
+            ConnectionTerminatedException: when read returns empty string.
+        """
+
+        bytes = []
+        while length > 0:
+            new_bytes = self._read(length)
+            bytes.append(new_bytes)
+            length -= len(new_bytes)
+        return ''.join(bytes)
+
+    def _read_until(self, delim_char):
+        """Reads bytes until we encounter delim_char. The result will not
+        contain delim_char.
+
+        Raises:
+            ConnectionTerminatedException: when read returns empty string.
+        """
+
+        bytes = []
+        while True:
+            ch = self._read(1)
+            if ch == delim_char:
+                break
+            bytes.append(ch)
+        return ''.join(bytes)
+
+
+# vi:sts=4 sw=4 et
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_hixie75.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_hixie75.py
new file mode 100644
index 0000000..94cf5b3
--- /dev/null
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_hixie75.py
@@ -0,0 +1,229 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides a class for parsing/building frames of the WebSocket
+protocol version HyBi 00 and Hixie 75.
+
+Specification:
+- HyBi 00 http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-00
+- Hixie 75 http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-75
+"""
+
+
+from mod_pywebsocket import common
+from mod_pywebsocket._stream_base import BadOperationException
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_base import InvalidFrameException
+from mod_pywebsocket._stream_base import StreamBase
+from mod_pywebsocket._stream_base import UnsupportedFrameException
+from mod_pywebsocket import util
+
+
+class StreamHixie75(StreamBase):
+    """A class for parsing/building frames of the WebSocket protocol version
+    HyBi 00 and Hixie 75.
+    """
+
+    def __init__(self, request, enable_closing_handshake=False):
+        """Construct an instance.
+
+        Args:
+            request: mod_python request.
+            enable_closing_handshake: to let StreamHixie75 perform closing
+                                      handshake as specified in HyBi 00, set
+                                      this option to True.
+        """
+
+        StreamBase.__init__(self, request)
+
+        self._logger = util.get_class_logger(self)
+
+        self._enable_closing_handshake = enable_closing_handshake
+
+        self._request.client_terminated = False
+        self._request.server_terminated = False
+
+    def send_message(self, message, end=True, binary=False):
+        """Send message.
+
+        Args:
+            message: unicode string to send.
+            binary: not used in hixie75.
+
+        Raises:
+            BadOperationException: when called on a server-terminated
+                connection.
+        """
+
+        if not end:
+            raise BadOperationException(
+                'StreamHixie75 doesn\'t support send_message with end=False')
+
+        if binary:
+            raise BadOperationException(
+                'StreamHixie75 doesn\'t support send_message with binary=True')
+
+        if self._request.server_terminated:
+            raise BadOperationException(
+                'Requested send_message after sending out a closing handshake')
+
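+        # A Hixie 75 / HyBi 00 text frame is a 0x00 byte, the UTF-8
+        # encoded payload, and a terminating 0xff byte.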
+        self._write(''.join(['\x00', message.encode('utf-8'), '\xff']))
+
+    def _read_payload_length_hixie75(self):
+        """Reads a length header in a Hixie75 version frame with length.
+
+        Raises:
+            ConnectionTerminatedException: when read returns empty string.
+        """
+
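+        # The length is encoded base 128, most significant group first:
+        # the low 7 bits of each byte carry length data and the high
+        # bit marks continuation. For example, the byte sequence
+        # 0x81 0x00 decodes to 128.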
+        length = 0
+        while True:
+            b_str = self._read(1)
+            b = ord(b_str)
+            length = length * 128 + (b & 0x7f)
+            if (b & 0x80) == 0:
+                break
+        return length
+
+    def receive_message(self):
+        """Receive a WebSocket frame and return its payload as a
+        unicode string.
+
+        Returns:
+            the payload of the WebSocket frame as a unicode string.
+
+        Raises:
+            ConnectionTerminatedException: when read returns empty
+                string.
+            BadOperationException: when called on a client-terminated
+                connection.
+        """
+
+        if self._request.client_terminated:
+            raise BadOperationException(
+                'Requested receive_message after receiving a closing '
+                'handshake')
+
+        while True:
+            # Read 1 byte.
+            # mp_conn.read will block if no bytes are available.
+            # Timeout is controlled by TimeOut directive of Apache.
+            frame_type_str = self.receive_bytes(1)
+            frame_type = ord(frame_type_str)
+            if (frame_type & 0x80) == 0x80:
+                # The payload length is specified in the frame.
+                # Read and discard.
+                length = self._read_payload_length_hixie75()
+                if length > 0:
+                    _ = self.receive_bytes(length)
+                # 5.3 3. 12. if /type/ is 0xFF and /length/ is 0, then set the
+                # /client terminated/ flag and abort these steps.
+                if not self._enable_closing_handshake:
+                    continue
+
+                if frame_type == 0xFF and length == 0:
+                    self._request.client_terminated = True
+
+                    if self._request.server_terminated:
+                        self._logger.debug(
+                            'Received ack for server-initiated closing '
+                            'handshake')
+                        return None
+
+                    self._logger.debug(
+                        'Received client-initiated closing handshake')
+
+                    self._send_closing_handshake()
+                    self._logger.debug(
+                        'Sent ack for client-initiated closing handshake')
+                    return None
+            else:
+                # The payload is delimited with \xff.
+                bytes = self._read_until('\xff')
+                # The WebSocket protocol section 4.4 specifies that invalid
+                # characters must be replaced with U+fffd REPLACEMENT
+                # CHARACTER.
+                message = bytes.decode('utf-8', 'replace')
+                if frame_type == 0x00:
+                    return message
+                # Discard data of other types.
+
+    def _send_closing_handshake(self):
+        if not self._enable_closing_handshake:
+            raise BadOperationException(
+                'Closing handshake is not supported in Hixie 75 protocol')
+
+        self._request.server_terminated = True
+
+        # 5.3 the server may decide to terminate the WebSocket connection by
+        # running through the following steps:
+        # 1. send a 0xFF byte and a 0x00 byte to the client to indicate the
+        # start of the closing handshake.
+        self._write('\xff\x00')
+
+    def close_connection(self, unused_code='', unused_reason=''):
+        """Closes a WebSocket connection.
+
+        Raises:
+            ConnectionTerminatedException: when the closing handshake
+                was not successful.
+        """
+
+        if self._request.server_terminated:
+            self._logger.debug(
+                'Requested close_connection but server is already terminated')
+            return
+
+        if not self._enable_closing_handshake:
+            self._request.server_terminated = True
+            self._logger.debug('Connection closed')
+            return
+
+        self._send_closing_handshake()
+        self._logger.debug('Sent server-initiated closing handshake')
+
+        # TODO(ukai): 2. wait until the /client terminated/ flag has been set,
+        # or until a server-defined timeout expires.
+        #
+        # For now, we expect receiving closing handshake right after sending
+        # out closing handshake, and if we couldn't receive non-handshake
+        # frame, we take it as ConnectionTerminatedException.
+        message = self.receive_message()
+        if message is not None:
+            raise ConnectionTerminatedException(
+                'Didn\'t receive valid ack for closing handshake')
+        # TODO: 3. close the WebSocket connection.
+        # note: mod_python Connection (mp_conn) doesn't have close method.
+
+    def send_ping(self, body):
+        raise BadOperationException(
+            'StreamHixie75 doesn\'t support send_ping')
+
+
+# vi:sts=4 sw=4 et
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_hybi.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_hybi.py
new file mode 100644
index 0000000..bd158fa
--- /dev/null
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_hybi.py
@@ -0,0 +1,915 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides classes and helper functions for parsing/building frames
+of the WebSocket protocol (RFC 6455).
+
+Specification:
+http://tools.ietf.org/html/rfc6455
+"""
+
+
+from collections import deque
+import logging
+import os
+import struct
+import time
+
+from mod_pywebsocket import common
+from mod_pywebsocket import util
+from mod_pywebsocket._stream_base import BadOperationException
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_base import InvalidFrameException
+from mod_pywebsocket._stream_base import InvalidUTF8Exception
+from mod_pywebsocket._stream_base import StreamBase
+from mod_pywebsocket._stream_base import UnsupportedFrameException
+
+
+_NOOP_MASKER = util.NoopMasker()
+
+
+class Frame(object):
+
+    def __init__(self, fin=1, rsv1=0, rsv2=0, rsv3=0,
+                 opcode=None, payload=''):
+        self.fin = fin
+        self.rsv1 = rsv1
+        self.rsv2 = rsv2
+        self.rsv3 = rsv3
+        self.opcode = opcode
+        self.payload = payload
+
+
+# Helper functions made public to be used for writing unittests for WebSocket
+# clients.
+
+
+def create_length_header(length, mask):
+    """Creates a length header.
+
+    Args:
+        length: Frame length. Must be less than 2^63.
+        mask: Mask bit. Must be boolean.
+
+    Raises:
+        ValueError: when bad data is given.
+    """
+
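+    # Per RFC 6455, a payload length up to 125 fits in the low 7 bits
+    # of the second header octet; 126 there signals a 2-octet ('!H')
+    # extended length, and 127 signals an 8-octet ('!Q') extended
+    # length. The top bit of that octet is the mask flag.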
+    if mask:
+        mask_bit = 1 << 7
+    else:
+        mask_bit = 0
+
+    if length < 0:
+        raise ValueError('length must be a non-negative integer')
+    elif length <= 125:
+        return chr(mask_bit | length)
+    elif length < (1 << 16):
+        return chr(mask_bit | 126) + struct.pack('!H', length)
+    elif length < (1 << 63):
+        return chr(mask_bit | 127) + struct.pack('!Q', length)
+    else:
+        raise ValueError('Payload is too big for one frame')
+
+
+def create_header(opcode, payload_length, fin, rsv1, rsv2, rsv3, mask):
+    """Creates a frame header.
+
+    Raises:
+        Exception: when bad data is given.
+    """
+
+    if opcode < 0 or 0xf < opcode:
+        raise ValueError('Opcode out of range')
+
+    if payload_length < 0 or (1 << 63) <= payload_length:
+        raise ValueError('payload_length out of range')
+
+    if (fin | rsv1 | rsv2 | rsv3) & ~1:
+        raise ValueError('FIN bit and Reserved bit parameter must be 0 or 1')
+
+    header = ''
+
+    first_byte = ((fin << 7)
+                  | (rsv1 << 6) | (rsv2 << 5) | (rsv3 << 4)
+                  | opcode)
+    header += chr(first_byte)
+    header += create_length_header(payload_length, mask)
+
+    return header
+
+
+def _build_frame(header, body, mask):
+    if not mask:
+        return header + body
+
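+    # When masking is requested, a fresh 4-byte masking key is sent on
+    # the wire right after the header, followed by the payload XOR'ed
+    # with that key.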
+    masking_nonce = os.urandom(4)
+    masker = util.RepeatedXorMasker(masking_nonce)
+
+    return header + masking_nonce + masker.mask(body)
+
+
+def _filter_and_format_frame_object(frame, mask, frame_filters):
+    for frame_filter in frame_filters:
+        frame_filter.filter(frame)
+
+    header = create_header(
+        frame.opcode, len(frame.payload), frame.fin,
+        frame.rsv1, frame.rsv2, frame.rsv3, mask)
+    return _build_frame(header, frame.payload, mask)
+
+
+def create_binary_frame(
+    message, opcode=common.OPCODE_BINARY, fin=1, mask=False, frame_filters=[]):
+    """Creates a simple binary frame with no extension, reserved bit."""
+
+    frame = Frame(fin=fin, opcode=opcode, payload=message)
+    return _filter_and_format_frame_object(frame, mask, frame_filters)
+
+
+def create_text_frame(
+    message, opcode=common.OPCODE_TEXT, fin=1, mask=False, frame_filters=[]):
+    """Creates a simple text frame with no extension, reserved bit."""
+
+    encoded_message = message.encode('utf-8')
+    return create_binary_frame(encoded_message, opcode, fin, mask,
+                               frame_filters)
+
+
+def parse_frame(receive_bytes, logger=None,
+                ws_version=common.VERSION_HYBI_LATEST,
+                unmask_receive=True):
+    """Parses a frame. Returns a tuple containing each header field and
+    payload.
+
+    Args:
+        receive_bytes: a function that reads frame data from a stream or
+            something similar. The function takes length of the bytes to be
+            read. The function must raise ConnectionTerminatedException if
+            there is not enough data to be read.
+        logger: a logging object.
+        ws_version: the version of WebSocket protocol.
+        unmask_receive: unmask received frames. When an unmasked frame
+            is received while this option is set, InvalidFrameException
+            is raised.
+
+    Raises:
+        ConnectionTerminatedException: when receive_bytes raises it.
+        InvalidFrameException: when the frame contains invalid data.
+    """
+
+    if not logger:
+        logger = logging.getLogger()
+
+    logger.log(common.LOGLEVEL_FINE, 'Receive the first 2 octets of a frame')
+
+    received = receive_bytes(2)
+
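+    # First octet: FIN, RSV1-3 and the opcode. Second octet: the mask
+    # bit and the 7-bit base payload length (RFC 6455 section 5.2).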
+    first_byte = ord(received[0])
+    fin = (first_byte >> 7) & 1
+    rsv1 = (first_byte >> 6) & 1
+    rsv2 = (first_byte >> 5) & 1
+    rsv3 = (first_byte >> 4) & 1
+    opcode = first_byte & 0xf
+
+    second_byte = ord(received[1])
+    mask = (second_byte >> 7) & 1
+    payload_length = second_byte & 0x7f
+
+    logger.log(common.LOGLEVEL_FINE,
+               'FIN=%s, RSV1=%s, RSV2=%s, RSV3=%s, opcode=%s, '
+               'Mask=%s, Payload_length=%s',
+               fin, rsv1, rsv2, rsv3, opcode, mask, payload_length)
+
+    if (mask == 1) != unmask_receive:
+        raise InvalidFrameException(
+            'Mask bit on the received frame didn\'t match masking '
+            'configuration for received frames')
+
+    # The HyBi and later specs disallow putting a value in 0x0-0xFFFF
+    # into the 8-octet extended payload length field (or 0x0-0x7D in
+    # the 2-octet field).
+    valid_length_encoding = True
+    length_encoding_bytes = 1
+    if payload_length == 127:
+        logger.log(common.LOGLEVEL_FINE,
+                   'Receive 8-octet extended payload length')
+
+        extended_payload_length = receive_bytes(8)
+        payload_length = struct.unpack(
+            '!Q', extended_payload_length)[0]
+        if payload_length > 0x7FFFFFFFFFFFFFFF:
+            raise InvalidFrameException(
+                'Extended payload length >= 2^63')
+        if ws_version >= 13 and payload_length < 0x10000:
+            valid_length_encoding = False
+            length_encoding_bytes = 8
+
+        logger.log(common.LOGLEVEL_FINE,
+                   'Decoded_payload_length=%s', payload_length)
+    elif payload_length == 126:
+        logger.log(common.LOGLEVEL_FINE,
+                   'Receive 2-octet extended payload length')
+
+        extended_payload_length = receive_bytes(2)
+        payload_length = struct.unpack(
+            '!H', extended_payload_length)[0]
+        if ws_version >= 13 and payload_length < 126:
+            valid_length_encoding = False
+            length_encoding_bytes = 2
+
+        logger.log(common.LOGLEVEL_FINE,
+                   'Decoded_payload_length=%s', payload_length)
+
+    if not valid_length_encoding:
+        logger.warning(
+            'Payload length is not encoded using the minimal number of '
+            'bytes (%d is encoded using %d bytes)',
+            payload_length,
+            length_encoding_bytes)
+
+    if mask == 1:
+        logger.log(common.LOGLEVEL_FINE, 'Receive mask')
+
+        masking_nonce = receive_bytes(4)
+        masker = util.RepeatedXorMasker(masking_nonce)
+
+        logger.log(common.LOGLEVEL_FINE, 'Mask=%r', masking_nonce)
+    else:
+        masker = _NOOP_MASKER
+
+    logger.log(common.LOGLEVEL_FINE, 'Receive payload data')
+    if logger.isEnabledFor(common.LOGLEVEL_FINE):
+        receive_start = time.time()
+
+    raw_payload_bytes = receive_bytes(payload_length)
+
+    if logger.isEnabledFor(common.LOGLEVEL_FINE):
+        logger.log(
+            common.LOGLEVEL_FINE,
+            'Done receiving payload data at %s MB/s',
+            payload_length / (time.time() - receive_start) / 1000 / 1000)
+    logger.log(common.LOGLEVEL_FINE, 'Unmask payload data')
+
+    if logger.isEnabledFor(common.LOGLEVEL_FINE):
+        unmask_start = time.time()
+
+    bytes = masker.mask(raw_payload_bytes)
+
+    if logger.isEnabledFor(common.LOGLEVEL_FINE):
+        logger.log(
+            common.LOGLEVEL_FINE,
+            'Done unmasking payload data at %s MB/s',
+            payload_length / (time.time() - unmask_start) / 1000 / 1000)
+
+    return opcode, bytes, fin, rsv1, rsv2, rsv3
+
+
+class FragmentedFrameBuilder(object):
+    """A stateful class to send a message as fragments."""
+
+    def __init__(self, mask, frame_filters=[], encode_utf8=True):
+        """Constructs an instance."""
+
+        self._mask = mask
+        self._frame_filters = frame_filters
+        # This is for skipping UTF-8 encoding when building text type frames
+        # from compressed data.
+        self._encode_utf8 = encode_utf8
+
+        self._started = False
+
+        # Holds the opcode of the first frame of a message, used to
+        # verify that the types of the other frames in the message are
+        # all the same.
+        self._opcode = common.OPCODE_TEXT
+
+    def build(self, payload_data, end, binary):
+        if binary:
+            frame_type = common.OPCODE_BINARY
+        else:
+            frame_type = common.OPCODE_TEXT
+        if self._started:
+            if self._opcode != frame_type:
+                raise ValueError('Message types are different in frames for '
+                                 'the same message')
+            opcode = common.OPCODE_CONTINUATION
+        else:
+            opcode = frame_type
+            self._opcode = frame_type
+
+        if end:
+            self._started = False
+            fin = 1
+        else:
+            self._started = True
+            fin = 0
+
+        if binary or not self._encode_utf8:
+            return create_binary_frame(
+                payload_data, opcode, fin, self._mask, self._frame_filters)
+        else:
+            return create_text_frame(
+                payload_data, opcode, fin, self._mask, self._frame_filters)
+
+
+def _create_control_frame(opcode, body, mask, frame_filters):
+    frame = Frame(opcode=opcode, payload=body)
+
+    for frame_filter in frame_filters:
+        frame_filter.filter(frame)
+
+    if len(frame.payload) > 125:
+        raise BadOperationException(
+            'Payload data size of control frames must be 125 bytes or less')
+
+    header = create_header(
+        frame.opcode, len(frame.payload), frame.fin,
+        frame.rsv1, frame.rsv2, frame.rsv3, mask)
+    return _build_frame(header, frame.payload, mask)
+
+
+def create_ping_frame(body, mask=False, frame_filters=[]):
+    return _create_control_frame(common.OPCODE_PING, body, mask, frame_filters)
+
+
+def create_pong_frame(body, mask=False, frame_filters=[]):
+    return _create_control_frame(common.OPCODE_PONG, body, mask, frame_filters)
+
+
+def create_close_frame(body, mask=False, frame_filters=[]):
+    return _create_control_frame(
+        common.OPCODE_CLOSE, body, mask, frame_filters)
+
+
+def create_closing_handshake_body(code, reason):
+    body = ''
+    if code is not None:
+        if (code > common.STATUS_USER_PRIVATE_MAX or
+            code < common.STATUS_NORMAL_CLOSURE):
+            raise BadOperationException('Status code is out of range')
+        if (code == common.STATUS_NO_STATUS_RECEIVED or
+            code == common.STATUS_ABNORMAL_CLOSURE or
+            code == common.STATUS_TLS_HANDSHAKE):
+            raise BadOperationException('Status code is reserved pseudo '
+                'code')
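+        # The close frame body is the status code as a 2-octet unsigned
+        # integer in network byte order, followed by the UTF-8 encoded
+        # reason.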
+        encoded_reason = reason.encode('utf-8')
+        body = struct.pack('!H', code) + encoded_reason
+    return body
+
+
+class StreamOptions(object):
+    """Holds option values to configure Stream objects."""
+
+    def __init__(self):
+        """Constructs StreamOptions."""
+
+        # Enables deflate-stream extension.
+        self.deflate_stream = False
+
+        # Filters applied to frames.
+        self.outgoing_frame_filters = []
+        self.incoming_frame_filters = []
+
+        # Filters applied to messages. Control frames are not affected by them.
+        self.outgoing_message_filters = []
+        self.incoming_message_filters = []
+
+        self.encode_text_message_to_utf8 = True
+        self.mask_send = False
+        self.unmask_receive = True
+        # RFC6455 disallows fragmented control frames, but mux extension
+        # relaxes the restriction.
+        self.allow_fragmented_control_frame = False
+
+
+class Stream(StreamBase):
+    """A class for parsing/building frames of the WebSocket protocol
+    (RFC 6455).
+    """
+
+    def __init__(self, request, options):
+        """Constructs an instance.
+
+        Args:
+            request: mod_python request.
+        """
+
+        StreamBase.__init__(self, request)
+
+        self._logger = util.get_class_logger(self)
+
+        self._options = options
+
+        if self._options.deflate_stream:
+            self._logger.debug('Setup filter for deflate-stream')
+            self._request = util.DeflateRequest(self._request)
+
+        self._request.client_terminated = False
+        self._request.server_terminated = False
+
+        # Holds body of received fragments.
+        self._received_fragments = []
+        # Holds the opcode of the first fragment.
+        self._original_opcode = None
+
+        self._writer = FragmentedFrameBuilder(
+            self._options.mask_send, self._options.outgoing_frame_filters,
+            self._options.encode_text_message_to_utf8)
+
+        self._ping_queue = deque()
+
+    def _receive_frame(self):
+        """Receives a frame and returns its data as a tuple containing
+        each header field and the payload separately.
+
+        Raises:
+            ConnectionTerminatedException: when read returns empty
+                string.
+            InvalidFrameException: when the frame contains invalid data.
+        """
+
+        def _receive_bytes(length):
+            return self.receive_bytes(length)
+
+        return parse_frame(receive_bytes=_receive_bytes,
+                           logger=self._logger,
+                           ws_version=self._request.ws_version,
+                           unmask_receive=self._options.unmask_receive)
+
+    def _receive_frame_as_frame_object(self):
+        opcode, bytes, fin, rsv1, rsv2, rsv3 = self._receive_frame()
+
+        return Frame(fin=fin, rsv1=rsv1, rsv2=rsv2, rsv3=rsv3,
+                     opcode=opcode, payload=bytes)
+
+    def receive_filtered_frame(self):
+        """Receives a frame and applies frame filters and message filters.
+        The frame to be received must satisfy the following conditions:
+        - The frame is not fragmented.
+        - The opcode of the frame is TEXT or BINARY.
+
+        DO NOT USE this method except for testing purposes.
+        """
+
+        frame = self._receive_frame_as_frame_object()
+        if not frame.fin:
+            raise InvalidFrameException(
+                'Segmented frames must not be received via '
+                'receive_filtered_frame()')
+        if (frame.opcode != common.OPCODE_TEXT and
+            frame.opcode != common.OPCODE_BINARY):
+            raise InvalidFrameException(
+                'Control frames must not be received via '
+                'receive_filtered_frame()')
+
+        for frame_filter in self._options.incoming_frame_filters:
+            frame_filter.filter(frame)
+        for message_filter in self._options.incoming_message_filters:
+            frame.payload = message_filter.filter(frame.payload)
+        return frame
+
+    def send_message(self, message, end=True, binary=False):
+        """Send message.
+
+        Args:
+            message: text in unicode or binary in str to send.
+            binary: send message as binary frame.
+
+        Raises:
+            BadOperationException: when called on a server-terminated
+                connection or called with inconsistent message type or
+                binary parameter.
+        """
+
+        if self._request.server_terminated:
+            raise BadOperationException(
+                'Requested send_message after sending out a closing handshake')
+
+        if binary and isinstance(message, unicode):
+            raise BadOperationException(
+                'Message for binary frame must be instance of str')
+
+        for message_filter in self._options.outgoing_message_filters:
+            message = message_filter.filter(message, end, binary)
+
+        try:
+            # Set this to any positive integer to limit maximum size of data in
+            # payload data of each frame.
+            MAX_PAYLOAD_DATA_SIZE = -1
+
+            if MAX_PAYLOAD_DATA_SIZE <= 0:
+                self._write(self._writer.build(message, end, binary))
+                return
+
+            bytes_written = 0
+            while True:
+                end_for_this_frame = end
+                bytes_to_write = len(message) - bytes_written
+                if (MAX_PAYLOAD_DATA_SIZE > 0 and
+                    bytes_to_write > MAX_PAYLOAD_DATA_SIZE):
+                    end_for_this_frame = False
+                    bytes_to_write = MAX_PAYLOAD_DATA_SIZE
+
+                frame = self._writer.build(
+                    message[bytes_written:bytes_written + bytes_to_write],
+                    end_for_this_frame,
+                    binary)
+                self._write(frame)
+
+                bytes_written += bytes_to_write
+
+                # This if must be placed here (the end of while block) so that
+                # at least one frame is sent.
+                if len(message) <= bytes_written:
+                    break
+        except ValueError, e:
+            raise BadOperationException(e)
+
+    def _get_message_from_frame(self, frame):
+        """Gets a message from a frame. If the message is composed of fragmented
+        frames and the frame is not the last fragmented frame, this method
+        returns None. The whole message will be returned when the last
+        fragmented frame is passed to this method.
+
+        Raises:
+            InvalidFrameException: when the frame doesn't match defragmentation
+                context, or the frame contains invalid data.
+        """
+
+        if frame.opcode == common.OPCODE_CONTINUATION:
+            if not self._received_fragments:
+                if frame.fin:
+                    raise InvalidFrameException(
+                        'Received a termination frame but fragmentation '
+                        'not started')
+                else:
+                    raise InvalidFrameException(
+                        'Received an intermediate frame but '
+                        'fragmentation not started')
+
+            if frame.fin:
+                # End of fragmentation frame
+                self._received_fragments.append(frame.payload)
+                message = ''.join(self._received_fragments)
+                self._received_fragments = []
+                return message
+            else:
+                # Intermediate frame
+                self._received_fragments.append(frame.payload)
+                return None
+        else:
+            if self._received_fragments:
+                if frame.fin:
+                    raise InvalidFrameException(
+                        'Received an unfragmented frame without '
+                        'terminating existing fragmentation')
+                else:
+                    raise InvalidFrameException(
+                        'New fragmentation started without terminating '
+                        'existing fragmentation')
+
+            if frame.fin:
+                # Unfragmented frame
+
+                self._original_opcode = frame.opcode
+                return frame.payload
+            else:
+                # Start of fragmentation frame
+
+                if (not self._options.allow_fragmented_control_frame and
+                    common.is_control_opcode(frame.opcode)):
+                    raise InvalidFrameException(
+                        'Control frames must not be fragmented')
+
+                self._original_opcode = frame.opcode
+                self._received_fragments.append(frame.payload)
+                return None
+
+    def _process_close_message(self, message):
+        """Processes close message.
+
+        Args:
+            message: close message.
+
+        Raises:
+            InvalidFrameException: when the message is invalid.
+        """
+
+        self._request.client_terminated = True
+
+        # Status code is optional. We can have status reason only if we
+        # have status code. Status reason can be empty string. So,
+        # allowed cases are
+        # - no application data: no code no reason
+        # - 2 octet of application data: has code but no reason
+        # - 3 or more octet of application data: both code and reason
+        if len(message) == 0:
+            self._logger.debug('Received close frame (empty body)')
+            self._request.ws_close_code = (
+                common.STATUS_NO_STATUS_RECEIVED)
+        elif len(message) == 1:
+            raise InvalidFrameException(
+                'If a close frame has status code, the length of '
+                'status code must be 2 octet')
+        elif len(message) >= 2:
+            self._request.ws_close_code = struct.unpack(
+                '!H', message[0:2])[0]
+            self._request.ws_close_reason = message[2:].decode(
+                'utf-8', 'replace')
+            self._logger.debug(
+                'Received close frame (code=%d, reason=%r)',
+                self._request.ws_close_code,
+                self._request.ws_close_reason)
+
+        # Drain junk data after the close frame if necessary.
+        self._drain_received_data()
+
+        if self._request.server_terminated:
+            self._logger.debug(
+                'Received ack for server-initiated closing handshake')
+            return
+
+        self._logger.debug(
+            'Received client-initiated closing handshake')
+
+        code = common.STATUS_NORMAL_CLOSURE
+        reason = ''
+        if hasattr(self._request, '_dispatcher'):
+            dispatcher = self._request._dispatcher
+            code, reason = dispatcher.passive_closing_handshake(
+                self._request)
+            if code is None and reason is not None and len(reason) > 0:
+                self._logger.warning(
+                    'Handler specified reason despite code being None')
+                reason = ''
+            if reason is None:
+                reason = ''
+        self._send_closing_handshake(code, reason)
+        self._logger.debug(
+            'Sent ack for client-initiated closing handshake '
+            '(code=%r, reason=%r)', code, reason)
+
+    def _process_ping_message(self, message):
+        """Processes ping message.
+
+        Args:
+            message: ping message.
+        """
+
+        try:
+            handler = self._request.on_ping_handler
+            if handler:
+                handler(self._request, message)
+                return
+        except AttributeError, e:
+            pass
+        self._send_pong(message)
+
+    def _process_pong_message(self, message):
+        """Processes pong message.
+
+        Args:
+            message: pong message.
+        """
+
+        # TODO(tyoshino): Add ping timeout handling.
+
+        inflight_pings = deque()
+
+        while True:
+            try:
+                expected_body = self._ping_queue.popleft()
+                if expected_body == message:
+                    # inflight_pings contains pings ignored by the
+                    # other peer. Just forget them.
+                    self._logger.debug(
+                        'Ping %r is acked (%d pings were ignored)',
+                        expected_body, len(inflight_pings))
+                    break
+                else:
+                    inflight_pings.append(expected_body)
+            except IndexError, e:
+                # The received pong was an unsolicited pong. Restore
+                # the ping queue so the pending pings stay intact.
+                self._ping_queue = inflight_pings
+                self._logger.debug('Received an unsolicited pong')
+                break
+
+        try:
+            handler = self._request.on_pong_handler
+            if handler:
+                handler(self._request, message)
+        except AttributeError, e:
+            pass
+
+    def receive_message(self):
+        """Receive a WebSocket frame and return its payload as a text in
+        unicode or a binary in str.
+
+        Returns:
+            payload data of the frame
+            - as unicode instance if received text frame
+            - as str instance if received binary frame
+            or None iff received closing handshake.
+        Raises:
+            BadOperationException: when called on a client-terminated
+                connection.
+            ConnectionTerminatedException: when read returns empty
+                string.
+            InvalidFrameException: when the frame contains invalid
+                data.
+            UnsupportedFrameException: when the received frame has
+                flags or an opcode we cannot handle. You can ignore
+                this exception and continue receiving the next frame.
+        """
+
+        if self._request.client_terminated:
+            raise BadOperationException(
+                'Requested receive_message after receiving a closing '
+                'handshake')
+
+        while True:
+            # mp_conn.read will block if no bytes are available.
+            # Timeout is controlled by TimeOut directive of Apache.
+
+            frame = self._receive_frame_as_frame_object()
+
+            # Check the constraint on the payload size for control frames
+            # before extension processes the frame.
+            # See also http://tools.ietf.org/html/rfc6455#section-5.5
+            if (common.is_control_opcode(frame.opcode) and
+                len(frame.payload) > 125):
+                raise InvalidFrameException(
+                    'Payload data size of control frames must be 125 bytes or '
+                    'less')
+
+            for frame_filter in self._options.incoming_frame_filters:
+                frame_filter.filter(frame)
+
+            if frame.rsv1 or frame.rsv2 or frame.rsv3:
+                raise UnsupportedFrameException(
+                    'Unsupported flag is set (rsv = %d%d%d)' %
+                    (frame.rsv1, frame.rsv2, frame.rsv3))
+
+            message = self._get_message_from_frame(frame)
+            if message is None:
+                continue
+
+            for message_filter in self._options.incoming_message_filters:
+                message = message_filter.filter(message)
+
+            if self._original_opcode == common.OPCODE_TEXT:
+                # The WebSocket protocol section 4.4 specifies that invalid
+                # characters must be replaced with U+fffd REPLACEMENT
+                # CHARACTER.
+                try:
+                    return message.decode('utf-8')
+                except UnicodeDecodeError, e:
+                    raise InvalidUTF8Exception(e)
+            elif self._original_opcode == common.OPCODE_BINARY:
+                return message
+            elif self._original_opcode == common.OPCODE_CLOSE:
+                self._process_close_message(message)
+                return None
+            elif self._original_opcode == common.OPCODE_PING:
+                self._process_ping_message(message)
+            elif self._original_opcode == common.OPCODE_PONG:
+                self._process_pong_message(message)
+            else:
+                raise UnsupportedFrameException(
+                    'Opcode %d is not supported' % self._original_opcode)
+
+    def _send_closing_handshake(self, code, reason):
+        body = create_closing_handshake_body(code, reason)
+        frame = create_close_frame(
+            body, mask=self._options.mask_send,
+            frame_filters=self._options.outgoing_frame_filters)
+
+        self._request.server_terminated = True
+
+        self._write(frame)
+
+    def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason=''):
+        """Closes a WebSocket connection.
+
+        Args:
+            code: Status code for close frame. If code is None, a close
+                frame with empty body will be sent.
+            reason: string representing close reason.
+        Raises:
+            BadOperationException: when reason is specified with code None,
+                or when reason is neither a str nor a unicode instance.
+        """
+
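+        # For example, close_connection() sends code 1000 (normal closure)
+        # with an empty reason, while close_connection(code=None) sends a
+        # close frame with an empty body.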
+        if self._request.server_terminated:
+            self._logger.debug(
+                'Requested close_connection but server is already terminated')
+            return
+
+        if code is None:
+            if reason is not None and len(reason) > 0:
+                raise BadOperationException(
+                    'close reason must not be specified if code is None')
+            reason = ''
+        else:
+            if not isinstance(reason, str) and not isinstance(reason, unicode):
+                raise BadOperationException(
+                    'close reason must be an instance of str or unicode')
+
+        self._send_closing_handshake(code, reason)
+        self._logger.debug(
+            'Sent server-initiated closing handshake (code=%r, reason=%r)',
+            code, reason)
+
+        if (code == common.STATUS_GOING_AWAY or
+            code == common.STATUS_PROTOCOL_ERROR):
+            # It doesn't make sense to wait for a close frame if the reason
+            # is a protocol error or that the server is going away. For some
+            # other reasons it might not make sense to wait either, but that
+            # is not clear yet.
+            return
+
+        # TODO(ukai): 2. wait until the /client terminated/ flag has been set,
+        # or until a server-defined timeout expires.
+        #
+        # For now, we expect receiving closing handshake right after sending
+        # out closing handshake.
+        message = self.receive_message()
+        if message is not None:
+            raise ConnectionTerminatedException(
+                'Didn\'t receive valid ack for closing handshake')
+        # TODO: 3. close the WebSocket connection.
+        # note: mod_python Connection (mp_conn) doesn't have close method.
+
+    def send_ping(self, body=''):
+        frame = create_ping_frame(
+            body,
+            self._options.mask_send,
+            self._options.outgoing_frame_filters)
+        self._write(frame)
+
+        self._ping_queue.append(body)
+
+    def _send_pong(self, body):
+        frame = create_pong_frame(
+            body,
+            self._options.mask_send,
+            self._options.outgoing_frame_filters)
+        self._write(frame)
+
+    def get_last_received_opcode(self):
+        """Returns the opcode of the WebSocket message which the last received
+        frame belongs to. The return value is valid iff immediately after
+        receive_message call.
+        """
+
+        return self._original_opcode
+
+    def _drain_received_data(self):
+        """Drains unread data in the receive buffer to avoid sending out TCP
+        RST packet. This is because when deflate-stream is enabled, some
+        DEFLATE block for flushing data may follow a close frame. If any data
+        remains in the receive buffer of a socket when the socket is closed,
+        it sends out TCP RST packet to the other peer.
+
+        Since mod_python's mp_conn object doesn't support non-blocking read,
+        we perform this only when pywebsocket is running in standalone mode.
+        """
+
+        # If self._options.deflate_stream is true, self._request is
+        # DeflateRequest, so we can get wrapped request object by
+        # self._request._request.
+        #
+        # Only _StandaloneRequest has _drain_received_data method.
+        if (self._options.deflate_stream and
+            ('_drain_received_data' in dir(self._request._request))):
+            self._request._request._drain_received_data()
+
+
+# vi:sts=4 sw=4 et
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/common.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/common.py
new file mode 100644
index 0000000..2388379
--- /dev/null
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/common.py
@@ -0,0 +1,307 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file must not depend on any module specific to the WebSocket protocol.
+"""
+
+
+from mod_pywebsocket import http_header_util
+
+
+# Additional log level definitions.
+LOGLEVEL_FINE = 9
+
+# Constants indicating WebSocket protocol version.
+VERSION_HIXIE75 = -1
+VERSION_HYBI00 = 0
+VERSION_HYBI01 = 1
+VERSION_HYBI02 = 2
+VERSION_HYBI03 = 2
+VERSION_HYBI04 = 4
+VERSION_HYBI05 = 5
+VERSION_HYBI06 = 6
+VERSION_HYBI07 = 7
+VERSION_HYBI08 = 8
+VERSION_HYBI09 = 8
+VERSION_HYBI10 = 8
+VERSION_HYBI11 = 8
+VERSION_HYBI12 = 8
+VERSION_HYBI13 = 13
+VERSION_HYBI14 = 13
+VERSION_HYBI15 = 13
+VERSION_HYBI16 = 13
+VERSION_HYBI17 = 13
+
+# Constants indicating WebSocket protocol latest version.
+VERSION_HYBI_LATEST = VERSION_HYBI13
+
+# Port numbers
+DEFAULT_WEB_SOCKET_PORT = 80
+DEFAULT_WEB_SOCKET_SECURE_PORT = 443
+
+# Schemes
+WEB_SOCKET_SCHEME = 'ws'
+WEB_SOCKET_SECURE_SCHEME = 'wss'
+
+# Frame opcodes defined in the spec.
+OPCODE_CONTINUATION = 0x0
+OPCODE_TEXT = 0x1
+OPCODE_BINARY = 0x2
+OPCODE_CLOSE = 0x8
+OPCODE_PING = 0x9
+OPCODE_PONG = 0xa
+
+# UUIDs used by HyBi 04 and later opening handshake and frame masking.
+WEBSOCKET_ACCEPT_UUID = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
+
+# Opening handshake header names and expected values.
+UPGRADE_HEADER = 'Upgrade'
+WEBSOCKET_UPGRADE_TYPE = 'websocket'
+WEBSOCKET_UPGRADE_TYPE_HIXIE75 = 'WebSocket'
+CONNECTION_HEADER = 'Connection'
+UPGRADE_CONNECTION_TYPE = 'Upgrade'
+HOST_HEADER = 'Host'
+ORIGIN_HEADER = 'Origin'
+SEC_WEBSOCKET_ORIGIN_HEADER = 'Sec-WebSocket-Origin'
+SEC_WEBSOCKET_KEY_HEADER = 'Sec-WebSocket-Key'
+SEC_WEBSOCKET_ACCEPT_HEADER = 'Sec-WebSocket-Accept'
+SEC_WEBSOCKET_VERSION_HEADER = 'Sec-WebSocket-Version'
+SEC_WEBSOCKET_PROTOCOL_HEADER = 'Sec-WebSocket-Protocol'
+SEC_WEBSOCKET_EXTENSIONS_HEADER = 'Sec-WebSocket-Extensions'
+SEC_WEBSOCKET_DRAFT_HEADER = 'Sec-WebSocket-Draft'
+SEC_WEBSOCKET_KEY1_HEADER = 'Sec-WebSocket-Key1'
+SEC_WEBSOCKET_KEY2_HEADER = 'Sec-WebSocket-Key2'
+SEC_WEBSOCKET_LOCATION_HEADER = 'Sec-WebSocket-Location'
+
+# Extensions
+DEFLATE_STREAM_EXTENSION = 'deflate-stream'
+DEFLATE_FRAME_EXTENSION = 'deflate-frame'
+PERFRAME_COMPRESSION_EXTENSION = 'perframe-compress'
+PERMESSAGE_COMPRESSION_EXTENSION = 'permessage-compress'
+X_WEBKIT_DEFLATE_FRAME_EXTENSION = 'x-webkit-deflate-frame'
+X_WEBKIT_PERMESSAGE_COMPRESSION_EXTENSION = 'x-webkit-permessage-compress'
+MUX_EXTENSION = 'mux_DO_NOT_USE'
+
+# Status codes
+# The codes STATUS_NO_STATUS_RECEIVED, STATUS_ABNORMAL_CLOSURE, and
+# STATUS_TLS_HANDSHAKE are pseudo codes that indicate specific error cases.
+# They must not be used as codes in actual close frames.
+# Application-level errors must use codes in the range
+# STATUS_USER_REGISTERED_BASE to STATUS_USER_PRIVATE_MAX. Codes in the
+# range STATUS_USER_REGISTERED_BASE to STATUS_USER_REGISTERED_MAX are managed
+# by IANA. Usually an application should define its protocol-level errors in
+# the range STATUS_USER_PRIVATE_BASE to STATUS_USER_PRIVATE_MAX.
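+# For example, STATUS_PROTOCOL_ERROR (1002) may appear in an actual close
+# frame, while STATUS_NO_STATUS_RECEIVED (1005) and STATUS_ABNORMAL_CLOSURE
+# (1006) are only reported locally and never appear on the wire.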
+STATUS_NORMAL_CLOSURE = 1000
+STATUS_GOING_AWAY = 1001
+STATUS_PROTOCOL_ERROR = 1002
+STATUS_UNSUPPORTED_DATA = 1003
+STATUS_NO_STATUS_RECEIVED = 1005
+STATUS_ABNORMAL_CLOSURE = 1006
+STATUS_INVALID_FRAME_PAYLOAD_DATA = 1007
+STATUS_POLICY_VIOLATION = 1008
+STATUS_MESSAGE_TOO_BIG = 1009
+STATUS_MANDATORY_EXTENSION = 1010
+STATUS_INTERNAL_ENDPOINT_ERROR = 1011
+STATUS_TLS_HANDSHAKE = 1015
+STATUS_USER_REGISTERED_BASE = 3000
+STATUS_USER_REGISTERED_MAX = 3999
+STATUS_USER_PRIVATE_BASE = 4000
+STATUS_USER_PRIVATE_MAX = 4999
+# The following definitions are aliases kept for compatibility. Applications
+# must not use these obsolete definitions anymore.
+STATUS_NORMAL = STATUS_NORMAL_CLOSURE
+STATUS_UNSUPPORTED = STATUS_UNSUPPORTED_DATA
+STATUS_CODE_NOT_AVAILABLE = STATUS_NO_STATUS_RECEIVED
+STATUS_ABNORMAL_CLOSE = STATUS_ABNORMAL_CLOSURE
+STATUS_INVALID_FRAME_PAYLOAD = STATUS_INVALID_FRAME_PAYLOAD_DATA
+STATUS_MANDATORY_EXT = STATUS_MANDATORY_EXTENSION
+
+# HTTP status codes
+HTTP_STATUS_BAD_REQUEST = 400
+HTTP_STATUS_FORBIDDEN = 403
+HTTP_STATUS_NOT_FOUND = 404
+
+
+def is_control_opcode(opcode):
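+    # Control opcodes (close, ping, pong and the reserved control opcodes)
+    # occupy the range 0x8-0xf, so testing the high bit of the 4-bit opcode
+    # is sufficient.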
+    return (opcode >> 3) == 1
+
+
+class ExtensionParameter(object):
+    """Holds information about an extension which is exchanged on extension
+    negotiation in opening handshake.
+    """
+
+    def __init__(self, name):
+        self._name = name
+        # TODO(tyoshino): Change the data structure to more efficient one such
+        # as dict when the spec changes to say like
+        # - Parameter names must be unique
+        # - The order of parameters is not significant
+        self._parameters = []
+
+    def name(self):
+        return self._name
+
+    def add_parameter(self, name, value):
+        self._parameters.append((name, value))
+
+    def get_parameters(self):
+        return self._parameters
+
+    def get_parameter_names(self):
+        return [name for name, unused_value in self._parameters]
+
+    def has_parameter(self, name):
+        for param_name, param_value in self._parameters:
+            if param_name == name:
+                return True
+        return False
+
+    def get_parameter_value(self, name):
+        for param_name, param_value in self._parameters:
+            if param_name == name:
+                return param_value
+
+
+class ExtensionParsingException(Exception):
+    def __init__(self, name):
+        super(ExtensionParsingException, self).__init__(name)
+
+
+def _parse_extension_param(state, definition, allow_quoted_string):
+    param_name = http_header_util.consume_token(state)
+
+    if param_name is None:
+        raise ExtensionParsingException('No valid parameter name found')
+
+    http_header_util.consume_lwses(state)
+
+    if not http_header_util.consume_string(state, '='):
+        definition.add_parameter(param_name, None)
+        return
+
+    http_header_util.consume_lwses(state)
+
+    if allow_quoted_string:
+        # TODO(toyoshim): Add code to validate that parsed param_value is token
+        param_value = http_header_util.consume_token_or_quoted_string(state)
+    else:
+        param_value = http_header_util.consume_token(state)
+    if param_value is None:
+        raise ExtensionParsingException(
+            'No valid parameter value found on the right-hand side of '
+            'parameter %r' % param_name)
+
+    definition.add_parameter(param_name, param_value)
+
+
+def _parse_extension(state, allow_quoted_string):
+    extension_token = http_header_util.consume_token(state)
+    if extension_token is None:
+        return None
+
+    extension = ExtensionParameter(extension_token)
+
+    while True:
+        http_header_util.consume_lwses(state)
+
+        if not http_header_util.consume_string(state, ';'):
+            break
+
+        http_header_util.consume_lwses(state)
+
+        try:
+            _parse_extension_param(state, extension, allow_quoted_string)
+        except ExtensionParsingException, e:
+            raise ExtensionParsingException(
+                'Failed to parse parameter for %r (%r)' %
+                (extension_token, e))
+
+    return extension
+
+
+def parse_extensions(data, allow_quoted_string=False):
+    """Parses Sec-WebSocket-Extensions header value returns a list of
+    ExtensionParameter objects.
+
+    Leading LWSes must be trimmed.
+    """
+
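+    # For example (hypothetical header value, for illustration only):
+    #   parse_extensions('deflate-frame; max_window_bits=10, mux_DO_NOT_USE')
+    # yields two ExtensionParameter objects; the first one carries the
+    # parameter ('max_window_bits', '10').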
+    state = http_header_util.ParsingState(data)
+
+    extension_list = []
+    while True:
+        extension = _parse_extension(state, allow_quoted_string)
+        if extension is not None:
+            extension_list.append(extension)
+
+        http_header_util.consume_lwses(state)
+
+        if http_header_util.peek(state) is None:
+            break
+
+        if not http_header_util.consume_string(state, ','):
+            raise ExtensionParsingException(
+                'Failed to parse Sec-WebSocket-Extensions header: '
+                'Expected a comma but found %r' %
+                http_header_util.peek(state))
+
+        http_header_util.consume_lwses(state)
+
+    if len(extension_list) == 0:
+        raise ExtensionParsingException(
+            'No valid extension entry found')
+
+    return extension_list
+
+
+def format_extension(extension):
+    """Formats an ExtensionParameter object."""
+
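+    # For example, an ExtensionParameter named 'permessage-compress' with
+    # the single parameter ('method', 'deflate') is formatted as
+    # 'permessage-compress; method=deflate'.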
+    formatted_params = [extension.name()]
+    for param_name, param_value in extension.get_parameters():
+        if param_value is None:
+            formatted_params.append(param_name)
+        else:
+            quoted_value = http_header_util.quote_if_necessary(param_value)
+            formatted_params.append('%s=%s' % (param_name, quoted_value))
+    return '; '.join(formatted_params)
+
+
+def format_extensions(extension_list):
+    """Formats a list of ExtensionParameter objects."""
+
+    formatted_extension_list = []
+    for extension in extension_list:
+        formatted_extension_list.append(format_extension(extension))
+    return ', '.join(formatted_extension_list)
+
+
+# vi:sts=4 sw=4 et
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/dispatch.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/dispatch.py
new file mode 100644
index 0000000..25905f1
--- /dev/null
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/dispatch.py
@@ -0,0 +1,387 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Dispatch WebSocket request.
+"""
+
+
+import logging
+import os
+import re
+
+from mod_pywebsocket import common
+from mod_pywebsocket import handshake
+from mod_pywebsocket import msgutil
+from mod_pywebsocket import mux
+from mod_pywebsocket import stream
+from mod_pywebsocket import util
+
+
+_SOURCE_PATH_PATTERN = re.compile(r'(?i)_wsh\.py$')
+_SOURCE_SUFFIX = '_wsh.py'
+_DO_EXTRA_HANDSHAKE_HANDLER_NAME = 'web_socket_do_extra_handshake'
+_TRANSFER_DATA_HANDLER_NAME = 'web_socket_transfer_data'
+_PASSIVE_CLOSING_HANDSHAKE_HANDLER_NAME = (
+    'web_socket_passive_closing_handshake')
+
+
+class DispatchException(Exception):
+    """Exception in dispatching WebSocket request."""
+
+    def __init__(self, name, status=common.HTTP_STATUS_NOT_FOUND):
+        super(DispatchException, self).__init__(name)
+        self.status = status
+
+
+def _default_passive_closing_handshake_handler(request):
+    """Default web_socket_passive_closing_handshake handler."""
+
+    return common.STATUS_NORMAL_CLOSURE, ''
+
+
+def _normalize_path(path):
+    """Normalize path.
+
+    Args:
+        path: the path to normalize.
+
+    Path is converted to the absolute path.
+    The input path can use either '\\' or '/' as the separator.
+    The normalized path always uses '/' regardless of the platform.
+    """
+
+    path = path.replace('\\', os.path.sep)
+    path = os.path.realpath(path)
+    path = path.replace('\\', '/')
+    return path
+
+
+def _create_path_to_resource_converter(base_dir):
+    """Returns a function that converts the path of a WebSocket handler source
+    file to a resource string by removing the path to the base directory from
+    its head, removing _SOURCE_SUFFIX from its tail, and replacing path
+    separators in it with '/'.
+
+    Args:
+        base_dir: the path to the base directory.
+    """
+
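+    # For example, with base_dir '/srv/handlers' (a hypothetical path), the
+    # handler file '/srv/handlers/echo_wsh.py' is converted to the resource
+    # '/echo'.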
+    base_dir = _normalize_path(base_dir)
+
+    base_len = len(base_dir)
+    suffix_len = len(_SOURCE_SUFFIX)
+
+    def converter(path):
+        if not path.endswith(_SOURCE_SUFFIX):
+            return None
+        # _normalize_path must not be used here because resolving symlinks
+        # would break the following path check.
+        path = path.replace('\\', '/')
+        if not path.startswith(base_dir):
+            return None
+        return path[base_len:-suffix_len]
+
+    return converter
+
+
+def _enumerate_handler_file_paths(directory):
+    """Returns a generator that enumerates WebSocket Handler source file names
+    in the given directory.
+    """
+
+    for root, unused_dirs, files in os.walk(directory):
+        for base in files:
+            path = os.path.join(root, base)
+            if _SOURCE_PATH_PATTERN.search(path):
+                yield path
+
+
+class _HandlerSuite(object):
+    """A handler suite holder class."""
+
+    def __init__(self, do_extra_handshake, transfer_data,
+                 passive_closing_handshake):
+        self.do_extra_handshake = do_extra_handshake
+        self.transfer_data = transfer_data
+        self.passive_closing_handshake = passive_closing_handshake
+
+
+def _source_handler_file(handler_definition):
+    """Source a handler definition string.
+
+    Args:
+        handler_definition: a string containing Python statements that define
+                            handler functions.
+    """
+
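+    # A handler definition is typically the content of a *_wsh.py file that
+    # defines web_socket_do_extra_handshake and web_socket_transfer_data
+    # (and, optionally, web_socket_passive_closing_handshake).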
+    global_dic = {}
+    try:
+        exec handler_definition in global_dic
+    except Exception:
+        raise DispatchException('Error in sourcing handler: ' +
+                                util.get_stack_trace())
+    passive_closing_handshake_handler = None
+    try:
+        passive_closing_handshake_handler = _extract_handler(
+            global_dic, _PASSIVE_CLOSING_HANDSHAKE_HANDLER_NAME)
+    except Exception:
+        passive_closing_handshake_handler = (
+            _default_passive_closing_handshake_handler)
+    return _HandlerSuite(
+        _extract_handler(global_dic, _DO_EXTRA_HANDSHAKE_HANDLER_NAME),
+        _extract_handler(global_dic, _TRANSFER_DATA_HANDLER_NAME),
+        passive_closing_handshake_handler)
+
+
+def _extract_handler(dic, name):
+    """Extracts a callable with the specified name from the given dictionary
+    dic.
+    """
+
+    if name not in dic:
+        raise DispatchException('%s is not defined.' % name)
+    handler = dic[name]
+    if not callable(handler):
+        raise DispatchException('%s is not callable.' % name)
+    return handler
+
+
+class Dispatcher(object):
+    """Dispatches WebSocket requests.
+
+    This class maintains a map from resource name to handlers.
+    """
+
+    def __init__(
+        self, root_dir, scan_dir=None,
+        allow_handlers_outside_root_dir=True):
+        """Construct an instance.
+
+        Args:
+            root_dir: The directory where handler definition files are
+                      placed.
+            scan_dir: The directory where handler definition files are
+                      searched. scan_dir must be a directory under root_dir,
+                      including root_dir itself.  If scan_dir is None,
+                      root_dir is used as scan_dir. scan_dir can be useful
+                      in saving scan time when root_dir contains many
+                      subdirectories.
+            allow_handlers_outside_root_dir: if True, handler files are
+                      scanned even if their canonical path is not under
+                      root_dir.
+        """
+
+        self._logger = util.get_class_logger(self)
+
+        self._handler_suite_map = {}
+        self._source_warnings = []
+        if scan_dir is None:
+            scan_dir = root_dir
+        if not os.path.realpath(scan_dir).startswith(
+                os.path.realpath(root_dir)):
+            raise DispatchException('scan_dir:%s must be a directory under '
+                                    'root_dir:%s.' % (scan_dir, root_dir))
+        self._source_handler_files_in_dir(
+            root_dir, scan_dir, allow_handlers_outside_root_dir)
+
+    def add_resource_path_alias(self,
+                                alias_resource_path, existing_resource_path):
+        """Add resource path alias.
+
+        Once added, requests to alias_resource_path will be handled by the
+        handler registered for existing_resource_path.
+
+        Args:
+            alias_resource_path: alias resource path
+            existing_resource_path: existing resource path
+        """
+        try:
+            handler_suite = self._handler_suite_map[existing_resource_path]
+            self._handler_suite_map[alias_resource_path] = handler_suite
+        except KeyError:
+            raise DispatchException('No handler for: %r' %
+                                    existing_resource_path)
+
+    def source_warnings(self):
+        """Return warnings in sourcing handlers."""
+
+        return self._source_warnings
+
+    def do_extra_handshake(self, request):
+        """Do extra checking in WebSocket handshake.
+
+        Select a handler based on request.uri and call its
+        web_socket_do_extra_handshake function.
+
+        Args:
+            request: mod_python request.
+
+        Raises:
+            DispatchException: when no handler was found
+            AbortedByUserException: when the user handler aborts the
+                connection
+            HandshakeException: when the opening handshake fails
+        """
+
+        handler_suite = self.get_handler_suite(request.ws_resource)
+        if handler_suite is None:
+            raise DispatchException('No handler for: %r' % request.ws_resource)
+        do_extra_handshake_ = handler_suite.do_extra_handshake
+        try:
+            do_extra_handshake_(request)
+        except handshake.AbortedByUserException, e:
+            raise
+        except Exception, e:
+            util.prepend_message_to_exception(
+                    '%s raised exception for %s: ' % (
+                            _DO_EXTRA_HANDSHAKE_HANDLER_NAME,
+                            request.ws_resource),
+                    e)
+            raise handshake.HandshakeException(e, common.HTTP_STATUS_FORBIDDEN)
+
+    def transfer_data(self, request):
+        """Let a handler transfer_data with a WebSocket client.
+
+        Select a handler based on request.ws_resource and call its
+        web_socket_transfer_data function.
+
+        Args:
+            request: mod_python request.
+
+        Raises:
+            DispatchException: when no handler was found
+            AbortedByUserException: when the user handler aborts the
+                connection
+        """
+
+        # TODO(tyoshino): Terminate underlying TCP connection if possible.
+        try:
+            if mux.use_mux(request):
+                mux.start(request, self)
+            else:
+                handler_suite = self.get_handler_suite(request.ws_resource)
+                if handler_suite is None:
+                    raise DispatchException('No handler for: %r' %
+                                            request.ws_resource)
+                transfer_data_ = handler_suite.transfer_data
+                transfer_data_(request)
+
+            if not request.server_terminated:
+                request.ws_stream.close_connection()
+        # Catch non-critical exceptions the handler didn't handle.
+        except handshake.AbortedByUserException, e:
+            self._logger.debug('%s', e)
+            raise
+        except msgutil.BadOperationException, e:
+            self._logger.debug('%s', e)
+            request.ws_stream.close_connection(common.STATUS_ABNORMAL_CLOSURE)
+        except msgutil.InvalidFrameException, e:
+            # InvalidFrameException must be caught before
+            # ConnectionTerminatedException, which would otherwise also
+            # catch InvalidFrameException.
+            self._logger.debug('%s', e)
+            request.ws_stream.close_connection(common.STATUS_PROTOCOL_ERROR)
+        except msgutil.UnsupportedFrameException, e:
+            self._logger.debug('%s', e)
+            request.ws_stream.close_connection(common.STATUS_UNSUPPORTED_DATA)
+        except stream.InvalidUTF8Exception, e:
+            self._logger.debug('%s', e)
+            request.ws_stream.close_connection(
+                common.STATUS_INVALID_FRAME_PAYLOAD_DATA)
+        except msgutil.ConnectionTerminatedException, e:
+            self._logger.debug('%s', e)
+        except Exception, e:
+            util.prepend_message_to_exception(
+                '%s raised exception for %s: ' % (
+                    _TRANSFER_DATA_HANDLER_NAME, request.ws_resource),
+                e)
+            raise
+
+    def passive_closing_handshake(self, request):
+        """Prepare code and reason for responding client initiated closing
+        handshake.
+        """
+
+        handler_suite = self.get_handler_suite(request.ws_resource)
+        if handler_suite is None:
+            return _default_passive_closing_handshake_handler(request)
+        return handler_suite.passive_closing_handshake(request)
+
+    def get_handler_suite(self, resource):
+        """Retrieves two handlers (one for extra handshake processing, and one
+        for data transfer) for the given request as a HandlerSuite object.
+        """
+
+        fragment = None
+        if '#' in resource:
+            resource, fragment = resource.split('#', 1)
+        if '?' in resource:
+            resource = resource.split('?', 1)[0]
+        handler_suite = self._handler_suite_map.get(resource)
+        if handler_suite and fragment:
+            raise DispatchException('Fragment identifiers MUST NOT be used on '
+                                    'WebSocket URIs',
+                                    common.HTTP_STATUS_BAD_REQUEST)
+        return handler_suite
+
+    def _source_handler_files_in_dir(
+        self, root_dir, scan_dir, allow_handlers_outside_root_dir):
+        """Source all the handler source files in the scan_dir directory.
+
+        The resource path is determined relative to root_dir.
+        """
+
+        # We build a map from resource to handler code assuming that there is
+        # only one path from root_dir to scan_dir and that it can be obtained
+        # by comparing their realpaths.
+
+        # Here we cannot use abspath. See
+        # https://bugs.webkit.org/show_bug.cgi?id=31603
+
+        convert = _create_path_to_resource_converter(root_dir)
+        scan_realpath = os.path.realpath(scan_dir)
+        root_realpath = os.path.realpath(root_dir)
+        for path in _enumerate_handler_file_paths(scan_realpath):
+            if (not allow_handlers_outside_root_dir and
+                (not os.path.realpath(path).startswith(root_realpath))):
+                self._logger.debug(
+                    'Canonical path of %s is not under root directory' %
+                    path)
+                continue
+            try:
+                handler_suite = _source_handler_file(open(path).read())
+            except DispatchException, e:
+                self._source_warnings.append('%s: %s' % (path, e))
+                continue
+            resource = convert(path)
+            if resource is None:
+                self._logger.debug(
+                    'Path to resource conversion on %s failed' % path)
+            else:
+                self._handler_suite_map[convert(path)] = handler_suite
+
+
+# vi:sts=4 sw=4 et
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/extensions.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/extensions.py
new file mode 100644
index 0000000..03dbf9e
--- /dev/null
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/extensions.py
@@ -0,0 +1,727 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+from mod_pywebsocket import common
+from mod_pywebsocket import util
+from mod_pywebsocket.http_header_util import quote_if_necessary
+
+
+_available_processors = {}
+
+
+class ExtensionProcessorInterface(object):
+
+    def name(self):
+        return None
+
+    def get_extension_response(self):
+        return None
+
+    def setup_stream_options(self, stream_options):
+        pass
+
+
+class DeflateStreamExtensionProcessor(ExtensionProcessorInterface):
+    """WebSocket DEFLATE stream extension processor.
+
+    Specification:
+    Section 9.2.1 in
+    http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-10
+    """
+
+    def __init__(self, request):
+        self._logger = util.get_class_logger(self)
+
+        self._request = request
+
+    def name(self):
+        return common.DEFLATE_STREAM_EXTENSION
+
+    def get_extension_response(self):
+        if len(self._request.get_parameter_names()) != 0:
+            return None
+
+        self._logger.debug(
+            'Enable %s extension', common.DEFLATE_STREAM_EXTENSION)
+
+        return common.ExtensionParameter(common.DEFLATE_STREAM_EXTENSION)
+
+    def setup_stream_options(self, stream_options):
+        stream_options.deflate_stream = True
+
+
+_available_processors[common.DEFLATE_STREAM_EXTENSION] = (
+    DeflateStreamExtensionProcessor)
+
+
+def _log_compression_ratio(logger, original_bytes, total_original_bytes,
+                           filtered_bytes, total_filtered_bytes):
+    # Print inf when ratio is not available.
+    ratio = float('inf')
+    average_ratio = float('inf')
+    if original_bytes != 0:
+        ratio = float(filtered_bytes) / original_bytes
+    if total_original_bytes != 0:
+        average_ratio = (
+            float(total_filtered_bytes) / total_original_bytes)
+    logger.debug('Outgoing compress ratio: %f (average: %f)' %
+        (ratio, average_ratio))
+
+
+def _log_decompression_ratio(logger, received_bytes, total_received_bytes,
+                             filtered_bytes, total_filtered_bytes):
+    # Print inf when ratio is not available.
+    ratio = float('inf')
+    average_ratio = float('inf')
+    if filtered_bytes != 0:
+        ratio = float(received_bytes) / filtered_bytes
+    if total_filtered_bytes != 0:
+        average_ratio = (
+            float(total_received_bytes) / total_filtered_bytes)
+    logger.debug('Incoming compress ratio: %f (average: %f)' %
+        (ratio, average_ratio))
+
+
+class DeflateFrameExtensionProcessor(ExtensionProcessorInterface):
+    """WebSocket Per-frame DEFLATE extension processor.
+
+    Specification:
+    http://tools.ietf.org/html/draft-tyoshino-hybi-websocket-perframe-deflate
+    """
+
+    _WINDOW_BITS_PARAM = 'max_window_bits'
+    _NO_CONTEXT_TAKEOVER_PARAM = 'no_context_takeover'
+
+    def __init__(self, request):
+        self._logger = util.get_class_logger(self)
+
+        self._request = request
+
+        self._response_window_bits = None
+        self._response_no_context_takeover = False
+        self._bfinal = False
+
+        # Counters for statistics.
+
+        # Total number of outgoing bytes supplied to this filter.
+        self._total_outgoing_payload_bytes = 0
+        # Total number of bytes sent to the network after applying this filter.
+        self._total_filtered_outgoing_payload_bytes = 0
+
+        # Total number of bytes received from the network.
+        self._total_incoming_payload_bytes = 0
+        # Total number of incoming bytes obtained after applying this filter.
+        self._total_filtered_incoming_payload_bytes = 0
+
+    def name(self):
+        return common.DEFLATE_FRAME_EXTENSION
+
+    def get_extension_response(self):
+        # Any unknown parameter will be just ignored.
+
+        window_bits = self._request.get_parameter_value(
+            self._WINDOW_BITS_PARAM)
+        no_context_takeover = self._request.has_parameter(
+            self._NO_CONTEXT_TAKEOVER_PARAM)
+        if (no_context_takeover and
+            self._request.get_parameter_value(
+                self._NO_CONTEXT_TAKEOVER_PARAM) is not None):
+            return None
+
+        if window_bits is not None:
+            try:
+                window_bits = int(window_bits)
+            except ValueError, e:
+                return None
+            if window_bits < 8 or window_bits > 15:
+                return None
+
+        self._deflater = util._RFC1979Deflater(
+            window_bits, no_context_takeover)
+
+        self._inflater = util._RFC1979Inflater()
+
+        self._compress_outgoing = True
+
+        response = common.ExtensionParameter(self._request.name())
+
+        if self._response_window_bits is not None:
+            response.add_parameter(
+                self._WINDOW_BITS_PARAM, str(self._response_window_bits))
+        if self._response_no_context_takeover:
+            response.add_parameter(
+                self._NO_CONTEXT_TAKEOVER_PARAM, None)
+
+        self._logger.debug(
+            'Enable %s extension ('
+            'request: window_bits=%s; no_context_takeover=%r, '
+            'response: window_bits=%s; no_context_takeover=%r)' %
+            (self._request.name(),
+             window_bits,
+             no_context_takeover,
+             self._response_window_bits,
+             self._response_no_context_takeover))
+
+        return response
+
+    def setup_stream_options(self, stream_options):
+
+        class _OutgoingFilter(object):
+
+            def __init__(self, parent):
+                self._parent = parent
+
+            def filter(self, frame):
+                self._parent._outgoing_filter(frame)
+
+        class _IncomingFilter(object):
+
+            def __init__(self, parent):
+                self._parent = parent
+
+            def filter(self, frame):
+                self._parent._incoming_filter(frame)
+
+        stream_options.outgoing_frame_filters.append(
+            _OutgoingFilter(self))
+        stream_options.incoming_frame_filters.insert(
+            0, _IncomingFilter(self))
+
+    def set_response_window_bits(self, value):
+        self._response_window_bits = value
+
+    def set_response_no_context_takeover(self, value):
+        self._response_no_context_takeover = value
+
+    def set_bfinal(self, value):
+        self._bfinal = value
+
+    def enable_outgoing_compression(self):
+        self._compress_outgoing = True
+
+    def disable_outgoing_compression(self):
+        self._compress_outgoing = False
+
+    def _outgoing_filter(self, frame):
+        """Transform outgoing frames. This method is called only by
+        an _OutgoingFilter instance.
+        """
+
+        original_payload_size = len(frame.payload)
+        self._total_outgoing_payload_bytes += original_payload_size
+
+        if (not self._compress_outgoing or
+            common.is_control_opcode(frame.opcode)):
+            self._total_filtered_outgoing_payload_bytes += (
+                original_payload_size)
+            return
+
+        frame.payload = self._deflater.filter(
+            frame.payload, bfinal=self._bfinal)
+        frame.rsv1 = 1
+
+        filtered_payload_size = len(frame.payload)
+        self._total_filtered_outgoing_payload_bytes += filtered_payload_size
+
+        _log_compression_ratio(self._logger, original_payload_size,
+                               self._total_outgoing_payload_bytes,
+                               filtered_payload_size,
+                               self._total_filtered_outgoing_payload_bytes)
+
+    def _incoming_filter(self, frame):
+        """Transform incoming frames. This method is called only by
+        an _IncomingFilter instance.
+        """
+
+        received_payload_size = len(frame.payload)
+        self._total_incoming_payload_bytes += received_payload_size
+
+        if frame.rsv1 != 1 or common.is_control_opcode(frame.opcode):
+            self._total_filtered_incoming_payload_bytes += (
+                received_payload_size)
+            return
+
+        frame.payload = self._inflater.filter(frame.payload)
+        frame.rsv1 = 0
+
+        filtered_payload_size = len(frame.payload)
+        self._total_filtered_incoming_payload_bytes += filtered_payload_size
+
+        _log_decompression_ratio(self._logger, received_payload_size,
+                                 self._total_incoming_payload_bytes,
+                                 filtered_payload_size,
+                                 self._total_filtered_incoming_payload_bytes)
+
+
+_available_processors[common.DEFLATE_FRAME_EXTENSION] = (
+    DeflateFrameExtensionProcessor)
+
+
+# Adding vendor-prefixed deflate-frame extension.
+# TODO(bashi): Remove this after WebKit stops using vendor prefix.
+_available_processors[common.X_WEBKIT_DEFLATE_FRAME_EXTENSION] = (
+    DeflateFrameExtensionProcessor)
+
+
+def _parse_compression_method(data):
+    """Parses the value of "method" extension parameter."""
+
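+    # For example, an offer like (illustrative value only)
+    #   permessage-compress; method="deflate; s2c_max_window_bits=10"
+    # carries 'deflate; s2c_max_window_bits=10' as the "method" value, which
+    # is parsed with the same grammar as Sec-WebSocket-Extensions (hence
+    # allow_quoted_string=True).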
+    return common.parse_extensions(data, allow_quoted_string=True)
+
+
+def _create_accepted_method_desc(method_name, method_params):
+    """Creates accepted-method-desc from given method name and parameters"""
+
+    extension = common.ExtensionParameter(method_name)
+    for name, value in method_params:
+        extension.add_parameter(name, value)
+    return common.format_extension(extension)
+
+
+class CompressionExtensionProcessorBase(ExtensionProcessorInterface):
+    """Base class for Per-frame and Per-message compression extension."""
+
+    _METHOD_PARAM = 'method'
+
+    def __init__(self, request):
+        self._logger = util.get_class_logger(self)
+        self._request = request
+        self._compression_method_name = None
+        self._compression_processor = None
+        self._compression_processor_hook = None
+
+    def name(self):
+        return ''
+
+    def _lookup_compression_processor(self, method_desc):
+        return None
+
+    def _get_compression_processor_response(self):
+        """Looks up the compression processor based on the self._request and
+           returns the compression processor's response.
+        """
+
+        method_list = self._request.get_parameter_value(self._METHOD_PARAM)
+        if method_list is None:
+            return None
+        methods = _parse_compression_method(method_list)
+        if methods is None:
+            return None
+        compression_processor = None
+        # The current implementation tries only the first method that matches
+        # a supported algorithm. The following methods aren't tried even if
+        # the first one is rejected.
+        # TODO(bashi): Need to clarify this behavior.
+        for method_desc in methods:
+            compression_processor = self._lookup_compression_processor(
+                method_desc)
+            if compression_processor is not None:
+                self._compression_method_name = method_desc.name()
+                break
+        if compression_processor is None:
+            return None
+
+        if self._compression_processor_hook:
+            self._compression_processor_hook(compression_processor)
+
+        processor_response = compression_processor.get_extension_response()
+        if processor_response is None:
+            return None
+        self._compression_processor = compression_processor
+        return processor_response
+
+    def get_extension_response(self):
+        processor_response = self._get_compression_processor_response()
+        if processor_response is None:
+            return None
+
+        response = common.ExtensionParameter(self._request.name())
+        accepted_method_desc = _create_accepted_method_desc(
+                                   self._compression_method_name,
+                                   processor_response.get_parameters())
+        response.add_parameter(self._METHOD_PARAM, accepted_method_desc)
+        self._logger.debug(
+            'Enable %s extension (method: %s)' %
+            (self._request.name(), self._compression_method_name))
+        return response
+
+    def setup_stream_options(self, stream_options):
+        if self._compression_processor is None:
+            return
+        self._compression_processor.setup_stream_options(stream_options)
+
+    def set_compression_processor_hook(self, hook):
+        self._compression_processor_hook = hook
+
+    def get_compression_processor(self):
+        return self._compression_processor
+
+
+class PerFrameCompressionExtensionProcessor(CompressionExtensionProcessorBase):
+    """WebSocket Per-frame compression extension processor.
+
+    Specification:
+    http://tools.ietf.org/html/draft-ietf-hybi-websocket-perframe-compression
+    """
+
+    _DEFLATE_METHOD = 'deflate'
+
+    def __init__(self, request):
+        CompressionExtensionProcessorBase.__init__(self, request)
+
+    def name(self):
+        return common.PERFRAME_COMPRESSION_EXTENSION
+
+    def _lookup_compression_processor(self, method_desc):
+        if method_desc.name() == self._DEFLATE_METHOD:
+            return DeflateFrameExtensionProcessor(method_desc)
+        return None
+
+
+_available_processors[common.PERFRAME_COMPRESSION_EXTENSION] = (
+    PerFrameCompressionExtensionProcessor)
+
+
+class DeflateMessageProcessor(ExtensionProcessorInterface):
+    """Per-message deflate processor."""
+
+    _S2C_MAX_WINDOW_BITS_PARAM = 's2c_max_window_bits'
+    _S2C_NO_CONTEXT_TAKEOVER_PARAM = 's2c_no_context_takeover'
+    _C2S_MAX_WINDOW_BITS_PARAM = 'c2s_max_window_bits'
+    _C2S_NO_CONTEXT_TAKEOVER_PARAM = 'c2s_no_context_takeover'
+
+    def __init__(self, request):
+        self._request = request
+        self._logger = util.get_class_logger(self)
+
+        self._c2s_max_window_bits = None
+        self._c2s_no_context_takeover = False
+        self._bfinal = False
+
+        self._compress_outgoing_enabled = False
+
+        # True if a message is fragmented and compression is ongoing.
+        self._compress_ongoing = False
+
+        # Counters for statistics.
+
+        # Total number of outgoing bytes supplied to this filter.
+        self._total_outgoing_payload_bytes = 0
+        # Total number of bytes sent to the network after applying this filter.
+        self._total_filtered_outgoing_payload_bytes = 0
+
+        # Total number of bytes received from the network.
+        self._total_incoming_payload_bytes = 0
+        # Total number of incoming bytes obtained after applying this filter.
+        self._total_filtered_incoming_payload_bytes = 0
+
+    def name(self):
+        return 'deflate'
+
+    def get_extension_response(self):
+        # Any unknown parameter will be just ignored.
+
+        s2c_max_window_bits = self._request.get_parameter_value(
+            self._S2C_MAX_WINDOW_BITS_PARAM)
+        if s2c_max_window_bits is not None:
+            try:
+                s2c_max_window_bits = int(s2c_max_window_bits)
+            except ValueError, e:
+                return None
+            if s2c_max_window_bits < 8 or s2c_max_window_bits > 15:
+                return None
+
+        s2c_no_context_takeover = self._request.has_parameter(
+            self._S2C_NO_CONTEXT_TAKEOVER_PARAM)
+        if (s2c_no_context_takeover and
+            self._request.get_parameter_value(
+                self._S2C_NO_CONTEXT_TAKEOVER_PARAM) is not None):
+            return None
+
+        self._deflater = util._RFC1979Deflater(
+            s2c_max_window_bits, s2c_no_context_takeover)
+
+        self._inflater = util._RFC1979Inflater()
+
+        self._compress_outgoing_enabled = True
+
+        response = common.ExtensionParameter(self._request.name())
+
+        if s2c_max_window_bits is not None:
+            response.add_parameter(
+                self._S2C_MAX_WINDOW_BITS_PARAM, str(s2c_max_window_bits))
+
+        if s2c_no_context_takeover:
+            response.add_parameter(
+                self._S2C_NO_CONTEXT_TAKEOVER_PARAM, None)
+
+        if self._c2s_max_window_bits is not None:
+            response.add_parameter(
+                self._C2S_MAX_WINDOW_BITS_PARAM,
+                str(self._c2s_max_window_bits))
+        if self._c2s_no_context_takeover:
+            response.add_parameter(
+                self._C2S_NO_CONTEXT_TAKEOVER_PARAM, None)
+
+        self._logger.debug(
+            'Enable %s extension ('
+            'request: s2c_max_window_bits=%s; s2c_no_context_takeover=%r, '
+            'response: c2s_max_window_bits=%s; c2s_no_context_takeover=%r)' %
+            (self._request.name(),
+             s2c_max_window_bits,
+             s2c_no_context_takeover,
+             self._c2s_max_window_bits,
+             self._c2s_no_context_takeover))
+
+        return response
+
+    def setup_stream_options(self, stream_options):
+        class _OutgoingMessageFilter(object):
+
+            def __init__(self, parent):
+                self._parent = parent
+
+            def filter(self, message, end=True, binary=False):
+                return self._parent._process_outgoing_message(
+                    message, end, binary)
+
+        class _IncomingMessageFilter(object):
+
+            def __init__(self, parent):
+                self._parent = parent
+                self._decompress_next_message = False
+
+            def decompress_next_message(self):
+                self._decompress_next_message = True
+
+            def filter(self, message):
+                message = self._parent._process_incoming_message(
+                    message, self._decompress_next_message)
+                self._decompress_next_message = False
+                return message
+
+        self._outgoing_message_filter = _OutgoingMessageFilter(self)
+        self._incoming_message_filter = _IncomingMessageFilter(self)
+        stream_options.outgoing_message_filters.append(
+            self._outgoing_message_filter)
+        stream_options.incoming_message_filters.append(
+            self._incoming_message_filter)
+
+        class _OutgoingFrameFilter(object):
+
+            def __init__(self, parent):
+                self._parent = parent
+                self._set_compression_bit = False
+
+            def set_compression_bit(self):
+                self._set_compression_bit = True
+
+            def filter(self, frame):
+                self._parent._process_outgoing_frame(
+                    frame, self._set_compression_bit)
+                self._set_compression_bit = False
+
+        class _IncomingFrameFilter(object):
+
+            def __init__(self, parent):
+                self._parent = parent
+
+            def filter(self, frame):
+                self._parent._process_incoming_frame(frame)
+
+        self._outgoing_frame_filter = _OutgoingFrameFilter(self)
+        self._incoming_frame_filter = _IncomingFrameFilter(self)
+        stream_options.outgoing_frame_filters.append(
+            self._outgoing_frame_filter)
+        stream_options.incoming_frame_filters.append(
+            self._incoming_frame_filter)
+
+        stream_options.encode_text_message_to_utf8 = False
+
+    def set_c2s_max_window_bits(self, value):
+        self._c2s_max_window_bits = value
+
+    def set_c2s_no_context_takeover(self, value):
+        self._c2s_no_context_takeover = value
+
+    def set_bfinal(self, value):
+        self._bfinal = value
+
+    def enable_outgoing_compression(self):
+        self._compress_outgoing_enabled = True
+
+    def disable_outgoing_compression(self):
+        self._compress_outgoing_enabled = False
+
+    def _process_incoming_message(self, message, decompress):
+        if not decompress:
+            return message
+
+        received_payload_size = len(message)
+        self._total_incoming_payload_bytes += received_payload_size
+
+        message = self._inflater.filter(message)
+
+        filtered_payload_size = len(message)
+        self._total_filtered_incoming_payload_bytes += filtered_payload_size
+
+        _log_decompression_ratio(self._logger, received_payload_size,
+                                 self._total_incoming_payload_bytes,
+                                 filtered_payload_size,
+                                 self._total_filtered_incoming_payload_bytes)
+
+        return message
+
+    def _process_outgoing_message(self, message, end, binary):
+        if not binary:
+            message = message.encode('utf-8')
+
+        if not self._compress_outgoing_enabled:
+            return message
+
+        original_payload_size = len(message)
+        self._total_outgoing_payload_bytes += original_payload_size
+
+        message = self._deflater.filter(
+            message, flush=end, bfinal=self._bfinal)
+
+        filtered_payload_size = len(message)
+        self._total_filtered_outgoing_payload_bytes += filtered_payload_size
+
+        _log_compression_ratio(self._logger, original_payload_size,
+                               self._total_outgoing_payload_bytes,
+                               filtered_payload_size,
+                               self._total_filtered_outgoing_payload_bytes)
+
+        if not self._compress_ongoing:
+            self._outgoing_frame_filter.set_compression_bit()
+        self._compress_ongoing = not end
+        return message
+
+    def _process_incoming_frame(self, frame):
+        if frame.rsv1 == 1 and not common.is_control_opcode(frame.opcode):
+            self._incoming_message_filter.decompress_next_message()
+            frame.rsv1 = 0
+
+    def _process_outgoing_frame(self, frame, compression_bit):
+        if (not compression_bit or
+            common.is_control_opcode(frame.opcode)):
+            return
+
+        frame.rsv1 = 1
+
+
+class PerMessageCompressionExtensionProcessor(
+    CompressionExtensionProcessorBase):
+    """WebSocket Per-message compression extension processor.
+
+    Specification:
+    http://tools.ietf.org/html/draft-ietf-hybi-permessage-compression
+    """
+
+    _DEFLATE_METHOD = 'deflate'
+
+    def __init__(self, request):
+        CompressionExtensionProcessorBase.__init__(self, request)
+
+    def name(self):
+        return common.PERMESSAGE_COMPRESSION_EXTENSION
+
+    def _lookup_compression_processor(self, method_desc):
+        if method_desc.name() == self._DEFLATE_METHOD:
+            return DeflateMessageProcessor(method_desc)
+        return None
+
+
+_available_processors[common.PERMESSAGE_COMPRESSION_EXTENSION] = (
+    PerMessageCompressionExtensionProcessor)
+
+
+# Adding vendor-prefixed permessage-compress extension.
+# TODO(bashi): Remove this after WebKit stops using vendor prefix.
+_available_processors[common.X_WEBKIT_PERMESSAGE_COMPRESSION_EXTENSION] = (
+    PerMessageCompressionExtensionProcessor)
+
+
+class MuxExtensionProcessor(ExtensionProcessorInterface):
+    """WebSocket multiplexing extension processor."""
+
+    _QUOTA_PARAM = 'quota'
+
+    def __init__(self, request):
+        self._request = request
+
+    def name(self):
+        return common.MUX_EXTENSION
+
+    def get_extension_response(self, ws_request,
+                               logical_channel_extensions):
+        # The mux extension cannot be used after extensions that depend on
+        # frame boundaries, the extension data field, or any reserved bits
+        # attributed to each frame.
+        for extension in logical_channel_extensions:
+            name = extension.name()
+            if (name == common.PERFRAME_COMPRESSION_EXTENSION or
+                name == common.DEFLATE_FRAME_EXTENSION or
+                name == common.X_WEBKIT_DEFLATE_FRAME_EXTENSION):
+                return None
+
+        quota = self._request.get_parameter_value(self._QUOTA_PARAM)
+        if quota is None:
+            ws_request.mux_quota = 0
+        else:
+            try:
+                quota = int(quota)
+            except ValueError, e:
+                return None
+            if quota < 0 or quota >= 2 ** 32:
+                return None
+            ws_request.mux_quota = quota
+
+        ws_request.mux = True
+        ws_request.mux_extensions = logical_channel_extensions
+        return common.ExtensionParameter(common.MUX_EXTENSION)
+
+    def setup_stream_options(self, stream_options):
+        pass
+
+
+_available_processors[common.MUX_EXTENSION] = MuxExtensionProcessor
+
+
+def get_extension_processor(extension_request):
+    global _available_processors
+    processor_class = _available_processors.get(extension_request.name())
+    if processor_class is None:
+        return None
+    return processor_class(extension_request)
+
+
+# vi:sts=4 sw=4 et
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/__init__.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/__init__.py
new file mode 100644
index 0000000..194f6b3
--- /dev/null
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/__init__.py
@@ -0,0 +1,110 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""WebSocket opening handshake processor. This class try to apply available
+opening handshake processors for each protocol version until a connection is
+successfully established.
+"""
+
+
+import logging
+
+from mod_pywebsocket import common
+from mod_pywebsocket.handshake import hybi00
+from mod_pywebsocket.handshake import hybi
+# Export AbortedByUserException, HandshakeException, and VersionException
+# symbol from this module.
+from mod_pywebsocket.handshake._base import AbortedByUserException
+from mod_pywebsocket.handshake._base import HandshakeException
+from mod_pywebsocket.handshake._base import VersionException
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def do_handshake(request, dispatcher, allowDraft75=False, strict=False):
+    """Performs WebSocket handshake.
+
+    Args:
+        request: mod_python request.
+        dispatcher: Dispatcher (dispatch.Dispatcher).
+        allowDraft75: Obsolete argument. Ignored.
+        strict: Obsolete argument. Ignored.
+
+    Handshaker will add attributes such as ws_resource to the request while
+    performing the handshake.
+    """
+
+    _LOGGER.debug('Client\'s opening handshake resource: %r', request.uri)
+    # To print mimetools.Message as an escaped one-line string, we convert
+    # headers_in to a dict. Without the conversion, %r just prints the type
+    # and address, and %s prints the original header string across multiple
+    # lines.
+    #
+    # Both mimetools.Message and mod_python's MpTable_Type can be converted
+    # to dict.
+    #
+    # mimetools.Message.__str__ returns the original header string, while
+    # dict(mimetools.Message) returns a map from header names to header
+    # values. MpTable_Type has no such __str__, only a __repr__ that formats
+    # it like a dictionary.
+    _LOGGER.debug(
+        'Client\'s opening handshake headers: %r', dict(request.headers_in))
+
+    handshakers = []
+    handshakers.append(
+        ('RFC 6455', hybi.Handshaker(request, dispatcher)))
+    handshakers.append(
+        ('HyBi 00', hybi00.Handshaker(request, dispatcher)))
+
+    for name, handshaker in handshakers:
+        _LOGGER.debug('Trying protocol version %s', name)
+        try:
+            handshaker.do_handshake()
+            _LOGGER.info('Established (%s protocol)', name)
+            return
+        except HandshakeException, e:
+            _LOGGER.debug(
+                'Failed to complete opening handshake as %s protocol: %r',
+                name, e)
+            if e.status:
+                raise e
+        except AbortedByUserException, e:
+            raise
+        except VersionException, e:
+            raise
+
+    # TODO(toyoshim): Add a test to cover the case all handshakers fail.
+    raise HandshakeException(
+        'Failed to complete opening handshake for all available protocols',
+        status=common.HTTP_STATUS_BAD_REQUEST)
+
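+# How the fallback above plays out in practice (an illustrative sketch, not
+# normative): an RFC 6455 client sends a Sec-WebSocket-Version header, so the
+# first handshaker succeeds; a HyBi 00 client omits that header, which makes
+# the RFC 6455 handshaker raise HandshakeException with no status, and the
+# loop then falls through to the HyBi 00 handshaker. Only when every
+# handshaker fails without a status is the final 400 response raised.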
+
+# vi:sts=4 sw=4 et
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/_base.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/_base.py
new file mode 100644
index 0000000..e5c94ca
--- /dev/null
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/_base.py
@@ -0,0 +1,226 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Common functions and exceptions used by WebSocket opening handshake
+processors.
+"""
+
+
+from mod_pywebsocket import common
+from mod_pywebsocket import http_header_util
+
+
+class AbortedByUserException(Exception):
+    """Exception for aborting a connection intentionally.
+
+    If this exception is raised in the do_extra_handshake handler, the
+    connection will be abandoned. No other WebSocket or HTTP(S) handler will
+    be invoked.
+
+    If this exception is raised in transfer_data_handler, the connection will
+    be closed without a closing handshake. No other WebSocket or HTTP(S)
+    handler will be invoked.
+    """
+
+    pass
+
+
+class HandshakeException(Exception):
+    """This exception will be raised when an error occurred while processing
+    WebSocket initial handshake.
+    """
+
+    def __init__(self, name, status=None):
+        super(HandshakeException, self).__init__(name)
+        self.status = status
+
+
+class VersionException(Exception):
+    """This exception will be raised when a version of client request does not
+    match with version the server supports.
+    """
+
+    def __init__(self, name, supported_versions=''):
+        """Construct an instance.
+
+        Args:
+            supported_versions: a str listing the supported hybi versions
+                                (e.g. '8, 13').
+        """
+        super(VersionException, self).__init__(name)
+        self.supported_versions = supported_versions
+
+
+def get_default_port(is_secure):
+    if is_secure:
+        return common.DEFAULT_WEB_SOCKET_SECURE_PORT
+    else:
+        return common.DEFAULT_WEB_SOCKET_PORT
+
+
+def validate_subprotocol(subprotocol, hixie):
+    """Validate a value in the Sec-WebSocket-Protocol field.
+
+    See
+    - RFC 6455: Section 4.1., 4.2.2., and 4.3.
+    - HyBi 00: Section 4.1. Opening handshake
+
+    Args:
+         hixie: if True, check that every character in subprotocol is in the
+                range U+0020 to U+007E, as required by HyBi 00 but not by
+                RFC 6455.
+    """
+
+    if not subprotocol:
+        raise HandshakeException('Invalid subprotocol name: empty')
+    if hixie:
+        # Parameter should be in the range U+0020 to U+007E.
+        for c in subprotocol:
+            if not 0x20 <= ord(c) <= 0x7e:
+                raise HandshakeException(
+                    'Illegal character in subprotocol name: %r' % c)
+    else:
+        # Parameter should be encoded HTTP token.
+        state = http_header_util.ParsingState(subprotocol)
+        token = http_header_util.consume_token(state)
+        rest = http_header_util.peek(state)
+        # If |rest| is not None, |subprotocol| is either more than one token
+        # or invalid. If |rest| is None, |token| cannot be None because
+        # |subprotocol| is the concatenation of |token| and |rest| and is
+        # known to be non-empty.
+        if rest is not None:
+            raise HandshakeException('Invalid non-token string in subprotocol '
+                                     'name: %r' % rest)
+
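+# Illustrative calls: validate_subprotocol('chat', hixie=False) passes since
+# 'chat' is a single HTTP token; validate_subprotocol('my chat', hixie=False)
+# raises HandshakeException because the space ends the token and leaves a
+# non-token remainder; with hixie=True the same 'my chat' value passes because
+# every character falls within the U+0020..U+007E range that HyBi 00 requires.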
+
+def parse_host_header(request):
+    fields = request.headers_in['Host'].split(':', 1)
+    if len(fields) == 1:
+        return fields[0], get_default_port(request.is_https())
+    try:
+        return fields[0], int(fields[1])
+    except ValueError, e:
+        raise HandshakeException('Invalid port number format: %r' % e)
+
+
+def format_header(name, value):
+    return '%s: %s\r\n' % (name, value)
+
+
+def build_location(request):
+    """Build WebSocket location for request."""
+    location_parts = []
+    if request.is_https():
+        location_parts.append(common.WEB_SOCKET_SECURE_SCHEME)
+    else:
+        location_parts.append(common.WEB_SOCKET_SCHEME)
+    location_parts.append('://')
+    host, port = parse_host_header(request)
+    connection_port = request.connection.local_addr[1]
+    if port != connection_port:
+        raise HandshakeException('Header/connection port mismatch: %d/%d' %
+                                 (port, connection_port))
+    location_parts.append(host)
+    if port != get_default_port(request.is_https()):
+        location_parts.append(':')
+        location_parts.append(str(port))
+    location_parts.append(request.uri)
+    return ''.join(location_parts)
+
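+# A quick sketch of what build_location produces (assuming the usual default
+# ports of 80 for ws and 443 for wss): a plain-HTTP request for /echo with
+# 'Host: example.com' yields 'ws://example.com/echo', while the same request
+# with 'Host: example.com:8880' and a matching connection port yields
+# 'ws://example.com:8880/echo'. A mismatch between the Host port and the
+# actual connection port raises HandshakeException.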
+
+def get_mandatory_header(request, key):
+    value = request.headers_in.get(key)
+    if value is None:
+        raise HandshakeException('Header %s is not defined' % key)
+    return value
+
+
+def validate_mandatory_header(request, key, expected_value, fail_status=None):
+    value = get_mandatory_header(request, key)
+
+    if value.lower() != expected_value.lower():
+        raise HandshakeException(
+            'Expected %r for header %s but found %r (case-insensitive)' %
+            (expected_value, key, value), status=fail_status)
+
+
+def check_request_line(request):
+    # 5.1 1. The three character UTF-8 string "GET".
+    # 5.1 2. A UTF-8-encoded U+0020 SPACE character (0x20 byte).
+    if request.method != 'GET':
+        raise HandshakeException('Method is not GET: %r' % request.method)
+
+    if request.protocol != 'HTTP/1.1':
+        raise HandshakeException('Version is not HTTP/1.1: %r' %
+                                 request.protocol)
+
+
+def check_header_lines(request, mandatory_headers):
+    check_request_line(request)
+
+    # The expected field names, and the meaning of their corresponding
+    # values, are as follows.
+    #  |Upgrade| and |Connection|
+    for key, expected_value in mandatory_headers:
+        validate_mandatory_header(request, key, expected_value)
+
+
+def parse_token_list(data):
+    """Parses a header value which follows 1#token and returns parsed elements
+    as a list of strings.
+
+    Leading LWSes must be trimmed.
+    """
+
+    state = http_header_util.ParsingState(data)
+
+    token_list = []
+
+    while True:
+        token = http_header_util.consume_token(state)
+        if token is not None:
+            token_list.append(token)
+
+        http_header_util.consume_lwses(state)
+
+        if http_header_util.peek(state) is None:
+            break
+
+        if not http_header_util.consume_string(state, ','):
+            raise HandshakeException(
+                'Expected a comma but found %r' % http_header_util.peek(state))
+
+        http_header_util.consume_lwses(state)
+
+    if len(token_list) == 0:
+        raise HandshakeException('No valid token found')
+
+    return token_list
+
+
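+# For example (illustrative), parse_token_list('permessage-compress, mux')
+# returns ['permessage-compress', 'mux'], whereas an empty value or one that
+# contains no valid token at all raises HandshakeException.
+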
+# vi:sts=4 sw=4 et
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/hybi.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/hybi.py
new file mode 100644
index 0000000..fc0e2a0
--- /dev/null
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/hybi.py
@@ -0,0 +1,404 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides the opening handshake processor for the WebSocket
+protocol (RFC 6455).
+
+Specification:
+http://tools.ietf.org/html/rfc6455
+"""
+
+
+# Note: request.connection.write is used in this module, even though the
+# mod_python documentation says that it should be used only in connection
+# handlers. Unfortunately, we have no other option. For example, request.write
+# is not suitable because it doesn't allow writing raw bytes directly.
+
+
+import base64
+import logging
+import os
+import re
+
+from mod_pywebsocket import common
+from mod_pywebsocket.extensions import get_extension_processor
+from mod_pywebsocket.handshake._base import check_request_line
+from mod_pywebsocket.handshake._base import format_header
+from mod_pywebsocket.handshake._base import get_mandatory_header
+from mod_pywebsocket.handshake._base import HandshakeException
+from mod_pywebsocket.handshake._base import parse_token_list
+from mod_pywebsocket.handshake._base import validate_mandatory_header
+from mod_pywebsocket.handshake._base import validate_subprotocol
+from mod_pywebsocket.handshake._base import VersionException
+from mod_pywebsocket.stream import Stream
+from mod_pywebsocket.stream import StreamOptions
+from mod_pywebsocket import util
+
+
+# Used to validate the value of the Sec-WebSocket-Key header strictly. RFC 4648
+# disallows non-zero padding bits, so the character right before == must be one
+# of A, Q, g and w.
+_SEC_WEBSOCKET_KEY_REGEX = re.compile('^[+/0-9A-Za-z]{21}[AQgw]==$')
+
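+# For instance, the sample key from RFC 6455, 'dGhlIHNhbXBsZSBub25jZQ==',
+# matches this pattern (21 base64 characters followed by 'Q=='), while a key
+# whose character right before == carries non-zero padding bits, e.g. one
+# ending in 'B==', is rejected here even though base64.b64decode decodes it.
+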
+# Defining aliases for values used frequently.
+_VERSION_HYBI08 = common.VERSION_HYBI08
+_VERSION_HYBI08_STRING = str(_VERSION_HYBI08)
+_VERSION_LATEST = common.VERSION_HYBI_LATEST
+_VERSION_LATEST_STRING = str(_VERSION_LATEST)
+_SUPPORTED_VERSIONS = [
+    _VERSION_LATEST,
+    _VERSION_HYBI08,
+]
+
+
+def compute_accept(key):
+    """Computes value for the Sec-WebSocket-Accept header from value of the
+    Sec-WebSocket-Key header.
+    """
+
+    accept_binary = util.sha1_hash(
+        key + common.WEBSOCKET_ACCEPT_UUID).digest()
+    accept = base64.b64encode(accept_binary)
+
+    return (accept, accept_binary)
+
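+# Worked example (the sample from RFC 6455, section 1.3): for the client key
+# 'dGhlIHNhbXBsZSBub25jZQ==', hashing the key concatenated with the WebSocket
+# GUID and base64-encoding the SHA-1 digest gives
+#
+#     >>> compute_accept('dGhlIHNhbXBsZSBub25jZQ==')[0]
+#     's3pPLMBiTxaQ9kYGzzhZRbK+xOo='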
+
+class Handshaker(object):
+    """Opening handshake processor for the WebSocket protocol (RFC 6455)."""
+
+    def __init__(self, request, dispatcher):
+        """Construct an instance.
+
+        Args:
+            request: mod_python request.
+            dispatcher: Dispatcher (dispatch.Dispatcher).
+
+        Handshaker will add attributes such as ws_resource during handshake.
+        """
+
+        self._logger = util.get_class_logger(self)
+
+        self._request = request
+        self._dispatcher = dispatcher
+
+    def _validate_connection_header(self):
+        connection = get_mandatory_header(
+            self._request, common.CONNECTION_HEADER)
+
+        try:
+            connection_tokens = parse_token_list(connection)
+        except HandshakeException, e:
+            raise HandshakeException(
+                'Failed to parse %s: %s' % (common.CONNECTION_HEADER, e))
+
+        connection_is_valid = False
+        for token in connection_tokens:
+            if token.lower() == common.UPGRADE_CONNECTION_TYPE.lower():
+                connection_is_valid = True
+                break
+        if not connection_is_valid:
+            raise HandshakeException(
+                '%s header doesn\'t contain "%s"' %
+                (common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
+
+    def do_handshake(self):
+        self._request.ws_close_code = None
+        self._request.ws_close_reason = None
+
+        # Parsing.
+
+        check_request_line(self._request)
+
+        validate_mandatory_header(
+            self._request,
+            common.UPGRADE_HEADER,
+            common.WEBSOCKET_UPGRADE_TYPE)
+
+        self._validate_connection_header()
+
+        self._request.ws_resource = self._request.uri
+
+        unused_host = get_mandatory_header(self._request, common.HOST_HEADER)
+
+        self._request.ws_version = self._check_version()
+
+        # This handshake must be based on the latest hybi. We are responsible
+        # for falling back to HTTP on handshake failure, as the latest hybi
+        # handshake specifies.
+        try:
+            self._get_origin()
+            self._set_protocol()
+            self._parse_extensions()
+
+            # Key validation, response generation.
+
+            key = self._get_key()
+            (accept, accept_binary) = compute_accept(key)
+            self._logger.debug(
+                '%s: %r (%s)',
+                common.SEC_WEBSOCKET_ACCEPT_HEADER,
+                accept,
+                util.hexify(accept_binary))
+
+            self._logger.debug('Protocol version is RFC 6455')
+
+            # Setup extension processors.
+
+            processors = []
+            if self._request.ws_requested_extensions is not None:
+                for extension_request in self._request.ws_requested_extensions:
+                    processor = get_extension_processor(extension_request)
+                    # Unknown extension requests are just ignored.
+                    if processor is not None:
+                        processors.append(processor)
+            self._request.ws_extension_processors = processors
+
+            # Extra handshake handler may modify/remove processors.
+            self._dispatcher.do_extra_handshake(self._request)
+            processors = filter(lambda processor: processor is not None,
+                                self._request.ws_extension_processors)
+
+            accepted_extensions = []
+
+            # We need to take care of the mux extension here. Extensions
+            # placed before mux should be applied to logical channels.
+            mux_index = -1
+            for i, processor in enumerate(processors):
+                if processor.name() == common.MUX_EXTENSION:
+                    mux_index = i
+                    break
+            if mux_index >= 0:
+                mux_processor = processors[mux_index]
+                logical_channel_processors = processors[:mux_index]
+                processors = processors[mux_index+1:]
+
+                for processor in logical_channel_processors:
+                    extension_response = processor.get_extension_response()
+                    if extension_response is None:
+                        # Rejected.
+                        continue
+                    accepted_extensions.append(extension_response)
+                # Pass a shallow copy of accepted_extensions as extensions for
+                # logical channels.
+                mux_response = mux_processor.get_extension_response(
+                    self._request, accepted_extensions[:])
+                if mux_response is not None:
+                    accepted_extensions.append(mux_response)
+
+            stream_options = StreamOptions()
+
+            # When the mux extension is present, |processors| here contains
+            # only processors for extensions placed after mux.
+            for processor in processors:
+
+                extension_response = processor.get_extension_response()
+                if extension_response is None:
+                    # Rejected.
+                    continue
+
+                accepted_extensions.append(extension_response)
+
+                processor.setup_stream_options(stream_options)
+
+            if len(accepted_extensions) > 0:
+                self._request.ws_extensions = accepted_extensions
+                self._logger.debug(
+                    'Extensions accepted: %r',
+                    map(common.ExtensionParameter.name, accepted_extensions))
+            else:
+                self._request.ws_extensions = None
+
+            self._request.ws_stream = self._create_stream(stream_options)
+
+            if self._request.ws_requested_protocols is not None:
+                if self._request.ws_protocol is None:
+                    raise HandshakeException(
+                        'do_extra_handshake must choose one subprotocol from '
+                        'ws_requested_protocols and set it to ws_protocol')
+                validate_subprotocol(self._request.ws_protocol, hixie=False)
+
+                self._logger.debug(
+                    'Subprotocol accepted: %r',
+                    self._request.ws_protocol)
+            else:
+                if self._request.ws_protocol is not None:
+                    raise HandshakeException(
+                        'ws_protocol must be None when the client didn\'t '
+                        'request any subprotocol')
+
+            self._send_handshake(accept)
+        except HandshakeException, e:
+            if not e.status:
+                # Fallback to 400 bad request by default.
+                e.status = common.HTTP_STATUS_BAD_REQUEST
+            raise e
+
+    def _get_origin(self):
+        if self._request.ws_version is _VERSION_HYBI08:
+            origin_header = common.SEC_WEBSOCKET_ORIGIN_HEADER
+        else:
+            origin_header = common.ORIGIN_HEADER
+        origin = self._request.headers_in.get(origin_header)
+        if origin is None:
+            self._logger.debug('Client request does not have origin header')
+        self._request.ws_origin = origin
+
+    def _check_version(self):
+        version = get_mandatory_header(self._request,
+                                       common.SEC_WEBSOCKET_VERSION_HEADER)
+        if version == _VERSION_HYBI08_STRING:
+            return _VERSION_HYBI08
+        if version == _VERSION_LATEST_STRING:
+            return _VERSION_LATEST
+
+        if version.find(',') >= 0:
+            raise HandshakeException(
+                'Multiple versions (%r) are not allowed for header %s' %
+                (version, common.SEC_WEBSOCKET_VERSION_HEADER),
+                status=common.HTTP_STATUS_BAD_REQUEST)
+        raise VersionException(
+            'Unsupported version %r for header %s' %
+            (version, common.SEC_WEBSOCKET_VERSION_HEADER),
+            supported_versions=', '.join(map(str, _SUPPORTED_VERSIONS)))
+
+    def _set_protocol(self):
+        self._request.ws_protocol = None
+
+        protocol_header = self._request.headers_in.get(
+            common.SEC_WEBSOCKET_PROTOCOL_HEADER)
+
+        if protocol_header is None:
+            self._request.ws_requested_protocols = None
+            return
+
+        self._request.ws_requested_protocols = parse_token_list(
+            protocol_header)
+        self._logger.debug('Subprotocols requested: %r',
+                           self._request.ws_requested_protocols)
+
+    def _parse_extensions(self):
+        extensions_header = self._request.headers_in.get(
+            common.SEC_WEBSOCKET_EXTENSIONS_HEADER)
+        if not extensions_header:
+            self._request.ws_requested_extensions = None
+            return
+
+        if self._request.ws_version is common.VERSION_HYBI08:
+            allow_quoted_string = False
+        else:
+            allow_quoted_string = True
+        try:
+            self._request.ws_requested_extensions = common.parse_extensions(
+                extensions_header, allow_quoted_string=allow_quoted_string)
+        except common.ExtensionParsingException, e:
+            raise HandshakeException(
+                'Failed to parse Sec-WebSocket-Extensions header: %r' % e)
+
+        self._logger.debug(
+            'Extensions requested: %r',
+            map(common.ExtensionParameter.name,
+                self._request.ws_requested_extensions))
+
+    def _validate_key(self, key):
+        if key.find(',') >= 0:
+            raise HandshakeException('Request has multiple %s header lines or '
+                                     'contains illegal character \',\': %r' %
+                                     (common.SEC_WEBSOCKET_KEY_HEADER, key))
+
+        # Validate
+        key_is_valid = False
+        try:
+            # Validate the key with a quick regex match before parsing it with
+            # the base64 module. Because the base64 module skips invalid
+            # characters, we have to do this in advance so that the server
+            # strictly rejects illegal keys.
+            if _SEC_WEBSOCKET_KEY_REGEX.match(key):
+                decoded_key = base64.b64decode(key)
+                if len(decoded_key) == 16:
+                    key_is_valid = True
+        except TypeError, e:
+            pass
+
+        if not key_is_valid:
+            raise HandshakeException(
+                'Illegal value for header %s: %r' %
+                (common.SEC_WEBSOCKET_KEY_HEADER, key))
+
+        return decoded_key
+
+    def _get_key(self):
+        key = get_mandatory_header(
+            self._request, common.SEC_WEBSOCKET_KEY_HEADER)
+
+        decoded_key = self._validate_key(key)
+
+        self._logger.debug(
+            '%s: %r (%s)',
+            common.SEC_WEBSOCKET_KEY_HEADER,
+            key,
+            util.hexify(decoded_key))
+
+        return key
+
+    def _create_stream(self, stream_options):
+        return Stream(self._request, stream_options)
+
+    def _create_handshake_response(self, accept):
+        response = []
+
+        response.append('HTTP/1.1 101 Switching Protocols\r\n')
+
+        response.append(format_header(
+            common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE))
+        response.append(format_header(
+            common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
+        response.append(format_header(
+            common.SEC_WEBSOCKET_ACCEPT_HEADER, accept))
+        if self._request.ws_protocol is not None:
+            response.append(format_header(
+                common.SEC_WEBSOCKET_PROTOCOL_HEADER,
+                self._request.ws_protocol))
+        if (self._request.ws_extensions is not None and
+            len(self._request.ws_extensions) != 0):
+            response.append(format_header(
+                common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
+                common.format_extensions(self._request.ws_extensions)))
+        response.append('\r\n')
+
+        return ''.join(response)
+
+    def _send_handshake(self, accept):
+        raw_response = self._create_handshake_response(accept)
+        self._request.connection.write(raw_response)
+        self._logger.debug('Sent server\'s opening handshake: %r',
+                           raw_response)
+
+
+# vi:sts=4 sw=4 et
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/hybi00.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/hybi00.py
new file mode 100644
index 0000000..cc6f8dc
--- /dev/null
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/hybi00.py
@@ -0,0 +1,242 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides the opening handshake processor for the WebSocket
+protocol version HyBi 00.
+
+Specification:
+http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-00
+"""
+
+
+# Note: request.connection.write/read are used in this module, even though
+# mod_python document says that they should be used only in connection
+# handlers. Unfortunately, we have no other options. For example,
+# request.write/read are not suitable because they don't allow direct raw bytes
+# writing/reading.
+
+
+import logging
+import re
+import struct
+
+from mod_pywebsocket import common
+from mod_pywebsocket.stream import StreamHixie75
+from mod_pywebsocket import util
+from mod_pywebsocket.handshake._base import HandshakeException
+from mod_pywebsocket.handshake._base import build_location
+from mod_pywebsocket.handshake._base import check_header_lines
+from mod_pywebsocket.handshake._base import format_header
+from mod_pywebsocket.handshake._base import get_mandatory_header
+from mod_pywebsocket.handshake._base import validate_subprotocol
+
+
+_MANDATORY_HEADERS = [
+    # key, expected value or None
+    [common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE_HIXIE75],
+    [common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE],
+]
+
+
+class Handshaker(object):
+    """Opening handshake processor for the WebSocket protocol version HyBi 00.
+    """
+
+    def __init__(self, request, dispatcher):
+        """Construct an instance.
+
+        Args:
+            request: mod_python request.
+            dispatcher: Dispatcher (dispatch.Dispatcher).
+
+        Handshaker will add attributes such as ws_resource while performing
+        the handshake.
+        """
+
+        self._logger = util.get_class_logger(self)
+
+        self._request = request
+        self._dispatcher = dispatcher
+
+    def do_handshake(self):
+        """Perform WebSocket Handshake.
+
+        On _request, we set
+            ws_resource, ws_protocol, ws_location, ws_origin, ws_challenge,
+            ws_challenge_md5: WebSocket handshake information.
+            ws_stream: Frame generation/parsing class.
+            ws_version: Protocol version.
+
+        Raises:
+            HandshakeException: when any error happened in parsing the opening
+                                handshake request.
+        """
+
+        # 5.1 Reading the client's opening handshake.
+        # dispatcher sets it in self._request.
+        check_header_lines(self._request, _MANDATORY_HEADERS)
+        self._set_resource()
+        self._set_subprotocol()
+        self._set_location()
+        self._set_origin()
+        self._set_challenge_response()
+        self._set_protocol_version()
+
+        self._dispatcher.do_extra_handshake(self._request)
+
+        self._send_handshake()
+
+    def _set_resource(self):
+        self._request.ws_resource = self._request.uri
+
+    def _set_subprotocol(self):
+        # |Sec-WebSocket-Protocol|
+        subprotocol = self._request.headers_in.get(
+            common.SEC_WEBSOCKET_PROTOCOL_HEADER)
+        if subprotocol is not None:
+            validate_subprotocol(subprotocol, hixie=True)
+        self._request.ws_protocol = subprotocol
+
+    def _set_location(self):
+        # |Host|
+        host = self._request.headers_in.get(common.HOST_HEADER)
+        if host is not None:
+            self._request.ws_location = build_location(self._request)
+        # TODO(ukai): check host is this host.
+
+    def _set_origin(self):
+        # |Origin|
+        origin = self._request.headers_in.get(common.ORIGIN_HEADER)
+        if origin is not None:
+            self._request.ws_origin = origin
+
+    def _set_protocol_version(self):
+        # |Sec-WebSocket-Draft|
+        draft = self._request.headers_in.get(common.SEC_WEBSOCKET_DRAFT_HEADER)
+        if draft is not None and draft != '0':
+            raise HandshakeException('Illegal value for %s: %s' %
+                                     (common.SEC_WEBSOCKET_DRAFT_HEADER,
+                                      draft))
+
+        self._logger.debug('Protocol version is HyBi 00')
+        self._request.ws_version = common.VERSION_HYBI00
+        self._request.ws_stream = StreamHixie75(self._request, True)
+
+    def _set_challenge_response(self):
+        # 5.2 4-8.
+        self._request.ws_challenge = self._get_challenge()
+        # 5.2 9. let /response/ be the MD5 fingerprint of /challenge/
+        self._request.ws_challenge_md5 = util.md5_hash(
+            self._request.ws_challenge).digest()
+        self._logger.debug(
+            'Challenge: %r (%s)',
+            self._request.ws_challenge,
+            util.hexify(self._request.ws_challenge))
+        self._logger.debug(
+            'Challenge response: %r (%s)',
+            self._request.ws_challenge_md5,
+            util.hexify(self._request.ws_challenge_md5))
+
+    def _get_key_value(self, key_field):
+        key_value = get_mandatory_header(self._request, key_field)
+
+        self._logger.debug('%s: %r', key_field, key_value)
+
+        # 5.2 4. let /key-number_n/ be the digits (characters in the range
+        # U+0030 DIGIT ZERO (0) to U+0039 DIGIT NINE (9)) in /key_n/,
+        # interpreted as a base ten integer, ignoring all other characters
+        # in /key_n/.
+        try:
+            key_number = int(re.sub("\\D", "", key_value))
+        except ValueError:
+            raise HandshakeException('%s field contains no digit' % key_field)
+        # 5.2 5. let /spaces_n/ be the number of U+0020 SPACE characters
+        # in /key_n/.
+        spaces = re.subn(" ", "", key_value)[1]
+        if spaces == 0:
+            raise HandshakeException('%s field contains no space' % key_field)
+
+        self._logger.debug(
+            '%s: Key-number is %d and number of spaces is %d',
+            key_field, key_number, spaces)
+
+        # 5.2 6. if /key-number_n/ is not an integral multiple of /spaces_n/
+        # then abort the WebSocket connection.
+        if key_number % spaces != 0:
+            raise HandshakeException(
+                '%s: Key-number (%d) is not an integral multiple of spaces '
+                '(%d)' % (key_field, key_number, spaces))
+        # 5.2 7. let /part_n/ be /key-number_n/ divided by /spaces_n/.
+        part = key_number / spaces
+        self._logger.debug('%s: Part is %d', key_field, part)
+        return part
+
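+    # Worked example for _get_key_value above (illustrative, made-up key):
+    # for a header value of '4 8 80' the digits form the key-number 4880 and
+    # there are 2 spaces, so the resulting part is 4880 / 2 = 2440. A
+    # key-number that is not an integral multiple of the space count raises
+    # HandshakeException instead.
+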
+    def _get_challenge(self):
+        # 5.2 4-7.
+        key1 = self._get_key_value(common.SEC_WEBSOCKET_KEY1_HEADER)
+        key2 = self._get_key_value(common.SEC_WEBSOCKET_KEY2_HEADER)
+        # 5.2 8. let /challenge/ be the concatenation of /part_1/ (big-endian
+        # 32 bit integer), /part_2/ (big-endian 32 bit integer), and /key_3/
+        # (the eight bytes read from the connection below).
+        challenge = ''
+        challenge += struct.pack('!I', key1)  # network byteorder int
+        challenge += struct.pack('!I', key2)  # network byteorder int
+        challenge += self._request.connection.read(8)
+        return challenge
+
+    def _send_handshake(self):
+        response = []
+
+        # 5.2 10. send the following line.
+        response.append('HTTP/1.1 101 WebSocket Protocol Handshake\r\n')
+
+        # 5.2 11. send the following fields to the client.
+        response.append(format_header(
+            common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE_HIXIE75))
+        response.append(format_header(
+            common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
+        response.append(format_header(
+            common.SEC_WEBSOCKET_LOCATION_HEADER, self._request.ws_location))
+        response.append(format_header(
+            common.SEC_WEBSOCKET_ORIGIN_HEADER, self._request.ws_origin))
+        if self._request.ws_protocol:
+            response.append(format_header(
+                common.SEC_WEBSOCKET_PROTOCOL_HEADER,
+                self._request.ws_protocol))
+        # 5.2 12. send two bytes 0x0D 0x0A.
+        response.append('\r\n')
+        # 5.2 13. send /response/
+        response.append(self._request.ws_challenge_md5)
+
+        raw_response = ''.join(response)
+        self._request.connection.write(raw_response)
+        self._logger.debug('Sent server\'s opening handshake: %r',
+                           raw_response)
+
+
+# vi:sts=4 sw=4 et
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/headerparserhandler.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/headerparserhandler.py
new file mode 100644
index 0000000..2cc62de
--- /dev/null
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/headerparserhandler.py
@@ -0,0 +1,244 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""PythonHeaderParserHandler for mod_pywebsocket.
+
+Apache HTTP Server and mod_python must be configured such that this
+function is called to handle WebSocket requests.
+"""
+
+
+import logging
+
+from mod_python import apache
+
+from mod_pywebsocket import common
+from mod_pywebsocket import dispatch
+from mod_pywebsocket import handshake
+from mod_pywebsocket import util
+
+
+# PythonOption to specify the handler root directory.
+_PYOPT_HANDLER_ROOT = 'mod_pywebsocket.handler_root'
+
+# PythonOption to specify the handler scan directory.
+# This must be a directory under the root directory.
+# The default is the root directory.
+_PYOPT_HANDLER_SCAN = 'mod_pywebsocket.handler_scan'
+
+# PythonOption to allow handlers whose canonical path is
+# not under the root directory. This is disallowed by default.
+# Set this option to 'yes' to allow such handlers.
+_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT = (
+    'mod_pywebsocket.allow_handlers_outside_root_dir')
+# Map from values to their meanings. 'Yes' and 'No' are allowed just for
+# compatibility.
+_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT_DEFINITION = {
+    'off': False, 'no': False, 'on': True, 'yes': True}
+
+# (Obsolete option. Ignored.)
+# PythonOption to allow the handshake defined in the Hixie 75 version of the
+# protocol. The default is None (off).
+_PYOPT_ALLOW_DRAFT75 = 'mod_pywebsocket.allow_draft75'
+# Map from values to their meanings.
+_PYOPT_ALLOW_DRAFT75_DEFINITION = {'off': False, 'on': True}
+
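+# A typical configuration enabling this handler might look roughly like the
+# following (the directive names are real mod_python names; the paths are
+# placeholders):
+#
+#   PythonPath "sys.path+['/path/to/pywebsocket/src']"
+#   PythonOption mod_pywebsocket.handler_root /path/to/handlers
+#   PythonHeaderParserHandler mod_pywebsocket.headerparserhandler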
+
+class ApacheLogHandler(logging.Handler):
+    """Wrapper logging.Handler to emit log message to apache's error.log."""
+
+    _LEVELS = {
+        logging.DEBUG: apache.APLOG_DEBUG,
+        logging.INFO: apache.APLOG_INFO,
+        logging.WARNING: apache.APLOG_WARNING,
+        logging.ERROR: apache.APLOG_ERR,
+        logging.CRITICAL: apache.APLOG_CRIT,
+        }
+
+    def __init__(self, request=None):
+        logging.Handler.__init__(self)
+        self._log_error = apache.log_error
+        if request is not None:
+            self._log_error = request.log_error
+
+        # Time and level will be printed by Apache.
+        self._formatter = logging.Formatter('%(name)s: %(message)s')
+
+    def emit(self, record):
+        apache_level = apache.APLOG_DEBUG
+        if record.levelno in ApacheLogHandler._LEVELS:
+            apache_level = ApacheLogHandler._LEVELS[record.levelno]
+
+        msg = self._formatter.format(record)
+
+        # "server" parameter must be passed to have "level" parameter work.
+        # If only "level" parameter is passed, nothing shows up on Apache's
+        # log. However, at this point, we cannot get the server object of the
+        # virtual host which will process WebSocket requests. The only server
+        # object we can get here is apache.main_server. But Wherever (server
+        # configuration context or virtual host context) we put
+        # PythonHeaderParserHandler directive, apache.main_server just points
+        # the main server instance (not any of virtual server instance). Then,
+        # Apache follows LogLevel directive in the server configuration context
+        # to filter logs. So, we need to specify LogLevel in the server
+        # configuration context. Even if we specify "LogLevel debug" in the
+        # virtual host context which actually handles WebSocket connections,
+        # DEBUG level logs never show up unless "LogLevel debug" is specified
+        # in the server configuration context.
+        #
+        # TODO(tyoshino): Provide logging methods on request object. When
+        # request is mp_request object (when used together with Apache), the
+        # methods call request.log_error indirectly. When request is
+        # _StandaloneRequest, the methods call Python's logging facility which
+        # we create in standalone.py.
+        self._log_error(msg, apache_level, apache.main_server)
+
+
+def _configure_logging():
+    logger = logging.getLogger()
+    # Logs are filtered by Apache based on the LogLevel directive in the
+    # Apache configuration file, so we simply pass logs of all levels to
+    # ApacheLogHandler.
+    logger.setLevel(logging.DEBUG)
+    logger.addHandler(ApacheLogHandler())
+
+
+_configure_logging()
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def _parse_option(name, value, definition):
+    if value is None:
+        return False
+
+    meaning = definition.get(value.lower())
+    if meaning is None:
+        raise Exception('Invalid value for PythonOption %s: %r' %
+                        (name, value))
+    return meaning
+
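+# For example (illustrative): _parse_option(name, None, definition) returns
+# False, a value of 'Yes' or 'on' maps to True through the definition table,
+# and an unrecognized value such as 'maybe' raises an Exception naming the
+# offending PythonOption.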
+
+def _create_dispatcher():
+    _LOGGER.info('Initializing Dispatcher')
+
+    options = apache.main_server.get_options()
+
+    handler_root = options.get(_PYOPT_HANDLER_ROOT, None)
+    if not handler_root:
+        raise Exception('PythonOption %s is not defined' % _PYOPT_HANDLER_ROOT,
+                        apache.APLOG_ERR)
+
+    handler_scan = options.get(_PYOPT_HANDLER_SCAN, handler_root)
+
+    allow_handlers_outside_root = _parse_option(
+        _PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT,
+        options.get(_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT),
+        _PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT_DEFINITION)
+
+    dispatcher = dispatch.Dispatcher(
+        handler_root, handler_scan, allow_handlers_outside_root)
+
+    for warning in dispatcher.source_warnings():
+        apache.log_error('mod_pywebsocket: %s' % warning, apache.APLOG_WARNING)
+
+    return dispatcher
+
+
+# Initialize
+_dispatcher = _create_dispatcher()
+
+
+def headerparserhandler(request):
+    """Handle request.
+
+    Args:
+        request: mod_python request.
+
+    This function is named headerparserhandler because it is the default
+    name for a PythonHeaderParserHandler.
+    """
+
+    handshake_is_done = False
+    try:
+        # Fall back to the default HTTP handler for request paths for which
+        # we don't have request handlers.
+        if not _dispatcher.get_handler_suite(request.uri):
+            request.log_error('No handler for resource: %r' % request.uri,
+                              apache.APLOG_INFO)
+            request.log_error('Fallback to Apache', apache.APLOG_INFO)
+            return apache.DECLINED
+    except dispatch.DispatchException, e:
+        request.log_error('mod_pywebsocket: %s' % e, apache.APLOG_INFO)
+        if not handshake_is_done:
+            return e.status
+
+    try:
+        allow_draft75 = _parse_option(
+            _PYOPT_ALLOW_DRAFT75,
+            apache.main_server.get_options().get(_PYOPT_ALLOW_DRAFT75),
+            _PYOPT_ALLOW_DRAFT75_DEFINITION)
+
+        try:
+            handshake.do_handshake(
+                request, _dispatcher, allowDraft75=allow_draft75)
+        except handshake.VersionException, e:
+            request.log_error('mod_pywebsocket: %s' % e, apache.APLOG_INFO)
+            request.err_headers_out.add(common.SEC_WEBSOCKET_VERSION_HEADER,
+                                        e.supported_versions)
+            return apache.HTTP_BAD_REQUEST
+        except handshake.HandshakeException, e:
+            # Handshake for ws/wss failed.
+            # Send http response with error status.
+            request.log_error('mod_pywebsocket: %s' % e, apache.APLOG_INFO)
+            return e.status
+
+        handshake_is_done = True
+        request._dispatcher = _dispatcher
+        _dispatcher.transfer_data(request)
+    except handshake.AbortedByUserException, e:
+        request.log_error('mod_pywebsocket: %s' % e, apache.APLOG_INFO)
+    except Exception, e:
+        # A DispatchException can also be thrown if something is wrong in the
+        # pywebsocket code; it is caught here as well.
+
+        request.log_error('mod_pywebsocket: %s\n%s' %
+                          (e, util.get_stack_trace()),
+                          apache.APLOG_ERR)
+        # Unknown exceptions before the handshake mean that Apache must handle
+        # the request with another handler.
+        if not handshake_is_done:
+            return apache.DECLINED
+    # Set assbackwards to suppress response header generation by Apache.
+    request.assbackwards = 1
+    return apache.DONE  # Return DONE such that no other handlers are invoked.
+
+
+# vi:sts=4 sw=4 et
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/http_header_util.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/http_header_util.py
new file mode 100644
index 0000000..b774653
--- /dev/null
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/http_header_util.py
@@ -0,0 +1,263 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Utilities for parsing and formatting headers that follow the grammar defined
+in HTTP RFC http://www.ietf.org/rfc/rfc2616.txt.
+"""
+
+
+import urlparse
+
+
+_SEPARATORS = '()<>@,;:\\"/[]?={} \t'
+
+
+def _is_char(c):
+    """Returns true iff c is in CHAR as specified in HTTP RFC."""
+
+    return ord(c) <= 127
+
+
+def _is_ctl(c):
+    """Returns true iff c is in CTL as specified in HTTP RFC."""
+
+    return ord(c) <= 31 or ord(c) == 127
+
+
+class ParsingState(object):
+
+    def __init__(self, data):
+        self.data = data
+        self.head = 0
+
+
+def peek(state, pos=0):
+    """Peeks the character at pos from the head of data."""
+
+    if state.head + pos >= len(state.data):
+        return None
+
+    return state.data[state.head + pos]
+
+
+def consume(state, amount=1):
+    """Consumes specified amount of bytes from the head and returns the
+    consumed bytes. If there's not enough bytes to consume, returns None.
+    """
+
+    if state.head + amount > len(state.data):
+        return None
+
+    result = state.data[state.head:state.head + amount]
+    state.head = state.head + amount
+    return result
+
+
+def consume_string(state, expected):
+    """Given a parsing state and a expected string, consumes the string from
+    the head. Returns True if consumed successfully. Otherwise, returns
+    False.
+    """
+
+    pos = 0
+
+    for c in expected:
+        if c != peek(state, pos):
+            return False
+        pos += 1
+
+    consume(state, pos)
+    return True
+
+
+def consume_lws(state):
+    """Consumes a LWS from the head. Returns True if any LWS is consumed.
+    Otherwise, returns False.
+
+    LWS = [CRLF] 1*( SP | HT )
+    """
+
+    original_head = state.head
+
+    consume_string(state, '\r\n')
+
+    pos = 0
+
+    while True:
+        c = peek(state, pos)
+        if c == ' ' or c == '\t':
+            pos += 1
+        else:
+            if pos == 0:
+                state.head = original_head
+                return False
+            else:
+                consume(state, pos)
+                return True
+
+
+def consume_lwses(state):
+    """Consumes *LWS from the head."""
+
+    while consume_lws(state):
+        pass
+
+
+def consume_token(state):
+    """Consumes a token from the head. Returns the token or None if no token
+    was found.
+    """
+
+    pos = 0
+
+    while True:
+        c = peek(state, pos)
+        if c is None or c in _SEPARATORS or _is_ctl(c) or not _is_char(c):
+            if pos == 0:
+                return None
+
+            return consume(state, pos)
+        else:
+            pos += 1
+
+
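+# Illustrative usage sketch, not part of the upstream pywebsocket module: it
+# shows how the parsing primitives above could split a comma-separated header
+# value. The value 'websocket, mux' is a made-up example.
+def _example_split_header_value():
+    state = ParsingState('websocket, mux')
+    assert consume_token(state) == 'websocket'  # token up to the separator
+    assert consume_string(state, ',')           # consume the comma itself
+    consume_lwses(state)                        # skip the following space
+    assert consume_token(state) == 'mux'        # the second token
+
+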
+def consume_token_or_quoted_string(state):
+    """Consumes a token or a quoted-string, and returns the token or unquoted
+    string. If no token or quoted-string was found, returns None.
+    """
+
+    original_head = state.head
+
+    if not consume_string(state, '"'):
+        return consume_token(state)
+
+    result = []
+
+    expect_quoted_pair = False
+
+    while True:
+        if not expect_quoted_pair and consume_lws(state):
+            result.append(' ')
+            continue
+
+        c = consume(state)
+        if c is None:
+            # The quoted-string is not closed by a double quote.
+            state.head = original_head
+            return None
+        elif expect_quoted_pair:
+            expect_quoted_pair = False
+            if _is_char(c):
+                result.append(c)
+            else:
+                # Non-CHAR character found in quoted-pair
+                state.head = original_head
+                return None
+        elif c == '\\':
+            expect_quoted_pair = True
+        elif c == '"':
+            return ''.join(result)
+        elif _is_ctl(c):
+            # Invalid character %r found in qdtext
+            state.head = original_head
+            return None
+        else:
+            result.append(c)
+
+
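+# Illustrative usage sketch, not part of the upstream module: consuming a
+# quoted-string unquotes it, while a bare token is returned as-is. The input
+# values are made-up examples.
+def _example_token_or_quoted_string():
+    assert consume_token_or_quoted_string(
+        ParsingState('"quoted value"')) == 'quoted value'
+    assert consume_token_or_quoted_string(
+        ParsingState('bare-token')) == 'bare-token'
+
+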
+def quote_if_necessary(s):
+    """Quotes arbitrary string into quoted-string."""
+
+    quote = False
+    if s == '':
+        return '""'
+
+    result = []
+    for c in s:
+        if c == '"' or c in _SEPARATORS or _is_ctl(c) or not _is_char(c):
+            quote = True
+
+        if c == '"' or _is_ctl(c):
+            result.append('\\' + c)
+        else:
+            result.append(c)
+
+    if quote:
+        return '"' + ''.join(result) + '"'
+    else:
+        return ''.join(result)
+
+
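+# Illustrative usage sketch, not part of the upstream module: a bare token
+# needs no quoting, while a value containing a separator (here, a space) gets
+# wrapped in double quotes. The inputs are made-up examples.
+def _example_quote_if_necessary():
+    assert quote_if_necessary('token') == 'token'
+    assert quote_if_necessary('two words') == '"two words"'
+    assert quote_if_necessary('') == '""'
+
+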
+def parse_uri(uri):
+    """Parse absolute URI then return host, port and resource."""
+
+    parsed = urlparse.urlsplit(uri)
+    if parsed.scheme != 'wss' and parsed.scheme != 'ws':
+        # |uri| must be a relative URI.
+        # TODO(toyoshim): Should validate |uri|.
+        return None, None, uri
+
+    if parsed.hostname is None:
+        return None, None, None
+
+    port = None
+    try:
+        port = parsed.port
+    except ValueError, e:
+        # The port property raises ValueError on an invalid null port
+        # description like 'ws://host:/path'.
+        return None, None, None
+
+    if port is None:
+        if parsed.scheme == 'ws':
+            port = 80
+        else:
+            port = 443
+
+    path = parsed.path
+    if not path:
+        path += '/'
+    if parsed.query:
+        path += '?' + parsed.query
+    if parsed.fragment:
+        path += '#' + parsed.fragment
+
+    return parsed.hostname, port, path
+
+
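+# Illustrative usage sketch, not part of the upstream module: parse_uri fills
+# in the default port (80 for ws, 443 for wss) and an empty path becomes '/'.
+# The URIs are made-up examples.
+def _example_parse_uri():
+    assert parse_uri('ws://example.com/chat') == ('example.com', 80, '/chat')
+    assert parse_uri('wss://example.com') == ('example.com', 443, '/')
+
+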
+try:
+    urlparse.uses_netloc.index('ws')
+except ValueError, e:
+    # urlparse in Python2.5.1 doesn't have 'ws' and 'wss' entries.
+    urlparse.uses_netloc.append('ws')
+    urlparse.uses_netloc.append('wss')
+
+
+# vi:sts=4 sw=4 et
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/memorizingfile.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/memorizingfile.py
new file mode 100644
index 0000000..4d4cd95
--- /dev/null
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/memorizingfile.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+#
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Memorizing file.
+
+A memorizing file wraps a file and memorizes lines read by readline.
+"""
+
+
+import sys
+
+
+class MemorizingFile(object):
+    """MemorizingFile wraps a file and memorizes lines read by readline.
+
+    Note that data read by other methods is not memorized. This behavior
+    is good enough for memorizing the lines SimpleHTTPServer reads before
+    control reaches WebSocketRequestHandler.
+    """
+
+    def __init__(self, file_, max_memorized_lines=sys.maxint):
+        """Construct an instance.
+
+        Args:
+            file_: the file object to wrap.
+            max_memorized_lines: the maximum number of lines to memorize.
+                Only the first max_memorized_lines are memorized.
+                Default: sys.maxint.
+        """
+
+        self._file = file_
+        self._memorized_lines = []
+        self._max_memorized_lines = max_memorized_lines
+        self._buffered = False
+        self._buffered_line = None
+
+    def __getattribute__(self, name):
+        if name in ('_file', '_memorized_lines', '_max_memorized_lines',
+                    '_buffered', '_buffered_line', 'readline',
+                    'get_memorized_lines'):
+            return object.__getattribute__(self, name)
+        return self._file.__getattribute__(name)
+
+    def readline(self, size=-1):
+        """Override file.readline and memorize the line read.
+
+        Note that even if size is specified and is smaller than the actual
+        line length, the whole line will eventually be read from the
+        underlying file object by subsequent readline calls.
+        """
+
+        if self._buffered:
+            line = self._buffered_line
+            self._buffered = False
+        else:
+            line = self._file.readline()
+            if line and len(self._memorized_lines) < self._max_memorized_lines:
+                self._memorized_lines.append(line)
+        if size >= 0 and size < len(line):
+            self._buffered = True
+            self._buffered_line = line[size:]
+            return line[:size]
+        return line
+
+    def get_memorized_lines(self):
+        """Get lines memorized so far."""
+        return self._memorized_lines
+
+
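+# Illustrative usage sketch, not part of the upstream module: only lines read
+# through readline() are memorized. The request text is a made-up example.
+def _example_memorizing_file():
+    import StringIO
+    wrapped = MemorizingFile(
+        StringIO.StringIO('GET / HTTP/1.1\r\nHost: h\r\n'))
+    assert wrapped.readline() == 'GET / HTTP/1.1\r\n'
+    assert wrapped.get_memorized_lines() == ['GET / HTTP/1.1\r\n']
+
+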
+# vi:sts=4 sw=4 et
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/msgutil.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/msgutil.py
new file mode 100644
index 0000000..4c1a011
--- /dev/null
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/msgutil.py
@@ -0,0 +1,219 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Message related utilities.
+
+Note: request.connection.write/read are used in this module, even though
+the mod_python documentation says that they should be used only in
+connection handlers. Unfortunately, we have no other option. For example,
+request.write/read are not suitable because they don't allow writing and
+reading raw bytes directly.
+"""
+
+
+import Queue
+import threading
+
+
+# Export Exception symbols from msgutil for backward compatibility
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_base import InvalidFrameException
+from mod_pywebsocket._stream_base import BadOperationException
+from mod_pywebsocket._stream_base import UnsupportedFrameException
+
+
+# An API for handler to send/receive WebSocket messages.
+def close_connection(request):
+    """Close connection.
+
+    Args:
+        request: mod_python request.
+    """
+    request.ws_stream.close_connection()
+
+
+def send_message(request, payload_data, end=True, binary=False):
+    """Send a message (or part of a message).
+
+    Args:
+        request: mod_python request.
+        payload_data: unicode text or str binary to send.
+        end: True to terminate a message.
+             False to send payload_data as part of a message that is to be
+             terminated by a later send_message call with end=True.
+        binary: send payload_data as binary frame(s).
+    Raises:
+        BadOperationException: when the server has already terminated the
+            connection.
+    """
+    request.ws_stream.send_message(payload_data, end, binary)
+
+
+def receive_message(request):
+    """Receive a WebSocket frame and return its payload as a text in
+    unicode or a binary in str.
+
+    Args:
+        request: mod_python request.
+    Raises:
+        InvalidFrameException:     when the client sends an invalid frame.
+        UnsupportedFrameException: when the client sends an unsupported frame,
+                                   e.g. a reserved bit is set but no extension
+                                   can recognize it.
+        InvalidUTF8Exception:      when the client sends a text frame
+                                   containing an invalid UTF-8 string.
+        ConnectionTerminatedException: when the connection is closed
+                                   unexpectedly.
+        BadOperationException:     when the client has already terminated the
+                                   connection.
+    """
+    return request.ws_stream.receive_message()
+
+
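+# Illustrative sketch, not part of the upstream module: an echo-style handler
+# in the shape pywebsocket expects from *_wsh.py handler files, assuming the
+# handler file does 'from mod_pywebsocket import msgutil'.
+#
+#   def web_socket_do_extra_handshake(request):
+#       pass  # accept the handshake as-is
+#
+#   def web_socket_transfer_data(request):
+#       while True:
+#           message = msgutil.receive_message(request)
+#           if message is None:
+#               return  # the client closed the connection
+#           msgutil.send_message(request, message)
+
+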
+def send_ping(request, body=''):
+    request.ws_stream.send_ping(body)
+
+
+class MessageReceiver(threading.Thread):
+    """This class receives messages from the client.
+
+    This class provides three ways to receive messages: blocking,
+    non-blocking, and via callback. Callback has the highest precedence.
+
+    Note: This class should not be used with the standalone server for wss
+    because pyOpenSSL used by the server raises a fatal error if the socket
+    is accessed from multiple threads.
+    """
+
+    def __init__(self, request, onmessage=None):
+        """Construct an instance.
+
+        Args:
+            request: mod_python request.
+            onmessage: a function to be called when a message is received.
+                       May be None. If not None, the function is called on
+                       another thread. In that case, MessageReceiver.receive
+                       and MessageReceiver.receive_nowait are useless
+                       because they will never return any messages.
+        """
+
+        threading.Thread.__init__(self)
+        self._request = request
+        self._queue = Queue.Queue()
+        self._onmessage = onmessage
+        self._stop_requested = False
+        self.setDaemon(True)
+        self.start()
+
+    def run(self):
+        try:
+            while not self._stop_requested:
+                message = receive_message(self._request)
+                if self._onmessage:
+                    self._onmessage(message)
+                else:
+                    self._queue.put(message)
+        finally:
+            close_connection(self._request)
+
+    def receive(self):
+        """ Receive a message from the channel, blocking.
+
+        Returns:
+            message as a unicode string.
+        """
+        return self._queue.get()
+
+    def receive_nowait(self):
+        """ Receive a message from the channel, non-blocking.
+
+        Returns:
+            message as a unicode string if available. None otherwise.
+        """
+        try:
+            message = self._queue.get_nowait()
+        except Queue.Empty:
+            message = None
+        return message
+
+    def stop(self):
+        """Request to stop this instance.
+
+        The instance will be stopped after receiving the next message.
+        This method may not be very useful, but there is no clean way
+        in Python to forcefully stop a running thread.
+        """
+        self._stop_requested = True
+
+
+class MessageSender(threading.Thread):
+    """This class sends messages to the client.
+
+    This class provides both synchronous and asynchronous ways to send
+    messages.
+
+    Note: This class should not be used with the standalone server for wss
+    because pyOpenSSL used by the server raises a fatal error if the socket
+    is accessed from multiple threads.
+    """
+
+    def __init__(self, request):
+        """Construct an instance.
+
+        Args:
+            request: mod_python request.
+        """
+        threading.Thread.__init__(self)
+        self._request = request
+        self._queue = Queue.Queue()
+        self.setDaemon(True)
+        self.start()
+
+    def run(self):
+        while True:
+            message, condition = self._queue.get()
+            condition.acquire()
+            send_message(self._request, message)
+            condition.notify()
+            condition.release()
+
+    def send(self, message):
+        """Send a message, blocking."""
+
+        condition = threading.Condition()
+        condition.acquire()
+        self._queue.put((message, condition))
+        condition.wait()
+
+    def send_nowait(self, message):
+        """Send a message, non-blocking."""
+
+        self._queue.put((message, threading.Condition()))
+
+
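+# Illustrative usage sketch, not part of the upstream module: using the helper
+# threads from a handler, e.g. inside web_socket_transfer_data(request).
+# As noted in the class docstrings, these classes are not for the standalone
+# wss server.
+#
+#   receiver = MessageReceiver(request)  # starts a reader thread
+#   sender = MessageSender(request)      # starts a writer thread
+#   message = receiver.receive()         # blocks until a message arrives
+#   sender.send(u'echo: ' + message)     # blocks until the send completes
+
+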
+# vi:sts=4 sw=4 et
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/mux.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/mux.py
new file mode 100644
index 0000000..f0bdd24
--- /dev/null
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/mux.py
@@ -0,0 +1,1636 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides classes and helper functions for multiplexing extension.
+
+Specification:
+http://tools.ietf.org/html/draft-ietf-hybi-websocket-multiplexing-06
+"""
+
+
+import collections
+import copy
+import email
+import email.parser
+import logging
+import math
+import struct
+import threading
+import traceback
+
+from mod_pywebsocket import common
+from mod_pywebsocket import handshake
+from mod_pywebsocket import util
+from mod_pywebsocket._stream_base import BadOperationException
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_hybi import Frame
+from mod_pywebsocket._stream_hybi import Stream
+from mod_pywebsocket._stream_hybi import StreamOptions
+from mod_pywebsocket._stream_hybi import create_binary_frame
+from mod_pywebsocket._stream_hybi import create_closing_handshake_body
+from mod_pywebsocket._stream_hybi import create_header
+from mod_pywebsocket._stream_hybi import create_length_header
+from mod_pywebsocket._stream_hybi import parse_frame
+from mod_pywebsocket.handshake import hybi
+
+
+_CONTROL_CHANNEL_ID = 0
+_DEFAULT_CHANNEL_ID = 1
+
+_MUX_OPCODE_ADD_CHANNEL_REQUEST = 0
+_MUX_OPCODE_ADD_CHANNEL_RESPONSE = 1
+_MUX_OPCODE_FLOW_CONTROL = 2
+_MUX_OPCODE_DROP_CHANNEL = 3
+_MUX_OPCODE_NEW_CHANNEL_SLOT = 4
+
+_MAX_CHANNEL_ID = 2 ** 29 - 1
+
+_INITIAL_NUMBER_OF_CHANNEL_SLOTS = 64
+_INITIAL_QUOTA_FOR_CLIENT = 8 * 1024
+
+_HANDSHAKE_ENCODING_IDENTITY = 0
+_HANDSHAKE_ENCODING_DELTA = 1
+
+# We need only this status code for now.
+_HTTP_BAD_RESPONSE_MESSAGES = {
+    common.HTTP_STATUS_BAD_REQUEST: 'Bad Request',
+}
+
+# DropChannel reason codes
+# TODO(bashi): Define all reason codes defined in the -05 draft.
+_DROP_CODE_NORMAL_CLOSURE = 1000
+
+_DROP_CODE_INVALID_ENCAPSULATING_MESSAGE = 2001
+_DROP_CODE_CHANNEL_ID_TRUNCATED = 2002
+_DROP_CODE_ENCAPSULATED_FRAME_IS_TRUNCATED = 2003
+_DROP_CODE_UNKNOWN_MUX_OPCODE = 2004
+_DROP_CODE_INVALID_MUX_CONTROL_BLOCK = 2005
+_DROP_CODE_CHANNEL_ALREADY_EXISTS = 2006
+_DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION = 2007
+
+_DROP_CODE_UNKNOWN_REQUEST_ENCODING = 3002
+_DROP_CODE_SEND_QUOTA_VIOLATION = 3005
+_DROP_CODE_ACKNOWLEDGED = 3008
+
+
+class MuxUnexpectedException(Exception):
+    """Exception in handling multiplexing extension."""
+    pass
+
+
+# Temporary
+class MuxNotImplementedException(Exception):
+    """Raised when a flow enters unimplemented code path."""
+    pass
+
+
+class LogicalConnectionClosedException(Exception):
+    """Raised when logical connection is gracefully closed."""
+    pass
+
+
+class PhysicalConnectionError(Exception):
+    """Raised when there is a physical connection error."""
+    def __init__(self, drop_code, message=''):
+        super(PhysicalConnectionError, self).__init__(
+            'code=%d, message=%r' % (drop_code, message))
+        self.drop_code = drop_code
+        self.message = message
+
+
+class LogicalChannelError(Exception):
+    """Raised when there is a logical channel error."""
+    def __init__(self, channel_id, drop_code, message=''):
+        super(LogicalChannelError, self).__init__(
+            'channel_id=%d, code=%d, message=%r' % (
+                channel_id, drop_code, message))
+        self.channel_id = channel_id
+        self.drop_code = drop_code
+        self.message = message
+
+
+def _encode_channel_id(channel_id):
+    if channel_id < 0:
+        raise ValueError('Channel id %d must not be negative' % channel_id)
+
+    if channel_id < 2 ** 7:
+        return chr(channel_id)
+    if channel_id < 2 ** 14:
+        return struct.pack('!H', 0x8000 + channel_id)
+    if channel_id < 2 ** 21:
+        first = chr(0xc0 + (channel_id >> 16))
+        return first + struct.pack('!H', channel_id & 0xffff)
+    if channel_id < 2 ** 29:
+        return struct.pack('!L', 0xe0000000 + channel_id)
+
+    raise ValueError('Channel id %d is too large' % channel_id)
+
+
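+# Illustrative sketch, not part of the upstream module: the channel id uses a
+# variable-length encoding of 1, 2, 3 or 4 bytes depending on its value.
+def _example_encode_channel_id_lengths():
+    assert len(_encode_channel_id(5)) == 1        # < 2 ** 7
+    assert len(_encode_channel_id(300)) == 2      # < 2 ** 14
+    assert len(_encode_channel_id(2 ** 20)) == 3  # < 2 ** 21
+    assert len(_encode_channel_id(2 ** 28)) == 4  # < 2 ** 29
+
+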
+def _encode_number(number):
+    return create_length_header(number, False)
+
+
+def _create_add_channel_response(channel_id, encoded_handshake,
+                                 encoding=0, rejected=False,
+                                 outer_frame_mask=False):
+    if encoding != 0 and encoding != 1:
+        raise ValueError('Invalid encoding %d' % encoding)
+
+    first_byte = ((_MUX_OPCODE_ADD_CHANNEL_RESPONSE << 5) |
+                  (rejected << 4) | encoding)
+    block = (chr(first_byte) +
+             _encode_channel_id(channel_id) +
+             _encode_number(len(encoded_handshake)) +
+             encoded_handshake)
+    payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + block
+    return create_binary_frame(payload, mask=outer_frame_mask)
+
+
+def _create_drop_channel(channel_id, code=None, message='',
+                         outer_frame_mask=False):
+    if len(message) > 0 and code is None:
+        raise ValueError('Code must be specified if message is specified')
+
+    first_byte = _MUX_OPCODE_DROP_CHANNEL << 5
+    block = chr(first_byte) + _encode_channel_id(channel_id)
+    if code is None:
+        block += _encode_number(0) # Reason size
+    else:
+        reason = struct.pack('!H', code) + message
+        reason_size = _encode_number(len(reason))
+        block += reason_size + reason
+
+    payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + block
+    return create_binary_frame(payload, mask=outer_frame_mask)
+
+
+def _create_flow_control(channel_id, replenished_quota,
+                         outer_frame_mask=False):
+    first_byte = _MUX_OPCODE_FLOW_CONTROL << 5
+    block = (chr(first_byte) +
+             _encode_channel_id(channel_id) +
+             _encode_number(replenished_quota))
+    payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + block
+    return create_binary_frame(payload, mask=outer_frame_mask)
+
+
+def _create_new_channel_slot(slots, send_quota, outer_frame_mask=False):
+    if slots < 0 or send_quota < 0:
+        raise ValueError('slots and send_quota must be non-negative.')
+    first_byte = _MUX_OPCODE_NEW_CHANNEL_SLOT << 5
+    block = (chr(first_byte) +
+             _encode_number(slots) +
+             _encode_number(send_quota))
+    payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + block
+    return create_binary_frame(payload, mask=outer_frame_mask)
+
+
+def _create_fallback_new_channel_slot(outer_frame_mask=False):
+    first_byte = (_MUX_OPCODE_NEW_CHANNEL_SLOT << 5) | 1 # Set the F flag
+    block = (chr(first_byte) + _encode_number(0) + _encode_number(0))
+    payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + block
+    return create_binary_frame(payload, mask=outer_frame_mask)
+
+
+def _parse_request_text(request_text):
+    request_line, header_lines = request_text.split('\r\n', 1)
+
+    words = request_line.split(' ')
+    if len(words) != 3:
+        raise ValueError('Bad Request-Line syntax %r' % request_line)
+    [command, path, version] = words
+    if version != 'HTTP/1.1':
+        raise ValueError('Bad request version %r' % version)
+
+    # email.parser.Parser() parses RFC 2822 (RFC 822) style headers.
+    # RFC 6455 refers RFC 2616 for handshake parsing, and RFC 2616 refers
+    # RFC 822.
+    headers = email.parser.Parser().parsestr(header_lines)
+    return command, path, version, headers
+
+
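+# Illustrative usage sketch, not part of the upstream module: the encoded
+# handshake carried by an AddChannelRequest is plain HTTP/1.1 request text.
+# The request text below is a made-up example.
+def _example_parse_request_text():
+    command, path, version, headers = _parse_request_text(
+        'GET /chat HTTP/1.1\r\nHost: example.com\r\n\r\n')
+    assert (command, path, version) == ('GET', '/chat', 'HTTP/1.1')
+    assert headers['Host'] == 'example.com'
+
+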
+class _ControlBlock(object):
+    """A structure that holds parsing result of multiplexing control block.
+    Control block specific attributes will be added by _MuxFramePayloadParser.
+    (e.g. encoded_handshake will be added for AddChannelRequest and
+    AddChannelResponse)
+    """
+
+    def __init__(self, opcode):
+        self.opcode = opcode
+
+
+class _MuxFramePayloadParser(object):
+    """A class that parses multiplexed frame payload."""
+
+    def __init__(self, payload):
+        self._data = payload
+        self._read_position = 0
+        self._logger = util.get_class_logger(self)
+
+    def read_channel_id(self):
+        """Reads channel id.
+
+        Raises:
+            ValueError: when the payload doesn't contain
+                a valid channel id.
+        """
+
+        remaining_length = len(self._data) - self._read_position
+        pos = self._read_position
+        if remaining_length == 0:
+            raise ValueError('Invalid channel id format')
+
+        channel_id = ord(self._data[pos])
+        channel_id_length = 1
+        if channel_id & 0xe0 == 0xe0:
+            if remaining_length < 4:
+                raise ValueError('Invalid channel id format')
+            channel_id = struct.unpack('!L',
+                                       self._data[pos:pos+4])[0] & 0x1fffffff
+            channel_id_length = 4
+        elif channel_id & 0xc0 == 0xc0:
+            if remaining_length < 3:
+                raise ValueError('Invalid channel id format')
+            channel_id = (((channel_id & 0x1f) << 16) +
+                          struct.unpack('!H', self._data[pos+1:pos+3])[0])
+            channel_id_length = 3
+        elif channel_id & 0x80 == 0x80:
+            if remaining_length < 2:
+                raise ValueError('Invalid channel id format')
+            channel_id = struct.unpack('!H',
+                                       self._data[pos:pos+2])[0] & 0x3fff
+            channel_id_length = 2
+        self._read_position += channel_id_length
+
+        return channel_id
+
+    def read_inner_frame(self):
+        """Reads an inner frame.
+
+        Raises:
+            PhysicalConnectionError: when the inner frame is invalid.
+        """
+
+        if len(self._data) == self._read_position:
+            raise PhysicalConnectionError(
+                _DROP_CODE_ENCAPSULATED_FRAME_IS_TRUNCATED)
+
+        bits = ord(self._data[self._read_position])
+        self._read_position += 1
+        fin = (bits & 0x80) == 0x80
+        rsv1 = (bits & 0x40) == 0x40
+        rsv2 = (bits & 0x20) == 0x20
+        rsv3 = (bits & 0x10) == 0x10
+        opcode = bits & 0xf
+        payload = self.remaining_data()
+        # Consume rest of the message which is payload data of the original
+        # frame.
+        self._read_position = len(self._data)
+        return fin, rsv1, rsv2, rsv3, opcode, payload
+
+    def _read_number(self):
+        if self._read_position + 1 > len(self._data):
+            raise PhysicalConnectionError(
+                _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+                'Cannot read the first byte of number field')
+
+        number = ord(self._data[self._read_position])
+        if number & 0x80 == 0x80:
+            raise PhysicalConnectionError(
+                _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+                'The most significant bit of the first byte of number should '
+                'be unset')
+        self._read_position += 1
+        pos = self._read_position
+        if number == 127:
+            if pos + 8 > len(self._data):
+                raise PhysicalConnectionError(
+                    _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+                    'Invalid number field')
+            self._read_position += 8
+            number = struct.unpack('!Q', self._data[pos:pos+8])[0]
+            if number > 0x7FFFFFFFFFFFFFFF:
+                raise PhysicalConnectionError(
+                    _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+                    'Encoded number >= 2^63')
+            if number <= 0xFFFF:
+                raise PhysicalConnectionError(
+                    _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+                    '%d should not be encoded by 9 bytes encoding' % number)
+            return number
+        if number == 126:
+            if pos + 2 > len(self._data):
+                raise PhysicalConnectionError(
+                    _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+                    'Invalid number field')
+            self._read_position += 2
+            number = struct.unpack('!H', self._data[pos:pos+2])[0]
+            if number <= 125:
+                raise PhysicalConnectionError(
+                    _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+                    '%d should not be encoded by 3 bytes encoding' % number)
+        return number
+
+    def _read_size_and_contents(self):
+        """Reads data that consists of followings:
+            - the size of the contents encoded the same way as payload length
+              of the WebSocket Protocol with 1 bit padding at the head.
+            - the contents.
+        """
+
+        size = self._read_number()
+        pos = self._read_position
+        if pos + size > len(self._data):
+            raise PhysicalConnectionError(
+                _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+                'Cannot read %d bytes data' % size)
+
+        self._read_position += size
+        return self._data[pos:pos+size]
+
+    def _read_add_channel_request(self, first_byte, control_block):
+        reserved = (first_byte >> 2) & 0x7
+        if reserved != 0:
+            raise PhysicalConnectionError(
+                _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+                'Reserved bits must be unset')
+
+        # Invalid encoding will be handled by MuxHandler.
+        encoding = first_byte & 0x3
+        try:
+            control_block.channel_id = self.read_channel_id()
+        except ValueError, e:
+            raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
+        control_block.encoding = encoding
+        encoded_handshake = self._read_size_and_contents()
+        control_block.encoded_handshake = encoded_handshake
+        return control_block
+
+    def _read_add_channel_response(self, first_byte, control_block):
+        reserved = (first_byte >> 2) & 0x3
+        if reserved != 0:
+            raise PhysicalConnectionError(
+                _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+                'Reserved bits must be unset')
+
+        control_block.accepted = (first_byte >> 4) & 1
+        control_block.encoding = first_byte & 0x3
+        try:
+            control_block.channel_id = self.read_channel_id()
+        except ValueError, e:
+            raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
+        control_block.encoded_handshake = self._read_size_and_contents()
+        return control_block
+
+    def _read_flow_control(self, first_byte, control_block):
+        reserved = first_byte & 0x1f
+        if reserved != 0:
+            raise PhysicalConnectionError(
+                _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+                'Reserved bits must be unset')
+
+        try:
+            control_block.channel_id = self.read_channel_id()
+        except ValueError, e:
+            raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
+        control_block.send_quota = self._read_number()
+        return control_block
+
+    def _read_drop_channel(self, first_byte, control_block):
+        reserved = first_byte & 0x1f
+        if reserved != 0:
+            raise PhysicalConnectionError(
+                _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+                'Reserved bits must be unset')
+
+        try:
+            control_block.channel_id = self.read_channel_id()
+        except ValueError, e:
+            raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
+        reason = self._read_size_and_contents()
+        if len(reason) == 0:
+            control_block.drop_code = None
+            control_block.drop_message = ''
+        elif len(reason) >= 2:
+            control_block.drop_code = struct.unpack('!H', reason[:2])[0]
+            control_block.drop_message = reason[2:]
+        else:
+            raise PhysicalConnectionError(
+                _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+                'Received DropChannel that contains only a 1-byte reason')
+        return control_block
+
+    def _read_new_channel_slot(self, first_byte, control_block):
+        reserved = first_byte & 0x1e
+        if reserved != 0:
+            raise PhysicalConnectionError(
+                _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+                'Reserved bits must be unset')
+        control_block.fallback = first_byte & 1
+        control_block.slots = self._read_number()
+        control_block.send_quota = self._read_number()
+        return control_block
+
+    def read_control_blocks(self):
+        """Reads control block(s).
+
+        Raises:
+           PhysicalConnectionError: when the payload contains invalid control
+               block(s).
+           StopIteration: when no control blocks are left.
+        """
+
+        while self._read_position < len(self._data):
+            first_byte = ord(self._data[self._read_position])
+            self._read_position += 1
+            opcode = (first_byte >> 5) & 0x7
+            control_block = _ControlBlock(opcode=opcode)
+            if opcode == _MUX_OPCODE_ADD_CHANNEL_REQUEST:
+                yield self._read_add_channel_request(first_byte, control_block)
+            elif opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
+                yield self._read_add_channel_response(
+                    first_byte, control_block)
+            elif opcode == _MUX_OPCODE_FLOW_CONTROL:
+                yield self._read_flow_control(first_byte, control_block)
+            elif opcode == _MUX_OPCODE_DROP_CHANNEL:
+                yield self._read_drop_channel(first_byte, control_block)
+            elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
+                yield self._read_new_channel_slot(first_byte, control_block)
+            else:
+                raise PhysicalConnectionError(
+                    _DROP_CODE_UNKNOWN_MUX_OPCODE,
+                    'Invalid opcode %d' % opcode)
+
+        assert self._read_position == len(self._data)
+        raise StopIteration
+
+    def remaining_data(self):
+        """Returns remaining data."""
+
+        return self._data[self._read_position:]
+
+
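+# Illustrative sketch, not part of the upstream module: _encode_channel_id and
+# _MuxFramePayloadParser.read_channel_id round-trip a channel id.
+def _example_channel_id_round_trip():
+    parser = _MuxFramePayloadParser(_encode_channel_id(1000))
+    assert parser.read_channel_id() == 1000
+    assert parser.remaining_data() == ''
+
+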
+class _LogicalRequest(object):
+    """Mimics mod_python request."""
+
+    def __init__(self, channel_id, command, path, protocol, headers,
+                 connection):
+        """Constructs an instance.
+
+        Args:
+            channel_id: the channel id of the logical channel.
+            command: HTTP request command.
+            path: HTTP request path.
+            headers: HTTP headers.
+            connection: _LogicalConnection instance.
+        """
+
+        self.channel_id = channel_id
+        self.method = command
+        self.uri = path
+        self.protocol = protocol
+        self.headers_in = headers
+        self.connection = connection
+        self.server_terminated = False
+        self.client_terminated = False
+
+    def is_https(self):
+        """Mimics request.is_https(). Returns False because this method is
+        used only by old protocols (hixie and hybi00).
+        """
+
+        return False
+
+
+class _LogicalConnection(object):
+    """Mimics mod_python mp_conn."""
+
+    # For details, see the comment of set_read_state().
+    STATE_ACTIVE = 1
+    STATE_GRACEFULLY_CLOSED = 2
+    STATE_TERMINATED = 3
+
+    def __init__(self, mux_handler, channel_id):
+        """Constructs an instance.
+
+        Args:
+            mux_handler: _MuxHandler instance.
+            channel_id: channel id of this connection.
+        """
+
+        self._mux_handler = mux_handler
+        self._channel_id = channel_id
+        self._incoming_data = ''
+        self._write_condition = threading.Condition()
+        self._waiting_write_completion = False
+        self._read_condition = threading.Condition()
+        self._read_state = self.STATE_ACTIVE
+
+    def get_local_addr(self):
+        """Getter to mimic mp_conn.local_addr."""
+
+        return self._mux_handler.physical_connection.get_local_addr()
+    local_addr = property(get_local_addr)
+
+    def get_remote_addr(self):
+        """Getter to mimic mp_conn.remote_addr."""
+
+        return self._mux_handler.physical_connection.get_remote_addr()
+    remote_addr = property(get_remote_addr)
+
+    def get_memorized_lines(self):
+        """Gets memorized lines. Not supported."""
+
+        raise MuxUnexpectedException('_LogicalConnection does not support '
+                                     'get_memorized_lines')
+
+    def write(self, data):
+        """Writes data. mux_handler sends data asynchronously. The caller will
+        be suspended until write done.
+
+        Args:
+            data: data to be written.
+
+        Raises:
+            MuxUnexpectedException: when called before finishing the previous
+                write.
+        """
+
+        try:
+            self._write_condition.acquire()
+            if self._waiting_write_completion:
+                raise MuxUnexpectedException(
+                    'Logical connection %d is already waiting for the '
+                    'completion of a write' % self._channel_id)
+
+            self._waiting_write_completion = True
+            self._mux_handler.send_data(self._channel_id, data)
+            self._write_condition.wait()
+        finally:
+            self._write_condition.release()
+
+    def write_control_data(self, data):
+        """Writes data via the control channel. Don't wait finishing write
+        because this method can be called by mux dispatcher.
+
+        Args:
+            data: data to be written.
+        """
+
+        self._mux_handler.send_control_data(data)
+
+    def notify_write_done(self):
+        """Called when sending data is completed."""
+
+        try:
+            self._write_condition.acquire()
+            if not self._waiting_write_completion:
+                raise MuxUnexpectedException(
+                    'Invalid call of notify_write_done for logical connection'
+                    ' %d' % self._channel_id)
+            self._waiting_write_completion = False
+            self._write_condition.notify()
+        finally:
+            self._write_condition.release()
+
+    def append_frame_data(self, frame_data):
+        """Appends incoming frame data. Called when mux_handler dispatches
+        frame data to the corresponding application.
+
+        Args:
+            frame_data: incoming frame data.
+        """
+
+        self._read_condition.acquire()
+        self._incoming_data += frame_data
+        self._read_condition.notify()
+        self._read_condition.release()
+
+    def read(self, length):
+        """Reads data. Blocks until enough data has arrived via physical
+        connection.
+
+        Args:
+            length: length of data to be read.
+        Raises:
+            LogicalConnectionClosedException: when the closing handshake for
+                this logical channel has been received.
+            ConnectionTerminatedException: when the physical connection has
+                been closed, or an error occurred on the reader thread.
+        """
+
+        self._read_condition.acquire()
+        while (self._read_state == self.STATE_ACTIVE and
+               len(self._incoming_data) < length):
+            self._read_condition.wait()
+
+        try:
+            if self._read_state == self.STATE_GRACEFULLY_CLOSED:
+                raise LogicalConnectionClosedException(
+                    'Logical channel %d has closed.' % self._channel_id)
+            elif self._read_state == self.STATE_TERMINATED:
+                raise ConnectionTerminatedException(
+                    'Receiving %d bytes failed. Logical channel (%d) closed' %
+                    (length, self._channel_id))
+
+            value = self._incoming_data[:length]
+            self._incoming_data = self._incoming_data[length:]
+        finally:
+            self._read_condition.release()
+
+        return value
+
+    def set_read_state(self, new_state):
+        """Sets the state of this connection. Called when an event for this
+        connection has occurred.
+
+        Args:
+            new_state: state to be set. new_state must be one of the
+                following:
+            - STATE_GRACEFULLY_CLOSED: when the closing handshake for this
+                connection has been received.
+            - STATE_TERMINATED: when the physical connection has been closed
+                or a DropChannel for this connection has been received.
+        """
+
+        self._read_condition.acquire()
+        self._read_state = new_state
+        self._read_condition.notify()
+        self._read_condition.release()
+
+
+class _LogicalStream(Stream):
+    """Mimics the Stream class. This class interprets multiplexed WebSocket
+    frames.
+    """
+
+    def __init__(self, request, send_quota, receive_quota):
+        """Constructs an instance.
+
+        Args:
+            request: _LogicalRequest instance.
+            send_quota: Initial send quota.
+            receive_quota: Initial receive quota.
+        """
+
+        # TODO(bashi): Support frame filters.
+        stream_options = StreamOptions()
+        # Physical stream is responsible for masking.
+        stream_options.unmask_receive = False
+        # Control frames can be fragmented on logical channel.
+        stream_options.allow_fragmented_control_frame = True
+        Stream.__init__(self, request, stream_options)
+        self._send_quota = send_quota
+        self._send_quota_condition = threading.Condition()
+        self._receive_quota = receive_quota
+        self._write_inner_frame_semaphore = threading.Semaphore()
+
+    def _create_inner_frame(self, opcode, payload, end=True):
+        # TODO(bashi): Support extensions that use reserved bits.
+        first_byte = (end << 7) | opcode
+        return (_encode_channel_id(self._request.channel_id) +
+                chr(first_byte) + payload)
+
+    def _write_inner_frame(self, opcode, payload, end=True):
+        payload_length = len(payload)
+        write_position = 0
+
+        try:
+            # An inner frame will be fragmented if there is not enough send
+            # quota. This semaphore ensures that fragmented inner frames are
+            # sent in order on the logical channel.
+            # Note that frames that come from other logical channels or
+            # multiplexing control blocks can be inserted between fragmented
+            # inner frames on the physical channel.
+            self._write_inner_frame_semaphore.acquire()
+            while write_position < payload_length:
+                try:
+                    self._send_quota_condition.acquire()
+                    while self._send_quota == 0:
+                        self._logger.debug(
+                            'No quota. Waiting FlowControl message for %d.' %
+                            self._request.channel_id)
+                        self._send_quota_condition.wait()
+
+                    remaining = payload_length - write_position
+                    write_length = min(self._send_quota, remaining)
+                    inner_frame_end = (
+                        end and
+                        (write_position + write_length == payload_length))
+
+                    inner_frame = self._create_inner_frame(
+                        opcode,
+                        payload[write_position:write_position+write_length],
+                        inner_frame_end)
+                    frame_data = self._writer.build(
+                        inner_frame, end=True, binary=True)
+                    self._send_quota -= write_length
+                    self._logger.debug('Consumed quota=%d, remaining=%d' %
+                                       (write_length, self._send_quota))
+                finally:
+                    self._send_quota_condition.release()
+
+                # Writing data will block the worker so we need to release
+                # _send_quota_condition before writing.
+                self._logger.debug('Sending inner frame: %r' % frame_data)
+                self._request.connection.write(frame_data)
+                write_position += write_length
+
+                opcode = common.OPCODE_CONTINUATION
+
+        except ValueError, e:
+            raise BadOperationException(e)
+        finally:
+            self._write_inner_frame_semaphore.release()
+
+    def replenish_send_quota(self, send_quota):
+        """Replenish send quota."""
+
+        self._send_quota_condition.acquire()
+        self._send_quota += send_quota
+        self._logger.debug('Replenished send quota for channel id %d: %d' %
+                           (self._request.channel_id, self._send_quota))
+        self._send_quota_condition.notify()
+        self._send_quota_condition.release()
+
+    def consume_receive_quota(self, amount):
+        """Consumes receive quota. Returns False on failure."""
+
+        if self._receive_quota < amount:
+            self._logger.debug('Violate quota on channel id %d: %d < %d' %
+                               (self._request.channel_id,
+                                self._receive_quota, amount))
+            return False
+        self._receive_quota -= amount
+        return True
+
+    def send_message(self, message, end=True, binary=False):
+        """Override Stream.send_message."""
+
+        if self._request.server_terminated:
+            raise BadOperationException(
+                'Requested send_message after sending out a closing handshake')
+
+        if binary and isinstance(message, unicode):
+            raise BadOperationException(
+                'Message for binary frame must be instance of str')
+
+        if binary:
+            opcode = common.OPCODE_BINARY
+        else:
+            opcode = common.OPCODE_TEXT
+            message = message.encode('utf-8')
+
+        self._write_inner_frame(opcode, message, end)
+
+    def _receive_frame(self):
+        """Overrides Stream._receive_frame.
+
+        In addition to calling Stream._receive_frame, this method adds the
+        payload length to the receive quota and sends FlowControl to the
+        client.
+        We need to do it here because Stream.receive_message() handles
+        control frames internally.
+        """
+
+        opcode, payload, fin, rsv1, rsv2, rsv3 = Stream._receive_frame(self)
+        amount = len(payload)
+        self._receive_quota += amount
+        frame_data = _create_flow_control(self._request.channel_id,
+                                          amount)
+        self._logger.debug('Sending flow control for %d, replenished=%d' %
+                           (self._request.channel_id, amount))
+        self._request.connection.write_control_data(frame_data)
+        return opcode, payload, fin, rsv1, rsv2, rsv3
+
+    def receive_message(self):
+        """Overrides Stream.receive_message."""
+
+        # Just call Stream.receive_message(), but catch
+        # LogicalConnectionClosedException, which is raised when the logical
+        # connection has closed gracefully.
+        try:
+            return Stream.receive_message(self)
+        except LogicalConnectionClosedException, e:
+            self._logger.debug('%s', e)
+            return None
+
+    def _send_closing_handshake(self, code, reason):
+        """Overrides Stream._send_closing_handshake."""
+
+        body = create_closing_handshake_body(code, reason)
+        self._logger.debug('Sending closing handshake for %d: (%r, %r)' %
+                           (self._request.channel_id, code, reason))
+        self._write_inner_frame(common.OPCODE_CLOSE, body, end=True)
+
+        self._request.server_terminated = True
+
+    def send_ping(self, body=''):
+        """Overrides Stream.send_ping"""
+
+        self._logger.debug('Sending ping on logical channel %d: %r' %
+                           (self._request.channel_id, body))
+        self._write_inner_frame(common.OPCODE_PING, body, end=True)
+
+        self._ping_queue.append(body)
+
+    def _send_pong(self, body):
+        """Overrides Stream._send_pong"""
+
+        self._logger.debug('Sending pong on logical channel %d: %r' %
+                           (self._request.channel_id, body))
+        self._write_inner_frame(common.OPCODE_PONG, body, end=True)
+
+    def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason=''):
+        """Overrides Stream.close_connection."""
+
+        # TODO(bashi): Implement
+        self._logger.debug('Closing logical connection %d' %
+                           self._request.channel_id)
+        self._request.server_terminated = True
+
+    def _drain_received_data(self):
+        """Overrides Stream._drain_received_data. Nothing need to be done for
+        logical channel.
+        """
+
+        pass
+
+
+class _OutgoingData(object):
+    """A structure that holds data to be sent via physical connection and
+    origin of the data.
+    """
+
+    def __init__(self, channel_id, data):
+        self.channel_id = channel_id
+        self.data = data
+
+
+class _PhysicalConnectionWriter(threading.Thread):
+    """A thread that is responsible for writing data to physical connection.
+
+    TODO(bashi): Make sure there is no thread-safety problem when the reader
+    thread reads data from the same socket at a time.
+    """
+
+    def __init__(self, mux_handler):
+        """Constructs an instance.
+
+        Args:
+            mux_handler: _MuxHandler instance.
+        """
+
+        threading.Thread.__init__(self)
+        self._logger = util.get_class_logger(self)
+        self._mux_handler = mux_handler
+        self.setDaemon(True)
+        self._stop_requested = False
+        self._deque = collections.deque()
+        self._deque_condition = threading.Condition()
+
+    def put_outgoing_data(self, data):
+        """Puts outgoing data.
+
+        Args:
+            data: _OutgoingData instance.
+
+        Raises:
+            BadOperationException: when the thread has been requested to
+                terminate.
+        """
+
+        try:
+            self._deque_condition.acquire()
+            if self._stop_requested:
+                raise BadOperationException('Cannot write data anymore')
+
+            self._deque.append(data)
+            self._deque_condition.notify()
+        finally:
+            self._deque_condition.release()
+
+    def _write_data(self, outgoing_data):
+        try:
+            self._mux_handler.physical_connection.write(outgoing_data.data)
+        except Exception, e:
+            util.prepend_message_to_exception(
+                'Failed to send message to %r: ' %
+                (self._mux_handler.physical_connection.remote_addr,), e)
+            raise
+
+        # TODO(bashi): It would be better to block the thread that sends
+        # control data as well.
+        if outgoing_data.channel_id != _CONTROL_CHANNEL_ID:
+            self._mux_handler.notify_write_done(outgoing_data.channel_id)
+
+    def run(self):
+        self._deque_condition.acquire()
+        while not self._stop_requested:
+            if len(self._deque) == 0:
+                self._deque_condition.wait()
+                continue
+
+            outgoing_data = self._deque.popleft()
+            self._deque_condition.release()
+            self._write_data(outgoing_data)
+            self._deque_condition.acquire()
+
+        # Flush deque
+        try:
+            while len(self._deque) > 0:
+                outgoing_data = self._deque.popleft()
+                self._write_data(outgoing_data)
+        finally:
+            self._deque_condition.release()
+
+    def stop(self):
+        """Stops the writer thread."""
+
+        self._deque_condition.acquire()
+        self._stop_requested = True
+        self._deque_condition.notify()
+        self._deque_condition.release()
+
+
+class _PhysicalConnectionReader(threading.Thread):
+    """A thread that is responsible for reading data from physical connection.
+    """
+
+    def __init__(self, mux_handler):
+        """Constructs an instance.
+
+        Args:
+            mux_handler: _MuxHandler instance.
+        """
+
+        threading.Thread.__init__(self)
+        self._logger = util.get_class_logger(self)
+        self._mux_handler = mux_handler
+        self.setDaemon(True)
+
+    def run(self):
+        while True:
+            try:
+                physical_stream = self._mux_handler.physical_stream
+                message = physical_stream.receive_message()
+                if message is None:
+                    break
+                # The code below runs only when a data message is received.
+                opcode = physical_stream.get_last_received_opcode()
+                if opcode != common.OPCODE_BINARY:
+                    self._mux_handler.fail_physical_connection(
+                        _DROP_CODE_INVALID_ENCAPSULATING_MESSAGE,
+                        'Received a text message on physical connection')
+                    break
+
+            except ConnectionTerminatedException, e:
+                self._logger.debug('%s', e)
+                break
+
+            try:
+                self._mux_handler.dispatch_message(message)
+            except PhysicalConnectionError, e:
+                self._mux_handler.fail_physical_connection(
+                    e.drop_code, e.message)
+                break
+            except LogicalChannelError, e:
+                self._mux_handler.fail_logical_channel(
+                    e.channel_id, e.drop_code, e.message)
+            except Exception, e:
+                self._logger.debug(traceback.format_exc())
+                break
+
+        self._mux_handler.notify_reader_done()
+
+
+class _Worker(threading.Thread):
+    """A thread that is responsible for running the corresponding application
+    handler.
+    """
+
+    def __init__(self, mux_handler, request):
+        """Constructs an instance.
+
+        Args:
+            mux_handler: _MuxHandler instance.
+            request: _LogicalRequest instance.
+        """
+
+        threading.Thread.__init__(self)
+        self._logger = util.get_class_logger(self)
+        self._mux_handler = mux_handler
+        self._request = request
+        self.setDaemon(True)
+
+    def run(self):
+        self._logger.debug('Logical channel worker started. (id=%d)' %
+                           self._request.channel_id)
+        try:
+            # Non-critical exceptions will be handled by dispatcher.
+            self._mux_handler.dispatcher.transfer_data(self._request)
+        finally:
+            self._mux_handler.notify_worker_done(self._request.channel_id)
+
+
+class _MuxHandshaker(hybi.Handshaker):
+    """Opening handshake processor for multiplexing."""
+
+    _DUMMY_WEBSOCKET_KEY = 'dGhlIHNhbXBsZSBub25jZQ=='
+
+    def __init__(self, request, dispatcher, send_quota, receive_quota):
+        """Constructs an instance.
+        Args:
+            request: _LogicalRequest instance.
+            dispatcher: Dispatcher instance (dispatch.Dispatcher).
+            send_quota: Initial send quota.
+            receive_quota: Initial receive quota.
+        """
+
+        hybi.Handshaker.__init__(self, request, dispatcher)
+        self._send_quota = send_quota
+        self._receive_quota = receive_quota
+
+        # Append headers which should not be included in the handshake field
+        # of an AddChannelRequest.
+        # TODO(bashi): Decide whether we should raise an exception when
+        #     these headers are already included.
+        request.headers_in[common.UPGRADE_HEADER] = (
+            common.WEBSOCKET_UPGRADE_TYPE)
+        request.headers_in[common.CONNECTION_HEADER] = (
+            common.UPGRADE_CONNECTION_TYPE)
+        request.headers_in[common.SEC_WEBSOCKET_VERSION_HEADER] = (
+            str(common.VERSION_HYBI_LATEST))
+        request.headers_in[common.SEC_WEBSOCKET_KEY_HEADER] = (
+            self._DUMMY_WEBSOCKET_KEY)
+
+    def _create_stream(self, stream_options):
+        """Override hybi.Handshaker._create_stream."""
+
+        self._logger.debug('Creating logical stream for %d' %
+                           self._request.channel_id)
+        return _LogicalStream(self._request, self._send_quota,
+                              self._receive_quota)
+
+    def _create_handshake_response(self, accept):
+        """Override hybi._create_handshake_response."""
+
+        response = []
+
+        response.append('HTTP/1.1 101 Switching Protocols\r\n')
+
+        # Upgrade, Connection and Sec-WebSocket-Accept should be excluded.
+        if self._request.ws_protocol is not None:
+            response.append('%s: %s\r\n' % (
+                common.SEC_WEBSOCKET_PROTOCOL_HEADER,
+                self._request.ws_protocol))
+        if (self._request.ws_extensions is not None and
+            len(self._request.ws_extensions) != 0):
+            response.append('%s: %s\r\n' % (
+                common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
+                common.format_extensions(self._request.ws_extensions)))
+        response.append('\r\n')
+
+        return ''.join(response)
+
+    def _send_handshake(self, accept):
+        """Override hybi.Handshaker._send_handshake."""
+
+        # Don't send handshake response for the default channel
+        if self._request.channel_id == _DEFAULT_CHANNEL_ID:
+            return
+
+        handshake_response = self._create_handshake_response(accept)
+        frame_data = _create_add_channel_response(
+                         self._request.channel_id,
+                         handshake_response)
+        self._logger.debug('Sending handshake response for %d: %r' %
+                           (self._request.channel_id, frame_data))
+        self._request.connection.write_control_data(frame_data)
+
+
+class _LogicalChannelData(object):
+    """A structure that holds information about logical channel.
+    """
+
+    def __init__(self, request, worker):
+        self.request = request
+        self.worker = worker
+        self.drop_code = _DROP_CODE_NORMAL_CLOSURE
+        self.drop_message = ''
+
+
+class _HandshakeDeltaBase(object):
+    """A class that holds information for delta-encoded handshake."""
+
+    def __init__(self, headers):
+        self._headers = headers
+
+    def create_headers(self, delta=None):
+        """Creates request headers for an AddChannelRequest that has
+        delta-encoded handshake.
+
+        Args:
+            delta: headers should be overridden.
+        """
+
+        headers = copy.copy(self._headers)
+        if delta:
+            for key, value in delta.items():
+                # The spec requires that a header with an empty value is
+                # removed from the delta base.
+                if len(value) == 0 and headers.has_key(key):
+                    del headers[key]
+                else:
+                    headers[key] = value
+        # TODO(bashi): Support extensions
+        headers['Sec-WebSocket-Extensions'] = ''
+        return headers
+
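+# A minimal sketch of how _HandshakeDeltaBase combines a delta with its base,
+# assuming the base holds a plain dict; the header values are hypothetical:
+#
+#   base = _HandshakeDeltaBase({'Host': 'example.com',
+#                               'Origin': 'http://example.com'})
+#   # An empty value deletes the header from the base, and create_headers()
+#   # always resets Sec-WebSocket-Extensions.
+#   base.create_headers({'Origin': ''})
+#   # -> {'Host': 'example.com', 'Sec-WebSocket-Extensions': ''}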
+
+class _MuxHandler(object):
+    """Multiplexing handler. When a handler starts, it launches three
+    threads; the reader thread, the writer thread, and a worker thread.
+
+    The reader thread reads data from the physical stream, i.e., the
+    ws_stream object of the underlying websocket connection. The reader
+    thread interprets multiplexed frames and dispatches them to logical
+    channels. Methods of this class are mostly called by the reader thread.
+
+    The writer thread sends multiplexed frames which are created by
+    logical channels via the physical connection.
+
+    The worker thread launched at start-up handles the "Implicitly Opened
+    Connection". If the multiplexing handler receives an AddChannelRequest
+    and accepts it, the handler will launch a new worker thread and dispatch
+    the request to it.
+    """
+
+    def __init__(self, request, dispatcher):
+        """Constructs an instance.
+
+        Args:
+            request: mod_python request of the physical connection.
+            dispatcher: Dispatcher instance (dispatch.Dispatcher).
+        """
+
+        self.original_request = request
+        self.dispatcher = dispatcher
+        self.physical_connection = request.connection
+        self.physical_stream = request.ws_stream
+        self._logger = util.get_class_logger(self)
+        self._logical_channels = {}
+        self._logical_channels_condition = threading.Condition()
+        # Holds client's initial quota
+        self._channel_slots = collections.deque()
+        self._handshake_base = None
+        self._worker_done_notify_received = False
+        self._reader = None
+        self._writer = None
+
+    def start(self):
+        """Starts the handler.
+
+        Raises:
+            MuxUnexpectedException: when the handler has already started, or
+                when the opening handshake of the default channel fails.
+        """
+
+        if self._reader or self._writer:
+            raise MuxUnexpectedException('MuxHandler already started')
+
+        self._reader = _PhysicalConnectionReader(self)
+        self._writer = _PhysicalConnectionWriter(self)
+        self._reader.start()
+        self._writer.start()
+
+        # Create "Implicitly Opened Connection".
+        logical_connection = _LogicalConnection(self, _DEFAULT_CHANNEL_ID)
+        self._handshake_base = _HandshakeDeltaBase(
+            self.original_request.headers_in)
+        logical_request = _LogicalRequest(
+            _DEFAULT_CHANNEL_ID,
+            self.original_request.method,
+            self.original_request.uri,
+            self.original_request.protocol,
+            self._handshake_base.create_headers(),
+            logical_connection)
+        # Client's send quota for the implicitly opened connection is zero,
+        # but we will send FlowControl later so set the initial quota to
+        # _INITIAL_QUOTA_FOR_CLIENT.
+        self._channel_slots.append(_INITIAL_QUOTA_FOR_CLIENT)
+        if not self._do_handshake_for_logical_request(
+            logical_request, send_quota=self.original_request.mux_quota):
+            raise MuxUnexpectedException(
+                'Failed handshake on the default channel id')
+        self._add_logical_channel(logical_request)
+
+        # Send FlowControl for the implicitly opened connection.
+        frame_data = _create_flow_control(_DEFAULT_CHANNEL_ID,
+                                          _INITIAL_QUOTA_FOR_CLIENT)
+        logical_request.connection.write_control_data(frame_data)
+
+    def add_channel_slots(self, slots, send_quota):
+        """Adds channel slots.
+
+        Args:
+            slots: number of slots to be added.
+            send_quota: initial send quota for slots.
+        """
+
+        self._channel_slots.extend([send_quota] * slots)
+        # Send NewChannelSlot to client.
+        frame_data = _create_new_channel_slot(slots, send_quota)
+        self.send_control_data(frame_data)
+
+    def wait_until_done(self, timeout=None):
+        """Waits until all workers are done. Returns False when timeout has
+        occurred. Returns True on success.
+
+        Args:
+            timeout: timeout in sec.
+        """
+
+        self._logical_channels_condition.acquire()
+        try:
+            while len(self._logical_channels) > 0:
+                self._logger.debug('Waiting workers(%d)...' %
+                                   len(self._logical_channels))
+                self._worker_done_notify_received = False
+                self._logical_channels_condition.wait(timeout)
+                if not self._worker_done_notify_received:
+                    self._logger.debug('Waiting worker(s) timed out')
+                    return False
+
+        finally:
+            self._logical_channels_condition.release()
+
+        # Flush pending outgoing data
+        self._writer.stop()
+        self._writer.join()
+
+        return True
+
+    def notify_write_done(self, channel_id):
+        """Called by the writer thread when a write operation has done.
+
+        Args:
+            channel_id: objective channel id.
+        """
+
+        try:
+            self._logical_channels_condition.acquire()
+            if channel_id in self._logical_channels:
+                channel_data = self._logical_channels[channel_id]
+                channel_data.request.connection.notify_write_done()
+            else:
+                self._logger.debug('Seems that logical channel for %d has gone'
+                                   % channel_id)
+        finally:
+            self._logical_channels_condition.release()
+
+    def send_control_data(self, data):
+        """Sends data via the control channel.
+
+        Args:
+            data: data to be sent.
+        """
+
+        self._writer.put_outgoing_data(_OutgoingData(
+                channel_id=_CONTROL_CHANNEL_ID, data=data))
+
+    def send_data(self, channel_id, data):
+        """Sends data via given logical channel. This method is called by
+        worker threads.
+
+        Args:
+            channel_id: objective channel id.
+            data: data to be sent.
+        """
+
+        self._writer.put_outgoing_data(_OutgoingData(
+                channel_id=channel_id, data=data))
+
+    def _send_drop_channel(self, channel_id, code=None, message=''):
+        frame_data = _create_drop_channel(channel_id, code, message)
+        self._logger.debug(
+            'Sending drop channel for channel id %d' % channel_id)
+        self.send_control_data(frame_data)
+
+    def _send_error_add_channel_response(self, channel_id, status=None):
+        if status is None:
+            status = common.HTTP_STATUS_BAD_REQUEST
+
+        if status in _HTTP_BAD_RESPONSE_MESSAGES:
+            message = _HTTP_BAD_RESPONSE_MESSAGES[status]
+        else:
+            self._logger.debug('Response message for %d is not found' % status)
+            message = '???'
+
+        response = 'HTTP/1.1 %d %s\r\n\r\n' % (status, message)
+        frame_data = _create_add_channel_response(channel_id,
+                                                  encoded_handshake=response,
+                                                  encoding=0, rejected=True)
+        self.send_control_data(frame_data)
+
+    def _create_logical_request(self, block):
+        if block.channel_id == _CONTROL_CHANNEL_ID:
+            # TODO(bashi): Raise PhysicalConnectionError with code 2006
+            # instead of MuxUnexpectedException.
+            raise MuxUnexpectedException(
+                'Received the control channel id (0) as objective channel '
+                'id for AddChannel')
+
+        if block.encoding > _HANDSHAKE_ENCODING_DELTA:
+            raise PhysicalConnectionError(
+                _DROP_CODE_UNKNOWN_REQUEST_ENCODING)
+
+        method, path, version, headers = _parse_request_text(
+            block.encoded_handshake)
+        if block.encoding == _HANDSHAKE_ENCODING_DELTA:
+            headers = self._handshake_base.create_headers(headers)
+
+        connection = _LogicalConnection(self, block.channel_id)
+        request = _LogicalRequest(block.channel_id, method, path, version,
+                                  headers, connection)
+        return request
+
+    def _do_handshake_for_logical_request(self, request, send_quota=0):
+        try:
+            receive_quota = self._channel_slots.popleft()
+        except IndexError:
+            raise LogicalChannelError(
+                request.channel_id, _DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION)
+
+        handshaker = _MuxHandshaker(request, self.dispatcher,
+                                    send_quota, receive_quota)
+        try:
+            handshaker.do_handshake()
+        except handshake.VersionException, e:
+            self._logger.info('%s', e)
+            self._send_error_add_channel_response(
+                request.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
+            return False
+        except handshake.HandshakeException, e:
+            # TODO(bashi): Should we _Fail the Logical Channel_ with 3001
+            # instead?
+            self._logger.info('%s', e)
+            self._send_error_add_channel_response(request.channel_id,
+                                                  status=e.status)
+            return False
+        except handshake.AbortedByUserException, e:
+            self._logger.info('%s', e)
+            self._send_error_add_channel_response(request.channel_id)
+            return False
+
+        return True
+
+    def _add_logical_channel(self, logical_request):
+        try:
+            self._logical_channels_condition.acquire()
+            if logical_request.channel_id in self._logical_channels:
+                self._logger.debug('Channel id %d already exists' %
+                                   logical_request.channel_id)
+                raise PhysicalConnectionError(
+                    _DROP_CODE_CHANNEL_ALREADY_EXISTS,
+                    'Channel id %d already exists' %
+                    logical_request.channel_id)
+            worker = _Worker(self, logical_request)
+            channel_data = _LogicalChannelData(logical_request, worker)
+            self._logical_channels[logical_request.channel_id] = channel_data
+            worker.start()
+        finally:
+            self._logical_channels_condition.release()
+
+    def _process_add_channel_request(self, block):
+        try:
+            logical_request = self._create_logical_request(block)
+        except ValueError, e:
+            self._logger.debug('Failed to create logical request: %r' % e)
+            self._send_error_add_channel_response(
+                block.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
+            return
+        if self._do_handshake_for_logical_request(logical_request):
+            if block.encoding == _HANDSHAKE_ENCODING_IDENTITY:
+                # Update handshake base.
+                # TODO(bashi): Make sure this is the right place to update
+                # handshake base.
+                self._handshake_base = _HandshakeDeltaBase(
+                    logical_request.headers_in)
+            self._add_logical_channel(logical_request)
+        else:
+            self._send_error_add_channel_response(
+                block.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
+
+    def _process_flow_control(self, block):
+        try:
+            self._logical_channels_condition.acquire()
+            if not block.channel_id in self._logical_channels:
+                return
+            channel_data = self._logical_channels[block.channel_id]
+            channel_data.request.ws_stream.replenish_send_quota(
+                block.send_quota)
+        finally:
+            self._logical_channels_condition.release()
+
+    def _process_drop_channel(self, block):
+        self._logger.debug(
+            'DropChannel received for %d: code=%r, reason=%r' %
+            (block.channel_id, block.drop_code, block.drop_message))
+        try:
+            self._logical_channels_condition.acquire()
+            if not block.channel_id in self._logical_channels:
+                return
+            channel_data = self._logical_channels[block.channel_id]
+            channel_data.drop_code = _DROP_CODE_ACKNOWLEDGED
+            # Close the logical channel
+            channel_data.request.connection.set_read_state(
+                _LogicalConnection.STATE_TERMINATED)
+        finally:
+            self._logical_channels_condition.release()
+
+    def _process_control_blocks(self, parser):
+        for control_block in parser.read_control_blocks():
+            opcode = control_block.opcode
+            self._logger.debug('control block received, opcode: %d' % opcode)
+            if opcode == _MUX_OPCODE_ADD_CHANNEL_REQUEST:
+                self._process_add_channel_request(control_block)
+            elif opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
+                raise PhysicalConnectionError(
+                    _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+                    'Received AddChannelResponse')
+            elif opcode == _MUX_OPCODE_FLOW_CONTROL:
+                self._process_flow_control(control_block)
+            elif opcode == _MUX_OPCODE_DROP_CHANNEL:
+                self._process_drop_channel(control_block)
+            elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
+                raise PhysicalConnectionError(
+                    _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+                    'Received NewChannelSlot')
+            else:
+                raise MuxUnexpectedException(
+                    'Unexpected opcode %r' % opcode)
+
+    def _process_logical_frame(self, channel_id, parser):
+        self._logger.debug('Received a frame. channel id=%d' % channel_id)
+        try:
+            self._logical_channels_condition.acquire()
+            if not channel_id in self._logical_channels:
+                # We must ignore the message for an inactive channel.
+                return
+            channel_data = self._logical_channels[channel_id]
+            fin, rsv1, rsv2, rsv3, opcode, payload = parser.read_inner_frame()
+            if not channel_data.request.ws_stream.consume_receive_quota(
+                len(payload)):
+                # The client violates quota. Close logical channel.
+                raise LogicalChannelError(
+                    channel_id, _DROP_CODE_SEND_QUOTA_VIOLATION)
+            header = create_header(opcode, len(payload), fin, rsv1, rsv2, rsv3,
+                                   mask=False)
+            frame_data = header + payload
+            channel_data.request.connection.append_frame_data(frame_data)
+        finally:
+            self._logical_channels_condition.release()
+
+    def dispatch_message(self, message):
+        """Dispatches message. The reader thread calls this method.
+
+        Args:
+            message: a message that contains encapsulated frame.
+        Raises:
+            PhysicalConnectionError: if the message contains physical
+                connection level errors.
+            LogicalChannelError: if the message contains logical channel
+                level errors.
+        """
+
+        parser = _MuxFramePayloadParser(message)
+        try:
+            channel_id = parser.read_channel_id()
+        except ValueError, e:
+            raise PhysicalConnectionError(_DROP_CODE_CHANNEL_ID_TRUNCATED)
+        if channel_id == _CONTROL_CHANNEL_ID:
+            self._process_control_blocks(parser)
+        else:
+            self._process_logical_frame(channel_id, parser)
+
+    def notify_worker_done(self, channel_id):
+        """Called when a worker has finished.
+
+        Args:
+            channel_id: channel id corresponding to the worker.
+        """
+
+        self._logger.debug('Worker for channel id %d terminated' % channel_id)
+        try:
+            self._logical_channels_condition.acquire()
+            if not channel_id in self._logical_channels:
+                raise MuxUnexpectedException(
+                    'Channel id %d not found' % channel_id)
+            channel_data = self._logical_channels.pop(channel_id)
+        finally:
+            self._worker_done_notify_received = True
+            self._logical_channels_condition.notify()
+            self._logical_channels_condition.release()
+
+        if not channel_data.request.server_terminated:
+            self._send_drop_channel(
+                channel_id, code=channel_data.drop_code,
+                message=channel_data.drop_message)
+
+    def notify_reader_done(self):
+        """This method is called by the reader thread when the reader has
+        finished.
+        """
+
+        # Terminate all logical connections
+        self._logger.debug('terminating all logical connections...')
+        self._logical_channels_condition.acquire()
+        for channel_data in self._logical_channels.values():
+            try:
+                channel_data.request.connection.set_read_state(
+                    _LogicalConnection.STATE_TERMINATED)
+            except Exception:
+                pass
+        self._logical_channels_condition.release()
+
+    def fail_physical_connection(self, code, message):
+        """Fail the physical connection.
+
+        Args:
+            code: drop reason code.
+            message: drop message.
+        """
+
+        self._logger.debug('Failing the physical connection...')
+        self._send_drop_channel(_CONTROL_CHANNEL_ID, code, message)
+        self.physical_stream.close_connection(
+            common.STATUS_INTERNAL_ENDPOINT_ERROR)
+
+    def fail_logical_channel(self, channel_id, code, message):
+        """Fail a logical channel.
+
+        Args:
+            channel_id: channel id.
+            code: drop reason code.
+            message: drop message.
+        """
+
+        self._logger.debug('Failing logical channel %d...' % channel_id)
+        try:
+            self._logical_channels_condition.acquire()
+            if channel_id in self._logical_channels:
+                channel_data = self._logical_channels[channel_id]
+                # Close the logical channel. notify_worker_done() will be
+                # called later and it will send DropChannel.
+                channel_data.drop_code = code
+                channel_data.drop_message = message
+                channel_data.request.connection.set_read_state(
+                    _LogicalConnection.STATE_TERMINATED)
+            else:
+                self._send_drop_channel(channel_id, code, message)
+        finally:
+            self._logical_channels_condition.release()
+
+
+def use_mux(request):
+    return hasattr(request, 'mux') and request.mux
+
+
+def start(request, dispatcher):
+    mux_handler = _MuxHandler(request, dispatcher)
+    mux_handler.start()
+
+    mux_handler.add_channel_slots(_INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+                                  _INITIAL_QUOTA_FOR_CLIENT)
+
+    mux_handler.wait_until_done()
+
+
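+# A minimal sketch of how a caller might drive this module, assuming the
+# opening handshake has already set request.mux and request.mux_quota for a
+# connection on which the multiplexing extension was negotiated, and that
+# "dispatcher" is the dispatch.Dispatcher instance the caller already holds:
+#
+#   if use_mux(request):
+#       start(request, dispatcher)
+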
+# vi:sts=4 sw=4 et
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/standalone.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/standalone.py
new file mode 100755
index 0000000..07a33d9
--- /dev/null
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/standalone.py
@@ -0,0 +1,998 @@
+#!/usr/bin/env python
+#
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Standalone WebSocket server.
+
+Use this file to launch pywebsocket without Apache HTTP Server.
+
+
+BASIC USAGE
+
+Go to the src directory and run
+
+  $ python mod_pywebsocket/standalone.py [-p <ws_port>]
+                                         [-w <websock_handlers>]
+                                         [-d <document_root>]
+
+<ws_port> is the port number to use for ws:// connection.
+
+<document_root> is the path to the root directory of HTML files.
+
+<websock_handlers> is the path to the root directory of WebSocket handlers.
+If not specified, <document_root> will be used. See __init__.py (or
+run $ pydoc mod_pywebsocket) for how to write WebSocket handlers.
+
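+As a rough sketch (see __init__.py for the authoritative description; the
+resource and file names here are hypothetical), a handler is a file whose
+name ends with _wsh.py, e.g. echo_wsh.py served at /echo, defining two
+module-level functions:
+
+  def web_socket_do_extra_handshake(request):
+      pass  # Inspect the request here; raise an exception to reject it.
+
+  def web_socket_transfer_data(request):
+      while True:
+          message = request.ws_stream.receive_message()
+          if message is None:
+              return
+          # Echo text messages back to the client.
+          request.ws_stream.send_message(message, binary=False)
+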
+For more detail and other options, run
+
+  $ python mod_pywebsocket/standalone.py --help
+
+or see _build_option_parser method below.
+
+For troubleshooting, adding "--log_level debug" might help.
+
+
+TRY DEMO
+
+Go to the src directory and run
+
+  $ python standalone.py -d example
+
+to launch pywebsocket with the sample handler and HTML files on port 80. Open
+http://localhost/console.html, click the connect button, type something into
+the text box next to the send button and click the send button. If everything
+is working, you'll see the message you typed echoed by the server.
+
+
+SUPPORTING TLS
+
+To support TLS, run standalone.py with the -t, -k, and -c options.
+
+
+SUPPORTING CLIENT AUTHENTICATION
+
+To support client authentication with TLS, run standalone.py with the -t, -k,
+-c, --tls-client-auth, and --tls-client-ca options.
+
+E.g., $ ./standalone.py -d ../example -p 10443 -t -c ../test/cert/cert.pem -k
+../test/cert/key.pem --tls-client-auth --tls-client-ca=../test/cert/cacert.pem
+
+
+CONFIGURATION FILE
+
+You can also write a configuration file and use it by specifying the path to
+the configuration file with the --config option. Please write a configuration
+file following the documentation of the Python ConfigParser library. The name
+of each entry must be the long version of the argument name. E.g., to set the
+log level to debug, add the following line:
+
+log_level=debug
+
+For options which don't take a value, please add some fake value. E.g., for
+the --tls option, add the following line:
+
+tls=True
+
+Note that tls will be enabled even if you write tls=False, because the value
+part is only a placeholder.
+
+When both a command line argument and a configuration file entry are set for
+the same configuration item, the command line value will override the one in
+the configuration file.
+
+
+THREADING
+
+This server is derived from SocketServer.ThreadingMixIn. Hence a thread is
+used for each request.
+
+
+SECURITY WARNING
+
+This server uses CGIHTTPServer, which is not secure: it may execute arbitrary
+Python code or external programs. It should not be used outside a firewall.
+"""
+
+import BaseHTTPServer
+import CGIHTTPServer
+import SimpleHTTPServer
+import SocketServer
+import ConfigParser
+import base64
+import httplib
+import logging
+import logging.handlers
+import optparse
+import os
+import re
+import select
+import socket
+import sys
+import threading
+import time
+
+_HAS_SSL = False
+_HAS_OPEN_SSL = False
+try:
+    import ssl
+    _HAS_SSL = True
+except ImportError:
+    try:
+        import OpenSSL.SSL
+        _HAS_OPEN_SSL = True
+    except ImportError:
+        pass
+
+from mod_pywebsocket import common
+from mod_pywebsocket import dispatch
+from mod_pywebsocket import handshake
+from mod_pywebsocket import http_header_util
+from mod_pywebsocket import memorizingfile
+from mod_pywebsocket import util
+
+
+_DEFAULT_LOG_MAX_BYTES = 1024 * 256
+_DEFAULT_LOG_BACKUP_COUNT = 5
+
+_DEFAULT_REQUEST_QUEUE_SIZE = 128
+
+# 1024 is practically large enough to contain WebSocket handshake lines.
+_MAX_MEMORIZED_LINES = 1024
+
+
+class _StandaloneConnection(object):
+    """Mimic mod_python mp_conn."""
+
+    def __init__(self, request_handler):
+        """Construct an instance.
+
+        Args:
+            request_handler: A WebSocketRequestHandler instance.
+        """
+
+        self._request_handler = request_handler
+
+    def get_local_addr(self):
+        """Getter to mimic mp_conn.local_addr."""
+
+        return (self._request_handler.server.server_name,
+                self._request_handler.server.server_port)
+    local_addr = property(get_local_addr)
+
+    def get_remote_addr(self):
+        """Getter to mimic mp_conn.remote_addr.
+
+        Setting the property in __init__ won't work because the request
+        handler is not yet initialized at that point."""
+
+        return self._request_handler.client_address
+    remote_addr = property(get_remote_addr)
+
+    def write(self, data):
+        """Mimic mp_conn.write()."""
+
+        return self._request_handler.wfile.write(data)
+
+    def read(self, length):
+        """Mimic mp_conn.read()."""
+
+        return self._request_handler.rfile.read(length)
+
+    def get_memorized_lines(self):
+        """Get memorized lines."""
+
+        return self._request_handler.rfile.get_memorized_lines()
+
+
+class _StandaloneRequest(object):
+    """Mimic mod_python request."""
+
+    def __init__(self, request_handler, use_tls):
+        """Construct an instance.
+
+        Args:
+            request_handler: A WebSocketRequestHandler instance.
+            use_tls: Whether the connection uses TLS.
+        """
+
+        self._logger = util.get_class_logger(self)
+
+        self._request_handler = request_handler
+        self.connection = _StandaloneConnection(request_handler)
+        self._use_tls = use_tls
+        self.headers_in = request_handler.headers
+
+    def get_uri(self):
+        """Getter to mimic request.uri."""
+
+        return self._request_handler.path
+    uri = property(get_uri)
+
+    def get_method(self):
+        """Getter to mimic request.method."""
+
+        return self._request_handler.command
+    method = property(get_method)
+
+    def get_protocol(self):
+        """Getter to mimic request.protocol."""
+
+        return self._request_handler.request_version
+    protocol = property(get_protocol)
+
+    def is_https(self):
+        """Mimic request.is_https()."""
+
+        return self._use_tls
+
+    def _drain_received_data(self):
+        """Don't use this method from WebSocket handler. Drains unread data
+        in the receive buffer.
+        """
+
+        raw_socket = self._request_handler.connection
+        drained_data = util.drain_received_data(raw_socket)
+
+        if drained_data:
+            self._logger.debug(
+                'Drained data following close frame: %r', drained_data)
+
+
+class _StandaloneSSLConnection(object):
+    """A wrapper class for OpenSSL.SSL.Connection to provide makefile method
+    which is not supported by the class.
+    """
+
+    def __init__(self, connection):
+        self._connection = connection
+
+    def __getattribute__(self, name):
+        if name in ('_connection', 'makefile'):
+            return object.__getattribute__(self, name)
+        return self._connection.__getattribute__(name)
+
+    def __setattr__(self, name, value):
+        if name in ('_connection', 'makefile'):
+            return object.__setattr__(self, name, value)
+        return self._connection.__setattr__(name, value)
+
+    def makefile(self, mode='r', bufsize=-1):
+        return socket._fileobject(self._connection, mode, bufsize)
+
+
+def _alias_handlers(dispatcher, websock_handlers_map_file):
+    """Set aliases specified in websock_handler_map_file in dispatcher.
+
+    Args:
+        dispatcher: dispatch.Dispatcher instance
+        websock_handler_map_file: alias map file
+    """
+
+    fp = open(websock_handlers_map_file)
+    try:
+        for line in fp:
+            if line[0] == '#' or line.isspace():
+                continue
+            m = re.match('(\S+)\s+(\S+)', line)
+            if not m:
+                logging.warning('Wrong format in map file: ' + line)
+                continue
+            try:
+                dispatcher.add_resource_path_alias(
+                    m.group(1), m.group(2))
+            except dispatch.DispatchException, e:
+                logging.error(str(e))
+    finally:
+        fp.close()
+
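+# A sketch of a handler map file accepted by _alias_handlers(): one alias per
+# line, two whitespace-separated fields (the alias resource path, then the
+# existing resource path); lines starting with '#' and blank lines are
+# skipped. The paths below are hypothetical:
+#
+#   # alias_resource_path   existing_resource_path
+#   /echoback               /echo
+#   /chat2                  /chat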
+
+class WebSocketServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
+    """HTTPServer specialized for WebSocket."""
+
+    # Overrides SocketServer.ThreadingMixIn.daemon_threads
+    daemon_threads = True
+    # Overrides BaseHTTPServer.HTTPServer.allow_reuse_address
+    allow_reuse_address = True
+
+    def __init__(self, options):
+        """Override SocketServer.TCPServer.__init__ to set SSL enabled
+        socket object to self.socket before server_bind and server_activate,
+        if necessary.
+        """
+
+        # Share a Dispatcher among request handlers to save time for
+        # instantiation.  Dispatcher can be shared because it is thread-safe.
+        options.dispatcher = dispatch.Dispatcher(
+            options.websock_handlers,
+            options.scan_dir,
+            options.allow_handlers_outside_root_dir)
+        if options.websock_handlers_map_file:
+            _alias_handlers(options.dispatcher,
+                            options.websock_handlers_map_file)
+        warnings = options.dispatcher.source_warnings()
+        if warnings:
+            for warning in warnings:
+                logging.warning('mod_pywebsocket: %s' % warning)
+
+        self._logger = util.get_class_logger(self)
+
+        self.request_queue_size = options.request_queue_size
+        self.__ws_is_shut_down = threading.Event()
+        self.__ws_serving = False
+
+        SocketServer.BaseServer.__init__(
+            self, (options.server_host, options.port), WebSocketRequestHandler)
+
+        # Expose the options object to allow handler objects to access it. We
+        # name it with the websocket_ prefix to avoid conflicts.
+        self.websocket_server_options = options
+
+        self._create_sockets()
+        self.server_bind()
+        self.server_activate()
+
+    def _create_sockets(self):
+        self.server_name, self.server_port = self.server_address
+        self._sockets = []
+        if not self.server_name:
+            # On platforms that don't support IPv6, the first bind fails.
+            # On platforms that support IPv6:
+            # - If the call with AF_INET6 binds both IPv4 and IPv6, the
+            #   first bind succeeds and the second fails (we'll see an
+            #   'Address already in use' error).
+            # - If the call with AF_INET6 binds only IPv6, both calls are
+            #   expected to succeed, so that we listen on both protocols.
+            addrinfo_array = [
+                (socket.AF_INET6, socket.SOCK_STREAM, '', '', ''),
+                (socket.AF_INET, socket.SOCK_STREAM, '', '', '')]
+        else:
+            addrinfo_array = socket.getaddrinfo(self.server_name,
+                                                self.server_port,
+                                                socket.AF_UNSPEC,
+                                                socket.SOCK_STREAM,
+                                                socket.IPPROTO_TCP)
+        for addrinfo in addrinfo_array:
+            self._logger.info('Create socket on: %r', addrinfo)
+            family, socktype, proto, canonname, sockaddr = addrinfo
+            try:
+                socket_ = socket.socket(family, socktype)
+            except Exception, e:
+                self._logger.info('Skip by failure: %r', e)
+                continue
+            if self.websocket_server_options.use_tls:
+                if _HAS_SSL:
+                    if self.websocket_server_options.tls_client_auth:
+                        client_cert_ = ssl.CERT_REQUIRED
+                    else:
+                        client_cert_ = ssl.CERT_NONE
+                    socket_ = ssl.wrap_socket(socket_,
+                        keyfile=self.websocket_server_options.private_key,
+                        certfile=self.websocket_server_options.certificate,
+                        ssl_version=ssl.PROTOCOL_SSLv23,
+                        ca_certs=self.websocket_server_options.tls_client_ca,
+                        cert_reqs=client_cert_)
+                if _HAS_OPEN_SSL:
+                    ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
+                    ctx.use_privatekey_file(
+                        self.websocket_server_options.private_key)
+                    ctx.use_certificate_file(
+                        self.websocket_server_options.certificate)
+                    socket_ = OpenSSL.SSL.Connection(ctx, socket_)
+            self._sockets.append((socket_, addrinfo))
+
+    def server_bind(self):
+        """Override SocketServer.TCPServer.server_bind to enable multiple
+        sockets bind.
+        """
+
+        failed_sockets = []
+
+        for socketinfo in self._sockets:
+            socket_, addrinfo = socketinfo
+            self._logger.info('Bind on: %r', addrinfo)
+            if self.allow_reuse_address:
+                socket_.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+            try:
+                socket_.bind(self.server_address)
+            except Exception, e:
+                self._logger.info('Skip by failure: %r', e)
+                socket_.close()
+                failed_sockets.append(socketinfo)
+            if self.server_address[1] == 0:
+                # The operating system assigns the actual port number when
+                # port number 0 is requested. In this case, the second and
+                # later sockets should use the same port number. Also,
+                # self.server_port is rewritten because it is exported and
+                # will be used by external code.
+                self.server_address = (
+                    self.server_name, socket_.getsockname()[1])
+                self.server_port = self.server_address[1]
+                self._logger.info('Port %r is assigned', self.server_port)
+
+        for socketinfo in failed_sockets:
+            self._sockets.remove(socketinfo)
+
+    def server_activate(self):
+        """Override SocketServer.TCPServer.server_activate to enable multiple
+        sockets listen.
+        """
+
+        failed_sockets = []
+
+        for socketinfo in self._sockets:
+            socket_, addrinfo = socketinfo
+            self._logger.info('Listen on: %r', addrinfo)
+            try:
+                socket_.listen(self.request_queue_size)
+            except Exception, e:
+                self._logger.info('Skip by failure: %r', e)
+                socket_.close()
+                failed_sockets.append(socketinfo)
+
+        for socketinfo in failed_sockets:
+            self._sockets.remove(socketinfo)
+
+        if len(self._sockets) == 0:
+            self._logger.critical(
+                'No sockets activated. Use info log level to see the reason.')
+
+    def server_close(self):
+        """Override SocketServer.TCPServer.server_close to enable multiple
+        sockets close.
+        """
+
+        for socketinfo in self._sockets:
+            socket_, addrinfo = socketinfo
+            self._logger.info('Close on: %r', addrinfo)
+            socket_.close()
+
+    def fileno(self):
+        """Override SocketServer.TCPServer.fileno."""
+
+        self._logger.critical('Not supported: fileno')
+        return self._sockets[0][0].fileno()
+
+    def handle_error(self, request, client_address):
+        """Override SocketServer.handle_error."""
+
+        self._logger.error(
+            'Exception in processing request from: %r\n%s',
+            client_address,
+            util.get_stack_trace())
+        # Note: client_address is a tuple.
+
+    def get_request(self):
+        """Override TCPServer.get_request to wrap OpenSSL.SSL.Connection
+        object with _StandaloneSSLConnection to provide makefile method. We
+        cannot substitute OpenSSL.SSL.Connection.makefile since it's readonly
+        attribute.
+        """
+
+        accepted_socket, client_address = self.socket.accept()
+        if self.websocket_server_options.use_tls and _HAS_OPEN_SSL:
+            accepted_socket = _StandaloneSSLConnection(accepted_socket)
+        return accepted_socket, client_address
+
+    def serve_forever(self, poll_interval=0.5):
+        """Override SocketServer.BaseServer.serve_forever."""
+
+        self.__ws_serving = True
+        self.__ws_is_shut_down.clear()
+        handle_request = self.handle_request
+        if hasattr(self, '_handle_request_noblock'):
+            handle_request = self._handle_request_noblock
+        else:
+            self._logger.warning('Fallback to blocking request handler')
+        try:
+            while self.__ws_serving:
+                r, w, e = select.select(
+                    [socket_[0] for socket_ in self._sockets],
+                    [], [], poll_interval)
+                for socket_ in r:
+                    self.socket = socket_
+                    handle_request()
+                self.socket = None
+        finally:
+            self.__ws_is_shut_down.set()
+
+    def shutdown(self):
+        """Override SocketServer.BaseServer.shutdown."""
+
+        self.__ws_serving = False
+        self.__ws_is_shut_down.wait()
+
+
+class WebSocketRequestHandler(CGIHTTPServer.CGIHTTPRequestHandler):
+    """CGIHTTPRequestHandler specialized for WebSocket."""
+
+    # Use httplib.HTTPMessage instead of mimetools.Message.
+    MessageClass = httplib.HTTPMessage
+
+    def setup(self):
+        """Override SocketServer.StreamRequestHandler.setup to wrap rfile
+        with MemorizingFile.
+
+        This method will be called by BaseRequestHandler's constructor
+        before calling BaseHTTPRequestHandler.handle.
+        BaseHTTPRequestHandler.handle will call
+        BaseHTTPRequestHandler.handle_one_request and it will call
+        WebSocketRequestHandler.parse_request.
+        """
+
+        # Call superclass's setup to prepare rfile, wfile, etc. See setup
+        # definition on the root class SocketServer.StreamRequestHandler to
+        # understand what this does.
+        CGIHTTPServer.CGIHTTPRequestHandler.setup(self)
+
+        self.rfile = memorizingfile.MemorizingFile(
+            self.rfile,
+            max_memorized_lines=_MAX_MEMORIZED_LINES)
+
+    def __init__(self, request, client_address, server):
+        self._logger = util.get_class_logger(self)
+
+        self._options = server.websocket_server_options
+
+        # Overrides CGIHTTPRequestHandler.cgi_directories.
+        self.cgi_directories = self._options.cgi_directories
+        # Replace CGIHTTPRequestHandler.is_executable method.
+        if self._options.is_executable_method is not None:
+            self.is_executable = self._options.is_executable_method
+
+        # This actually calls BaseRequestHandler.__init__.
+        CGIHTTPServer.CGIHTTPRequestHandler.__init__(
+            self, request, client_address, server)
+
+    def parse_request(self):
+        """Override BaseHTTPServer.BaseHTTPRequestHandler.parse_request.
+
+        Return True to continue processing for HTTP(S), False otherwise.
+
+        See BaseHTTPRequestHandler.handle_one_request method which calls
+        this method to understand how the return value will be handled.
+        """
+
+        # We hook the parse_request method, but also call the original
+        # CGIHTTPRequestHandler.parse_request since, when we return True,
+        # CGIHTTPRequestHandler.handle_one_request continues processing and
+        # needs the variables set by CGIHTTPRequestHandler.parse_request.
+        #
+        # Variables set by this method will be also used by WebSocket request
+        # handling (self.path, self.command, self.requestline, etc. See also
+        # how _StandaloneRequest's members are implemented using these
+        # attributes).
+        if not CGIHTTPServer.CGIHTTPRequestHandler.parse_request(self):
+            return False
+
+        if self._options.use_basic_auth:
+            auth = self.headers.getheader('Authorization')
+            if auth != self._options.basic_auth_credential:
+                self.send_response(401)
+                self.send_header('WWW-Authenticate',
+                                 'Basic realm="Pywebsocket"')
+                self.end_headers()
+                self._logger.info('Request basic authentication')
+                return True
+
+        host, port, resource = http_header_util.parse_uri(self.path)
+        if resource is None:
+            self._logger.info('Invalid URI: %r', self.path)
+            self._logger.info('Fallback to CGIHTTPRequestHandler')
+            return True
+        server_options = self.server.websocket_server_options
+        if host is not None:
+            validation_host = server_options.validation_host
+            if validation_host is not None and host != validation_host:
+                self._logger.info('Invalid host: %r (expected: %r)',
+                                  host,
+                                  validation_host)
+                self._logger.info('Fallback to CGIHTTPRequestHandler')
+                return True
+        if port is not None:
+            validation_port = server_options.validation_port
+            if validation_port is not None and port != validation_port:
+                self._logger.info('Invalid port: %r (expected: %r)',
+                                  port,
+                                  validation_port)
+                self._logger.info('Fallback to CGIHTTPRequestHandler')
+                return True
+        self.path = resource
+
+        request = _StandaloneRequest(self, self._options.use_tls)
+
+        try:
+            # Fall back to the default HTTP handler for request paths for
+            # which we don't have WebSocket handlers.
+            if not self._options.dispatcher.get_handler_suite(self.path):
+                self._logger.info('No handler for resource: %r',
+                                  self.path)
+                self._logger.info('Fallback to CGIHTTPRequestHandler')
+                return True
+        except dispatch.DispatchException, e:
+            self._logger.info('%s', e)
+            self.send_error(e.status)
+            return False
+
+        # If any exception without a matching except clause (including
+        # DispatchException) is raised below this point, it will be caught
+        # and logged by WebSocketServer.
+
+        try:
+            try:
+                handshake.do_handshake(
+                    request,
+                    self._options.dispatcher,
+                    allowDraft75=self._options.allow_draft75,
+                    strict=self._options.strict)
+            except handshake.VersionException, e:
+                self._logger.info('%s', e)
+                self.send_response(common.HTTP_STATUS_BAD_REQUEST)
+                self.send_header(common.SEC_WEBSOCKET_VERSION_HEADER,
+                                 e.supported_versions)
+                self.end_headers()
+                return False
+            except handshake.HandshakeException, e:
+                # Handshake for ws(s) failed.
+                self._logger.info('%s', e)
+                self.send_error(e.status)
+                return False
+
+            request._dispatcher = self._options.dispatcher
+            self._options.dispatcher.transfer_data(request)
+        except handshake.AbortedByUserException, e:
+            self._logger.info('%s', e)
+        return False
+
+    def log_request(self, code='-', size='-'):
+        """Override BaseHTTPServer.log_request."""
+
+        self._logger.info('"%s" %s %s',
+                          self.requestline, str(code), str(size))
+
+    def log_error(self, *args):
+        """Override BaseHTTPServer.log_error."""
+
+        # Despite the name, this method is for warnings rather than errors.
+        # For example, HTTP status codes are logged by this method.
+        self._logger.warning('%s - %s',
+                             self.address_string(),
+                             args[0] % args[1:])
+
+    def is_cgi(self):
+        """Test whether self.path corresponds to a CGI script.
+
+        Add an extra check that self.path doesn't contain '..'.
+        Also check whether the file is executable. If the file is not
+        executable, it is handled as a static file or directory rather
+        than a CGI script.
+        """
+
+        if CGIHTTPServer.CGIHTTPRequestHandler.is_cgi(self):
+            if '..' in self.path:
+                return False
+            # Strip query parameters from the request path.
+            resource_name = self.path.split('?', 2)[0]
+            # Convert resource_name into a real path name in the filesystem.
+            scriptfile = self.translate_path(resource_name)
+            if not os.path.isfile(scriptfile):
+                return False
+            if not self.is_executable(scriptfile):
+                return False
+            return True
+        return False
+
+
+def _get_logger_from_class(c):
+    return logging.getLogger('%s.%s' % (c.__module__, c.__name__))
+
+
+def _configure_logging(options):
+    logging.addLevelName(common.LOGLEVEL_FINE, 'FINE')
+
+    logger = logging.getLogger()
+    logger.setLevel(logging.getLevelName(options.log_level.upper()))
+    if options.log_file:
+        handler = logging.handlers.RotatingFileHandler(
+                options.log_file, 'a', options.log_max, options.log_count)
+    else:
+        handler = logging.StreamHandler()
+    formatter = logging.Formatter(
+            '[%(asctime)s] [%(levelname)s] %(name)s: %(message)s')
+    handler.setFormatter(formatter)
+    logger.addHandler(handler)
+
+    deflate_log_level_name = logging.getLevelName(
+        options.deflate_log_level.upper())
+    _get_logger_from_class(util._Deflater).setLevel(
+        deflate_log_level_name)
+    _get_logger_from_class(util._Inflater).setLevel(
+        deflate_log_level_name)
+
+
+def _build_option_parser():
+    parser = optparse.OptionParser()
+
+    parser.add_option('--config', dest='config_file', type='string',
+                      default=None,
+                      help=('Path to configuration file. See the file comment '
+                            'at the top of this file for the configuration '
+                            'file format'))
+    parser.add_option('-H', '--server-host', '--server_host',
+                      dest='server_host',
+                      default='',
+                      help='server hostname to listen to')
+    parser.add_option('-V', '--validation-host', '--validation_host',
+                      dest='validation_host',
+                      default=None,
+                      help='server hostname to validate in absolute path.')
+    parser.add_option('-p', '--port', dest='port', type='int',
+                      default=common.DEFAULT_WEB_SOCKET_PORT,
+                      help='port to listen to')
+    parser.add_option('-P', '--validation-port', '--validation_port',
+                      dest='validation_port', type='int',
+                      default=None,
+                      help='server port to validate in absolute path.')
+    parser.add_option('-w', '--websock-handlers', '--websock_handlers',
+                      dest='websock_handlers',
+                      default='.',
+                      help=('The root directory of WebSocket handler files. '
+                            'If the path is relative, --document-root is used '
+                            'as the base.'))
+    parser.add_option('-m', '--websock-handlers-map-file',
+                      '--websock_handlers_map_file',
+                      dest='websock_handlers_map_file',
+                      default=None,
+                      help=('WebSocket handlers map file. '
+                            'Each line consists of alias_resource_path and '
+                            'existing_resource_path, separated by spaces.'))
+    parser.add_option('-s', '--scan-dir', '--scan_dir', dest='scan_dir',
+                      default=None,
+                      help=('Must be a directory under --websock-handlers. '
+                            'Only handlers under this directory are scanned '
+                            'and registered to the server. '
+                            'Useful for saving scan time when the handler '
+                            'root directory contains lots of files that are '
+                            'not handler files, or are handler files that '
+                            'you don\'t want to be registered.'))
+    parser.add_option('--allow-handlers-outside-root-dir',
+                      '--allow_handlers_outside_root_dir',
+                      dest='allow_handlers_outside_root_dir',
+                      action='store_true',
+                      default=False,
+                      help=('Scans WebSocket handlers even if their canonical '
+                            'path is not under --websock-handlers.'))
+    parser.add_option('-d', '--document-root', '--document_root',
+                      dest='document_root', default='.',
+                      help='Document root directory.')
+    parser.add_option('-x', '--cgi-paths', '--cgi_paths', dest='cgi_paths',
+                      default=None,
+                      help=('CGI paths relative to document_root. '
+                            'Comma-separated. (e.g. -x /cgi,/htbin) '
+                            'Files under document_root/cgi_path are handled '
+                            'as CGI programs. Must be executable.'))
+    parser.add_option('-t', '--tls', dest='use_tls', action='store_true',
+                      default=False, help='use TLS (wss://)')
+    parser.add_option('-k', '--private-key', '--private_key',
+                      dest='private_key',
+                      default='', help='TLS private key file.')
+    parser.add_option('-c', '--certificate', dest='certificate',
+                      default='', help='TLS certificate file.')
+    parser.add_option('--tls-client-auth', dest='tls_client_auth',
+                      action='store_true', default=False,
+                      help='Requires TLS client auth on every connection.')
+    parser.add_option('--tls-client-ca', dest='tls_client_ca', default='',
+                      help=('Specifies a pem file which contains a set of '
+                            'concatenated CA certificates which are used to '
+                            'validate certificates passed from clients'))
+    parser.add_option('--basic-auth', dest='use_basic_auth',
+                      action='store_true', default=False,
+                      help='Requires Basic authentication.')
+    parser.add_option('--basic-auth-credential',
+                      dest='basic_auth_credential', default='test:test',
+                      help='Specifies the Basic authentication credential '
+                      'as a username:password pair (e.g. test:test).')
+    parser.add_option('-l', '--log-file', '--log_file', dest='log_file',
+                      default='', help='Log file.')
+    # Custom log level:
+    # - FINE: Prints status of each frame processing step
+    parser.add_option('--log-level', '--log_level', type='choice',
+                      dest='log_level', default='warn',
+                      choices=['fine',
+                               'debug', 'info', 'warning', 'warn', 'error',
+                               'critical'],
+                      help='Log level.')
+    parser.add_option('--deflate-log-level', '--deflate_log_level',
+                      type='choice',
+                      dest='deflate_log_level', default='warn',
+                      choices=['debug', 'info', 'warning', 'warn', 'error',
+                               'critical'],
+                      help='Log level for _Deflater and _Inflater.')
+    parser.add_option('--thread-monitor-interval-in-sec',
+                      '--thread_monitor_interval_in_sec',
+                      dest='thread_monitor_interval_in_sec',
+                      type='int', default=-1,
+                      help=('If a positive integer is specified, run a thread '
+                            'monitor that shows the status of server threads '
+                            'periodically at the specified interval in '
+                            'seconds. If a non-positive integer is specified, '
+                            'disable the thread monitor.'))
+    parser.add_option('--log-max', '--log_max', dest='log_max', type='int',
+                      default=_DEFAULT_LOG_MAX_BYTES,
+                      help='Maximum log file size in bytes.')
+    parser.add_option('--log-count', '--log_count', dest='log_count',
+                      type='int', default=_DEFAULT_LOG_BACKUP_COUNT,
+                      help='Number of log backup files to keep.')
+    parser.add_option('--allow-draft75', dest='allow_draft75',
+                      action='store_true', default=False,
+                      help='Obsolete option. Ignored.')
+    parser.add_option('--strict', dest='strict', action='store_true',
+                      default=False, help='Obsolete option. Ignored.')
+    parser.add_option('-q', '--queue', dest='request_queue_size', type='int',
+                      default=_DEFAULT_REQUEST_QUEUE_SIZE,
+                      help='request queue size')
+
+    return parser
+
+
+class ThreadMonitor(threading.Thread):
+    daemon = True
+
+    def __init__(self, interval_in_sec):
+        threading.Thread.__init__(self, name='ThreadMonitor')
+
+        self._logger = util.get_class_logger(self)
+
+        self._interval_in_sec = interval_in_sec
+
+    def run(self):
+        while True:
+            thread_name_list = []
+            for thread in threading.enumerate():
+                thread_name_list.append(thread.name)
+            self._logger.info(
+                "%d active threads: %s",
+                threading.active_count(),
+                ', '.join(thread_name_list))
+            time.sleep(self._interval_in_sec)
+
+
+def _parse_args_and_config(args):
+    parser = _build_option_parser()
+
+    # First, parse options without configuration file.
+    temporary_options, temporary_args = parser.parse_args(args=args)
+    if temporary_args:
+        logging.critical(
+            'Unrecognized positional arguments: %r', temporary_args)
+        sys.exit(1)
+
+    if temporary_options.config_file:
+        try:
+            config_fp = open(temporary_options.config_file, 'r')
+        except IOError, e:
+            logging.critical(
+                'Failed to open configuration file %r: %r',
+                temporary_options.config_file,
+                e)
+            sys.exit(1)
+
+        config_parser = ConfigParser.SafeConfigParser()
+        config_parser.readfp(config_fp)
+        config_fp.close()
+
+        args_from_config = []
+        for name, value in config_parser.items('pywebsocket'):
+            args_from_config.append('--' + name)
+            args_from_config.append(value)
+        if args is None:
+            args = args_from_config
+        else:
+            args = args_from_config + args
+        return parser.parse_args(args=args)
+    else:
+        return temporary_options, temporary_args
+
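+# A hypothetical example of the configuration file read above (a sketch, not
+# part of the original module). Option names in the [pywebsocket] section must
+# match the long option names defined in _build_option_parser; options read
+# from the file are prepended to the command line, so options given on the
+# command line take precedence:
+#
+#     [pywebsocket]
+#     document_root = /var/www/websock
+#     log_level = info
+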
+
+def _main(args=None):
+    """You can call this function from your own program, but please note that
+    this function has some side-effects that might affect your program. For
+    example, util.wrap_popen3_for_win use in this method replaces implementation
+    of os.popen3.
+    """
+
+    options, args = _parse_args_and_config(args=args)
+
+    os.chdir(options.document_root)
+
+    _configure_logging(options)
+
+    # TODO(tyoshino): Clean up initialization of CGI related values. Move some
+    # of code here to WebSocketRequestHandler class if it's better.
+    options.cgi_directories = []
+    options.is_executable_method = None
+    if options.cgi_paths:
+        options.cgi_directories = options.cgi_paths.split(',')
+        if sys.platform in ('cygwin', 'win32'):
+            cygwin_path = None
+            # For Win32 Python, it is expected that CYGWIN_PATH
+            # is set to a directory of cygwin binaries.
+            # For example, websocket_server.py in Chromium sets CYGWIN_PATH to
+            # full path of third_party/cygwin/bin.
+            if 'CYGWIN_PATH' in os.environ:
+                cygwin_path = os.environ['CYGWIN_PATH']
+            util.wrap_popen3_for_win(cygwin_path)
+
+            def __check_script(scriptpath):
+                return util.get_script_interp(scriptpath, cygwin_path)
+
+            options.is_executable_method = __check_script
+
+    if options.use_tls:
+        if not (_HAS_SSL or _HAS_OPEN_SSL):
+            logging.critical('TLS support requires ssl or pyOpenSSL module.')
+            sys.exit(1)
+        if not options.private_key or not options.certificate:
+            logging.critical(
+                    'To use TLS, specify private_key and certificate.')
+            sys.exit(1)
+
+    if options.tls_client_auth:
+        if not options.use_tls:
+            logging.critical('TLS must be enabled for client authentication.')
+            sys.exit(1)
+        if not _HAS_SSL:
+            logging.critical('Client authentication requires ssl module.')
+
+    if not options.scan_dir:
+        options.scan_dir = options.websock_handlers
+
+    if options.use_basic_auth:
+        options.basic_auth_credential = 'Basic ' + base64.b64encode(
+            options.basic_auth_credential)
+
+    try:
+        if options.thread_monitor_interval_in_sec > 0:
+            # Run a thread monitor to show the status of server threads for
+            # debugging.
+            ThreadMonitor(options.thread_monitor_interval_in_sec).start()
+
+        server = WebSocketServer(options)
+        server.serve_forever()
+    except Exception, e:
+        logging.critical('mod_pywebsocket: %s' % e)
+        logging.critical('mod_pywebsocket: %s' % util.get_stack_trace())
+        sys.exit(1)
+
+
+if __name__ == '__main__':
+    _main(sys.argv[1:])
+
+
+# vi:sts=4 sw=4 et
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/stream.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/stream.py
new file mode 100644
index 0000000..edc5332
--- /dev/null
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/stream.py
@@ -0,0 +1,57 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file exports public symbols.
+"""
+
+
+from mod_pywebsocket._stream_base import BadOperationException
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_base import InvalidFrameException
+from mod_pywebsocket._stream_base import InvalidUTF8Exception
+from mod_pywebsocket._stream_base import UnsupportedFrameException
+from mod_pywebsocket._stream_hixie75 import StreamHixie75
+from mod_pywebsocket._stream_hybi import Frame
+from mod_pywebsocket._stream_hybi import Stream
+from mod_pywebsocket._stream_hybi import StreamOptions
+
+# These functions are intended to be used by WebSocket client developers so
+# that their implementations can receive intentionally broken data in tests.
+from mod_pywebsocket._stream_hybi import create_close_frame
+from mod_pywebsocket._stream_hybi import create_header
+from mod_pywebsocket._stream_hybi import create_length_header
+from mod_pywebsocket._stream_hybi import create_ping_frame
+from mod_pywebsocket._stream_hybi import create_pong_frame
+from mod_pywebsocket._stream_hybi import create_binary_frame
+from mod_pywebsocket._stream_hybi import create_text_frame
+from mod_pywebsocket._stream_hybi import create_closing_handshake_body
+
+
+# vi:sts=4 sw=4 et
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/util.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/util.py
new file mode 100644
index 0000000..7bb0b5d
--- /dev/null
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/util.py
@@ -0,0 +1,515 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""WebSocket utilities.
+"""
+
+
+import array
+import errno
+
+# Import hash classes from a module available and recommended for each Python
+# version and re-export those symbols. Use the sha and md5 modules in Python
+# 2.4, and the hashlib module in Python 2.6.
+try:
+    import hashlib
+    md5_hash = hashlib.md5
+    sha1_hash = hashlib.sha1
+except ImportError:
+    import md5
+    import sha
+    md5_hash = md5.md5
+    sha1_hash = sha.sha
+
+import StringIO
+import logging
+import os
+import re
+import socket
+import traceback
+import zlib
+
+
+def get_stack_trace():
+    """Get the current stack trace as string.
+
+    This is needed to support Python 2.3.
+    TODO: Remove this when we only support Python 2.4 and above.
+          Use traceback.format_exc instead.
+    """
+
+    out = StringIO.StringIO()
+    traceback.print_exc(file=out)
+    return out.getvalue()
+
+
+def prepend_message_to_exception(message, exc):
+    """Prepend message to the exception."""
+
+    exc.args = (message + str(exc),)
+    return
+
+
+def __translate_interp(interp, cygwin_path):
+    """Translate interp program path for Win32 python to run cygwin program
+    (e.g. perl).  Note that it doesn't support path that contains space,
+    which is typically true for Unix, where #!-script is written.
+    For Win32 python, cygwin_path is a directory of cygwin binaries.
+
+    Args:
+      interp: interp command line
+      cygwin_path: directory name of cygwin binary, or None
+    Returns:
+      translated interp command line.
+    """
+    if not cygwin_path:
+        return interp
+    m = re.match('^[^ ]*/([^ ]+)( .*)?', interp)
+    if m:
+        cmd = os.path.join(cygwin_path, m.group(1))
+        return cmd + m.group(2)
+    return interp
+
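+# A minimal sketch (not part of the original module, never called here)
+# illustrating __translate_interp: with a cygwin binary directory supplied,
+# a Unix interpreter path is rewritten to point into that directory. The
+# helper name and example paths are hypothetical.
+def _demo_translate_interp():
+    translated = __translate_interp('/usr/bin/perl -wT', r'C:\cygwin\bin')
+    assert translated == os.path.join(r'C:\cygwin\bin', 'perl') + ' -wT'
+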
+
+def get_script_interp(script_path, cygwin_path=None):
+    """Gets #!-interpreter command line from the script.
+
+    It also fixes the command path.  When Cygwin Python is used, e.g. in
+    WebKit, it can run "/usr/bin/perl -wT hello.pl".
+    When Win32 Python is used, e.g. in Chromium, it cannot.  So, fix
+    "/usr/bin/perl" to "<cygwin_path>\perl.exe".
+
+    Args:
+      script_path: pathname of the script
+      cygwin_path: directory name of cygwin binary, or None
+    Returns:
+      #!-interpreter command line, or None if it is not a #!-script.
+    """
+    fp = open(script_path)
+    line = fp.readline()
+    fp.close()
+    m = re.match('^#!(.*)', line)
+    if m:
+        return __translate_interp(m.group(1), cygwin_path)
+    return None
+
+
+def wrap_popen3_for_win(cygwin_path):
+    """Wrap popen3 to support #!-script on Windows.
+
+    Args:
+      cygwin_path:  directory of cygwin binaries, used when the command path
+                    needs to be translated.  None if no translation is required.
+    """
+
+    __orig_popen3 = os.popen3
+
+    def __wrap_popen3(cmd, mode='t', bufsize=-1):
+        cmdline = cmd.split(' ')
+        interp = get_script_interp(cmdline[0], cygwin_path)
+        if interp:
+            cmd = interp + ' ' + cmd
+        return __orig_popen3(cmd, mode, bufsize)
+
+    os.popen3 = __wrap_popen3
+
+
+def hexify(s):
+    return ' '.join(map(lambda x: '%02x' % ord(x), s))
+
+
+def get_class_logger(o):
+    return logging.getLogger(
+        '%s.%s' % (o.__class__.__module__, o.__class__.__name__))
+
+
+class NoopMasker(object):
+    """A masking object that has the same interface as RepeatedXorMasker but
+    just returns the string passed in without making any change.
+    """
+
+    def __init__(self):
+        pass
+
+    def mask(self, s):
+        return s
+
+
+class RepeatedXorMasker(object):
+    """A masking object that applies XOR on the string given to mask method
+    with the masking bytes given to the constructor repeatedly. This object
+    remembers the position in the masking bytes the last mask method call
+    ended and resumes from that point on the next mask method call.
+    """
+
+    def __init__(self, mask):
+        self._mask = map(ord, mask)
+        self._mask_size = len(self._mask)
+        self._count = 0
+
+    def mask(self, s):
+        result = array.array('B')
+        result.fromstring(s)
+        # Use temporary local variables to avoid the cost of repeated
+        # attribute access.
+        count = self._count
+        mask = self._mask
+        mask_size = self._mask_size
+        for i in xrange(len(result)):
+            result[i] ^= mask[count]
+            count = (count + 1) % mask_size
+        self._count = count
+
+        return result.tostring()
+
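+# A minimal sketch (not part of the original module, never called here)
+# showing that RepeatedXorMasker keeps its position across calls: masking a
+# string in pieces equals masking it in one call, and masking twice with the
+# same key restores the input. The helper name and key are hypothetical.
+def _demo_repeated_xor_masking():
+    key = '\x37\xfa\x21\x3d'
+    masker = RepeatedXorMasker(key)
+    masked = masker.mask('Hello, ') + masker.mask('WebSocket!')
+    assert masked == RepeatedXorMasker(key).mask('Hello, WebSocket!')
+    assert RepeatedXorMasker(key).mask(masked) == 'Hello, WebSocket!'
+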
+
+class DeflateRequest(object):
+    """A wrapper class for request object to intercept send and recv to perform
+    deflate compression and decompression transparently.
+    """
+
+    def __init__(self, request):
+        self._request = request
+        self.connection = DeflateConnection(request.connection)
+
+    def __getattribute__(self, name):
+        if name in ('_request', 'connection'):
+            return object.__getattribute__(self, name)
+        return self._request.__getattribute__(name)
+
+    def __setattr__(self, name, value):
+        if name in ('_request', 'connection'):
+            return object.__setattr__(self, name, value)
+        return self._request.__setattr__(name, value)
+
+
+# By making the wbits option negative, we can suppress the CMF/FLG (2 octets)
+# and ADLER32 (4 octets) fields of zlib so that we can use the zlib module as
+# a plain deflate library. DICTID won't be added as long as we don't set a
+# dictionary. An LZ77 window of 32K will be used for both compression and
+# decompression. For decompression, 32K covers any window size. For
+# compression, we use 32K, so receivers must use 32K.
+#
+# The compression level is Z_DEFAULT_COMPRESSION. We don't have to match the
+# level to decode.
+#
+# See zconf.h, deflate.c, and inflate.c of the zlib library, and zlibmodule.c
+# of Python. See also RFC 1950 (ZLIB 3.3).
+
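+# A minimal sketch (not part of the original module, never called here)
+# showing the raw-deflate round trip described in the comment above: negative
+# wbits yields a bare DEFLATE stream with no zlib header or ADLER32 trailer.
+# The helper name is hypothetical.
+def _demo_raw_deflate_round_trip():
+    compressor = zlib.compressobj(
+        zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+    compressed = compressor.compress('hello') + compressor.flush(zlib.Z_FINISH)
+    decompressor = zlib.decompressobj(-zlib.MAX_WBITS)
+    assert decompressor.decompress(compressed) == 'hello'
+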
+
+class _Deflater(object):
+
+    def __init__(self, window_bits):
+        self._logger = get_class_logger(self)
+
+        self._compress = zlib.compressobj(
+            zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -window_bits)
+
+    def compress(self, bytes):
+        compressed_bytes = self._compress.compress(bytes)
+        self._logger.debug('Compress input %r', bytes)
+        self._logger.debug('Compress result %r', compressed_bytes)
+        return compressed_bytes
+
+    def compress_and_flush(self, bytes):
+        compressed_bytes = self._compress.compress(bytes)
+        compressed_bytes += self._compress.flush(zlib.Z_SYNC_FLUSH)
+        self._logger.debug('Compress input %r', bytes)
+        self._logger.debug('Compress result %r', compressed_bytes)
+        return compressed_bytes
+
+    def compress_and_finish(self, bytes):
+        compressed_bytes = self._compress.compress(bytes)
+        compressed_bytes += self._compress.flush(zlib.Z_FINISH)
+        self._logger.debug('Compress input %r', bytes)
+        self._logger.debug('Compress result %r', compressed_bytes)
+        return compressed_bytes
+
+class _Inflater(object):
+
+    def __init__(self):
+        self._logger = get_class_logger(self)
+
+        self._unconsumed = ''
+
+        self.reset()
+
+    def decompress(self, size):
+        if not (size == -1 or size > 0):
+            raise Exception('size must be -1 or positive')
+
+        data = ''
+
+        while True:
+            if size == -1:
+                data += self._decompress.decompress(self._unconsumed)
+                # See Python bug http://bugs.python.org/issue12050 to
+                # understand why the same code cannot be used to update
+                # self._unconsumed here and in the else block.
+                self._unconsumed = ''
+            else:
+                data += self._decompress.decompress(
+                    self._unconsumed, size - len(data))
+                self._unconsumed = self._decompress.unconsumed_tail
+            if self._decompress.unused_data:
+                # Encountered a last block (i.e. a block with BFINAL = 1) and
+                # found a new stream (unused_data). We cannot use the same
+                # zlib.Decompress object for the new stream. Create a new
+                # Decompress object to decompress the new one.
+                #
+                # It's fine to ignore unconsumed_tail if unused_data is not
+                # empty.
+                self._unconsumed = self._decompress.unused_data
+                self.reset()
+                if size >= 0 and len(data) == size:
+                    # data is filled. Don't call decompress again.
+                    break
+                else:
+                    # Re-invoke Decompress.decompress to try to decompress all
+                    # available bytes before invoking read which blocks until
+                    # any new byte is available.
+                    continue
+            else:
+                # Here, since unused_data is empty, even if unconsumed_tail is
+                # not empty, bytes of requested length are already in data. We
+                # don't have to "continue" here.
+                break
+
+        if data:
+            self._logger.debug('Decompressed %r', data)
+        return data
+
+    def append(self, data):
+        self._logger.debug('Appended %r', data)
+        self._unconsumed += data
+
+    def reset(self):
+        self._logger.debug('Reset')
+        self._decompress = zlib.decompressobj(-zlib.MAX_WBITS)
+
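+# A minimal sketch (not part of the original module, never called here)
+# showing why _Inflater resets itself when unused_data appears: two
+# independent raw-deflate streams appended back to back are decoded
+# transparently across the stream boundary. The helper names are hypothetical.
+def _demo_inflater_multiple_streams():
+    def _finish_one_stream(payload):
+        compressor = zlib.compressobj(
+            zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+        return compressor.compress(payload) + compressor.flush(zlib.Z_FINISH)
+
+    inflater = _Inflater()
+    inflater.append(_finish_one_stream('first') + _finish_one_stream('second'))
+    assert inflater.decompress(-1) == 'firstsecond'
+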
+
+# Compresses/decompresses given octets using the method introduced in RFC 1979.
+
+
+class _RFC1979Deflater(object):
+    """A compressor class that applies DEFLATE to given byte sequence and
+    flushes using the algorithm described in the RFC1979 section 2.1.
+    """
+
+    def __init__(self, window_bits, no_context_takeover):
+        self._deflater = None
+        if window_bits is None:
+            window_bits = zlib.MAX_WBITS
+        self._window_bits = window_bits
+        self._no_context_takeover = no_context_takeover
+
+    def filter(self, bytes, flush=True, bfinal=False):
+        if self._deflater is None or (self._no_context_takeover and flush):
+            self._deflater = _Deflater(self._window_bits)
+
+        if bfinal:
+            result = self._deflater.compress_and_finish(bytes)
+            # Add a padding block with BFINAL = 0 and BTYPE = 0.
+            result = result + chr(0)
+            self._deflater = None
+            return result
+        if flush:
+            # Strip the last 4 octets, which are the LEN and NLEN fields of a
+            # non-compressed block added for Z_SYNC_FLUSH.
+            return self._deflater.compress_and_flush(bytes)[:-4]
+        return self._deflater.compress(bytes)
+
+class _RFC1979Inflater(object):
+    """A decompressor class for byte sequence compressed and flushed following
+    the algorithm described in the RFC1979 section 2.1.
+    """
+
+    def __init__(self):
+        self._inflater = _Inflater()
+
+    def filter(self, bytes):
+        # Restore the stripped LEN and NLEN fields of the non-compressed block
+        # added for Z_SYNC_FLUSH.
+        self._inflater.append(bytes + '\x00\x00\xff\xff')
+        return self._inflater.decompress(-1)
+
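+# A minimal sketch (not part of the original module, never called here)
+# showing the two classes above inverting each other: the deflater strips the
+# 00 00 ff ff tail that Z_SYNC_FLUSH appends, and the inflater restores it
+# before decompressing. The helper name is hypothetical.
+def _demo_rfc1979_round_trip():
+    deflater = _RFC1979Deflater(None, False)
+    inflater = _RFC1979Inflater()
+    compressed = deflater.filter('Hello, WebSocket!')
+    assert inflater.filter(compressed) == 'Hello, WebSocket!'
+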
+
+class DeflateSocket(object):
+    """A wrapper class for socket object to intercept send and recv to perform
+    deflate compression and decompression transparently.
+    """
+
+    # Size of the buffer passed to recv to receive compressed data.
+    _RECV_SIZE = 4096
+
+    def __init__(self, socket):
+        self._socket = socket
+
+        self._logger = get_class_logger(self)
+
+        self._deflater = _Deflater(zlib.MAX_WBITS)
+        self._inflater = _Inflater()
+
+    def recv(self, size):
+        """Receives data from the socket specified on the construction up
+        to the specified size. Once any data is available, returns it even
+        if it's smaller than the specified size.
+        """
+
+        # TODO(tyoshino): Allow call with size=0. It should block until any
+        # decompressed data is available.
+        if size <= 0:
+            raise Exception('Non-positive size passed')
+        while True:
+            data = self._inflater.decompress(size)
+            if len(data) != 0:
+                return data
+
+            read_data = self._socket.recv(DeflateSocket._RECV_SIZE)
+            if not read_data:
+                return ''
+            self._inflater.append(read_data)
+
+    def sendall(self, bytes):
+        self.send(bytes)
+
+    def send(self, bytes):
+        self._socket.sendall(self._deflater.compress_and_flush(bytes))
+        return len(bytes)
+
+
+class DeflateConnection(object):
+    """A wrapper class for request object to intercept write and read to
+    perform deflate compression and decompression transparently.
+    """
+
+    def __init__(self, connection):
+        self._connection = connection
+
+        self._logger = get_class_logger(self)
+
+        self._deflater = _Deflater(zlib.MAX_WBITS)
+        self._inflater = _Inflater()
+
+    def get_remote_addr(self):
+        return self._connection.remote_addr
+    remote_addr = property(get_remote_addr)
+
+    def put_bytes(self, bytes):
+        self.write(bytes)
+
+    def read(self, size=-1):
+        """Reads at most size bytes. Blocks until there's at least one byte
+        available.
+        """
+
+        # TODO(tyoshino): Allow call with size=0.
+        if not (size == -1 or size > 0):
+            raise Exception('size must be -1 or positive')
+
+        data = ''
+        while True:
+            if size == -1:
+                data += self._inflater.decompress(-1)
+            else:
+                data += self._inflater.decompress(size - len(data))
+
+            if size >= 0 and len(data) != 0:
+                break
+
+            # TODO(tyoshino): Make this read efficient by some workaround.
+            #
+            # In mod_python 3.0.3 and earlier, read blocks until length bytes
+            # have been read. We don't know the exact size to read while using
+            # deflate, so we read byte-by-byte.
+            #
+            # _StandaloneRequest.read, which ultimately performs
+            # socket._fileobject.read, also blocks until length bytes have
+            # been read.
+            read_data = self._connection.read(1)
+            if not read_data:
+                break
+            self._inflater.append(read_data)
+        return data
+
+    def write(self, bytes):
+        self._connection.write(self._deflater.compress_and_flush(bytes))
+
+
+def _is_ewouldblock_errno(error_number):
+    """Returns True iff error_number indicates that receive operation would
+    block. To make this portable, we check availability of errno and then
+    compare them.
+    """
+
+    for error_name in ['WSAEWOULDBLOCK', 'EWOULDBLOCK', 'EAGAIN']:
+        if (error_name in dir(errno) and
+            error_number == getattr(errno, error_name)):
+            return True
+    return False
+
+
+def drain_received_data(raw_socket):
+    # Set the socket non-blocking.
+    original_timeout = raw_socket.gettimeout()
+    raw_socket.settimeout(0.0)
+
+    drained_data = []
+
+    # Drain until the socket is closed or no data is immediately
+    # available for read.
+    while True:
+        try:
+            data = raw_socket.recv(1)
+            if not data:
+                break
+            drained_data.append(data)
+        except socket.error, e:
+            # e can be either a pair (errno, string) or just a string (or
+            # something else) telling what went wrong. We suppress only
+            # the errors that indicate that the socket would block. Those
+            # exceptions can be parsed as a pair (errno, string).
+            try:
+                error_number, message = e
+            except:
+                # Failed to parse socket.error.
+                raise e
+
+            if _is_ewouldblock_errno(error_number):
+                break
+            else:
+                raise e
+
+    # Restore the original timeout value.
+    raw_socket.settimeout(original_timeout)
+
+    return ''.join(drained_data)
+
+
+# vi:sts=4 sw=4 et
diff --git a/Tools/Scripts/webkitpy/thirdparty/ordered_dict.py b/Tools/Scripts/webkitpy/thirdparty/ordered_dict.py
new file mode 100644
index 0000000..3dc735a
--- /dev/null
+++ b/Tools/Scripts/webkitpy/thirdparty/ordered_dict.py
@@ -0,0 +1,89 @@
+# Copyright (c) 2009 Raymond Hettinger.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# This code is obtained from http://code.activestate.com/recipes/576669/
+
+from collections import MutableMapping
+
+class OrderedDict(dict, MutableMapping):
+
+    # Methods with direct access to underlying attributes
+
+    def __init__(self, *args, **kwds):
+        if len(args) > 1:
+            raise TypeError('expected at most 1 argument, got %d' % len(args))
+        if not hasattr(self, '_keys'):
+            self._keys = []
+        self.update(*args, **kwds)
+
+    def clear(self):
+        del self._keys[:]
+        dict.clear(self)
+
+    def __setitem__(self, key, value):
+        if key not in self:
+            self._keys.append(key)
+        dict.__setitem__(self, key, value)
+
+    def __delitem__(self, key):
+        dict.__delitem__(self, key)
+        self._keys.remove(key)
+
+    def __iter__(self):
+        return iter(self._keys)
+
+    def __reversed__(self):
+        return reversed(self._keys)
+
+    def popitem(self):
+        if not self:
+            raise KeyError
+        key = self._keys.pop()
+        value = dict.pop(self, key)
+        return key, value
+
+    def __reduce__(self):
+        items = [[k, self[k]] for k in self]
+        inst_dict = vars(self).copy()
+        inst_dict.pop('_keys', None)
+        return (self.__class__, (items,), inst_dict)
+
+    # Methods with indirect access via the above methods
+
+    setdefault = MutableMapping.setdefault
+    update = MutableMapping.update
+    pop = MutableMapping.pop
+    keys = MutableMapping.keys
+    values = MutableMapping.values
+    items = MutableMapping.items
+
+    def __repr__(self):
+        pairs = ', '.join(map('%r: %r'.__mod__, self.items()))
+        return '%s({%s})' % (self.__class__.__name__, pairs)
+
+    def copy(self):
+        return self.__class__(self)
+
+    @classmethod
+    def fromkeys(cls, iterable, value=None):
+        d = cls()
+        for key in iterable:
+            d[key] = value
+        return d
diff --git a/Tools/Scripts/webkitpy/to_be_moved/__init__.py b/Tools/Scripts/webkitpy/to_be_moved/__init__.py
new file mode 100644
index 0000000..c0528b7
--- /dev/null
+++ b/Tools/Scripts/webkitpy/to_be_moved/__init__.py
@@ -0,0 +1,10 @@
+# Required for Python to search this directory for module files
+# This directory houses Python modules that do not yet have a proper home.
+#
+# Some of the Python modules in this directory aren't really part of webkitpy
+# in the sense that they're not classes that are meant to be used as part of
+# the webkitpy library. Instead, they're a bunch of helper code for individual
+# scripts in Tools/Scripts.
+#
+# Really, all this code should either be refactored or moved somewhere else,
+# hence the somewhat lame name for this directory.
diff --git a/Tools/Scripts/webkitpy/to_be_moved/update_webgl_conformance_tests.py b/Tools/Scripts/webkitpy/to_be_moved/update_webgl_conformance_tests.py
new file mode 100755
index 0000000..68c2fb7
--- /dev/null
+++ b/Tools/Scripts/webkitpy/to_be_moved/update_webgl_conformance_tests.py
@@ -0,0 +1,163 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import glob
+import logging
+import optparse
+import os
+import re
+import sys
+from webkitpy.common.checkout import scm
+from webkitpy.common.system.filesystem import FileSystem
+from webkitpy.common.system.executive import Executive
+
+
+_log = logging.getLogger(__name__)
+
+
+def remove_first_line_comment(text):
+    return re.compile(r'^<!--.*?-->\s*', re.DOTALL).sub('', text)
+
+
+def translate_includes(text):
+    # Mapping of single filename to relative path under WebKit root.
+    # Assumption: these filenames are globally unique.
+    include_mapping = {
+        "js-test-style.css": "../../js/resources",
+        "js-test-pre.js": "../../js/resources",
+        "js-test-post.js": "../../js/resources",
+        "desktop-gl-constants.js": "resources",
+    }
+
+    for filename, path in include_mapping.items():
+        search = r'(?:[^"\'= ]*/)?' + re.escape(filename)
+        # We use '/' instead of os.path.join in order to produce consistent
+        # output cross-platform.
+        replace = path + '/' + filename
+        text = re.sub(search, replace, text)
+
+    return text
+
+
+def translate_khronos_test(text):
+    """
+    This function translates the contents of a Khronos test into a WebKit test.
+    """
+
+    translateFuncs = [
+        remove_first_line_comment,
+        translate_includes,
+    ]
+
+    for f in translateFuncs:
+        text = f(text)
+
+    return text
+
+
+def update_file(in_filename, out_dir):
+    # TODO: Check that in_filename exists and that out_dir exists.
+    out_filename = os.path.join(out_dir, os.path.basename(in_filename))
+
+    _log.debug("Processing " + in_filename)
+    with open(in_filename, 'r') as in_file:
+        with open(out_filename, 'w') as out_file:
+            out_file.write(translate_khronos_test(in_file.read()))
+
+
+def update_directory(in_dir, out_dir):
+    for filename in glob.glob(os.path.join(in_dir, '*.html')):
+        # glob() already returns paths prefixed with in_dir, so use them as-is.
+        update_file(filename, out_dir)
+
+
+def default_out_dir():
+    detector = scm.SCMDetector(FileSystem(), Executive())
+    current_scm = detector.detect_scm_system(os.path.dirname(sys.argv[0]))
+    if not current_scm:
+        return os.getcwd()
+    root_dir = current_scm.checkout_root
+    if not root_dir:
+        return os.getcwd()
+    out_dir = os.path.join(root_dir, "LayoutTests/fast/canvas/webgl")
+    if os.path.isdir(out_dir):
+        return out_dir
+    return os.getcwd()
+
+
+def configure_logging(options):
+    """Configures the logging system."""
+    log_fmt = '%(levelname)s: %(message)s'
+    log_datefmt = '%y%m%d %H:%M:%S'
+    log_level = logging.INFO
+    if options.verbose:
+        log_fmt = ('%(asctime)s %(filename)s:%(lineno)-4d %(levelname)s '
+                   '%(message)s')
+        log_level = logging.DEBUG
+    logging.basicConfig(level=log_level, format=log_fmt,
+                        datefmt=log_datefmt)
+
+
+def option_parser():
+    usage = "usage: %prog [options] (input file or directory)"
+    parser = optparse.OptionParser(usage=usage)
+    parser.add_option('-v', '--verbose',
+                             action='store_true',
+                             default=False,
+                             help='include debug-level logging')
+    parser.add_option('-o', '--output',
+                             action='store',
+                             type='string',
+                             default=default_out_dir(),
+                             metavar='DIR',
+                             help='specify an output directory to place files '
+                                  'in [default: %default]')
+    return parser
+
+
+def main():
+    parser = option_parser()
+    (options, args) = parser.parse_args()
+    configure_logging(options)
+
+    if len(args) == 0:
+        _log.error("Must specify an input directory or filename.")
+        parser.print_help()
+        return 1
+
+    in_name = args[0]
+    if os.path.isfile(in_name):
+        update_file(in_name, options.output)
+    elif os.path.isdir(in_name):
+        update_directory(in_name, options.output)
+    else:
+        _log.error("'%s' is not a directory or a file.", in_name)
+        return 2
+
+    return 0
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/Tools/Scripts/webkitpy/to_be_moved/update_webgl_conformance_tests_unittest.py b/Tools/Scripts/webkitpy/to_be_moved/update_webgl_conformance_tests_unittest.py
new file mode 100644
index 0000000..b3b4d58
--- /dev/null
+++ b/Tools/Scripts/webkitpy/to_be_moved/update_webgl_conformance_tests_unittest.py
@@ -0,0 +1,102 @@
+#!/usr/bin/python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for update_webgl_conformance_tests."""
+
+import unittest
+from webkitpy.to_be_moved import update_webgl_conformance_tests as webgl
+
+
+def construct_script(name):
+    return "<script src=\"" + name + "\"></script>\n"
+
+
+def construct_style(name):
+    return "<link rel=\"stylesheet\" href=\"" + name + "\">"
+
+
+class TestTranslation(unittest.TestCase):
+    def assert_unchanged(self, text):
+        self.assertEqual(text, webgl.translate_khronos_test(text))
+
+    def assert_translate(self, input, output):
+        self.assertEqual(output, webgl.translate_khronos_test(input))
+
+    def test_simple_unchanged(self):
+        self.assert_unchanged("")
+        self.assert_unchanged("<html></html>")
+
+    def test_header_strip(self):
+        single_line_header = "<!-- single line header. -->"
+        multi_line_header = """<!-- this is a multi-line
+                header.  it should all be removed too.
+                -->"""
+        text = "<html></html>"
+        self.assert_translate(single_line_header, "")
+        self.assert_translate(single_line_header + text, text)
+        self.assert_translate(multi_line_header + text, text)
+
+    def test_dont_strip_other_headers(self):
+        self.assert_unchanged("<html>\n<!-- don't remove comments on other lines. -->\n</html>")
+
+    def test_include_rewriting(self):
+        # Mappings to None are unchanged
+        styles = {
+            "../resources/js-test-style.css": "../../js/resources/js-test-style.css",
+            "fail.css": None,
+            "resources/stylesheet.css": None,
+            "../resources/style.css": None,
+        }
+        scripts = {
+            "../resources/js-test-pre.js": "../../js/resources/js-test-pre.js",
+            "../resources/js-test-post.js": "../../js/resources/js-test-post.js",
+            "../resources/desktop-gl-constants.js": "resources/desktop-gl-constants.js",
+
+            "resources/shadow-offset.js": None,
+            "../resources/js-test-post-async.js": None,
+        }
+
+        input_text = ""
+        output_text = ""
+        for input, output in styles.items():
+            input_text += construct_style(input)
+            output_text += construct_style(output if output else input)
+        for input, output in scripts.items():
+            input_text += construct_script(input)
+            output_text += construct_script(output if output else input)
+
+        head = '<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">\n<html>\n<head>\n'
+        foot = '</head>\n<body>\n</body>\n</html>'
+        input_text = head + input_text + foot
+        output_text = head + output_text + foot
+        self.assert_translate(input_text, output_text)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/tool/__init__.py b/Tools/Scripts/webkitpy/tool/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/Tools/Scripts/webkitpy/tool/bot/__init__.py b/Tools/Scripts/webkitpy/tool/bot/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/bot/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/Tools/Scripts/webkitpy/tool/bot/botinfo.py b/Tools/Scripts/webkitpy/tool/bot/botinfo.py
new file mode 100644
index 0000000..b9fd938
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/bot/botinfo.py
@@ -0,0 +1,39 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+# FIXME: We should consider hanging one of these off the tool object.
+class BotInfo(object):
+    def __init__(self, tool):
+        self._tool = tool
+
+    def summary_text(self):
+        # bot_id is also stored on the options dictionary on the tool.
+        bot_id = self._tool.status_server.bot_id
+        bot_id_string = "Bot: %s  " % (bot_id) if bot_id else ""
+        return "%sPort: %s  Platform: %s" % (bot_id_string, self._tool.port().name(), self._tool.platform.display_name())
diff --git a/Tools/Scripts/webkitpy/tool/bot/botinfo_unittest.py b/Tools/Scripts/webkitpy/tool/bot/botinfo_unittest.py
new file mode 100644
index 0000000..820ff55
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/bot/botinfo_unittest.py
@@ -0,0 +1,41 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.tool.bot.botinfo import BotInfo
+from webkitpy.tool.mocktool import MockTool
+from webkitpy.common.net.statusserver_mock import MockStatusServer
+
+
+class BotInfoTest(unittest.TestCase):
+
+    def test_summary_text(self):
+        tool = MockTool()
+        tool.status_server = MockStatusServer("MockBotId")
+        self.assertEqual(BotInfo(tool).summary_text(), "Bot: MockBotId  Port: MockPort  Platform: MockPlatform 1.0")
diff --git a/Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py b/Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py
new file mode 100644
index 0000000..491ba79
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py
@@ -0,0 +1,94 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.bot.patchanalysistask import PatchAnalysisTask, PatchAnalysisTaskDelegate
+
+
+class CommitQueueTaskDelegate(PatchAnalysisTaskDelegate):
+    def parent_command(self):
+        return "commit-queue"
+
+    def did_pass_testing_ews(self, patch):
+        raise NotImplementedError("subclasses must implement")
+
+
+class CommitQueueTask(PatchAnalysisTask):
+    def validate(self):
+        # Bugs might get closed, or patches might be obsoleted or r-'d while the
+        # commit-queue is processing.
+        self._patch = self._delegate.refetch_patch(self._patch)
+        if self._patch.is_obsolete():
+            return False
+        if self._patch.bug().is_closed():
+            return False
+        if not self._patch.committer():
+            return False
+        if self._patch.review() == "-":
+            return False
+        return True
+
+    def _validate_changelog(self):
+        return self._run_command([
+            "validate-changelog",
+            "--non-interactive",
+            self._patch.id(),
+        ],
+        "ChangeLog validated",
+        "ChangeLog did not pass validation")
+
+    def _did_pass_tests_recently(self):
+        if self._delegate.did_pass_testing_ews(self._patch):
+            return True
+        return self._test_patch()
+
+    def run(self):
+        if not self.validate():
+            return False
+        if not self._clean():
+            return False
+        if not self._update():
+            return False
+        if not self._apply():
+            return self.report_failure()
+        if not self._validate_changelog():
+            return self.report_failure()
+        if not self._patch.is_rollout():
+            if not self._build():
+                if not self._build_without_patch():
+                    return False
+                return self.report_failure()
+            if not self._did_pass_tests_recently():
+                return False
+        # Make sure the patch is still valid before landing (e.g., make sure
+        # no one has set commit-queue- since we started working on the patch.)
+        if not self.validate():
+            return False
+        # FIXME: We should understand why the land failure occurred and retry if possible.
+        if not self._land():
+            return self.report_failure()
+        return True
diff --git a/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py b/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py
new file mode 100644
index 0000000..8b33416
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py
@@ -0,0 +1,580 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from datetime import datetime
+import unittest
+
+from webkitpy.common.net import bugzilla
+from webkitpy.common.net.layouttestresults import LayoutTestResults
+from webkitpy.common.system.deprecated_logging import error, log
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.layout_tests.models import test_results
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.thirdparty.mock import Mock
+from webkitpy.tool.bot.commitqueuetask import *
+from webkitpy.tool.bot.expectedfailures import ExpectedFailures
+from webkitpy.tool.mocktool import MockTool
+
+
+class MockCommitQueue(CommitQueueTaskDelegate):
+    def __init__(self, error_plan):
+        self._error_plan = error_plan
+        self._failure_status_id = 0
+
+    def run_command(self, command):
+        log("run_webkit_patch: %s" % command)
+        if self._error_plan:
+            error = self._error_plan.pop(0)
+            if error:
+                raise error
+
+    def command_passed(self, success_message, patch):
+        log("command_passed: success_message='%s' patch='%s'" % (
+            success_message, patch.id()))
+
+    def command_failed(self, failure_message, script_error, patch):
+        log("command_failed: failure_message='%s' script_error='%s' patch='%s'" % (
+            failure_message, script_error, patch.id()))
+        self._failure_status_id += 1
+        return self._failure_status_id
+
+    def refetch_patch(self, patch):
+        return patch
+
+    def expected_failures(self):
+        return ExpectedFailures()
+
+    def test_results(self):
+        return None
+
+    def report_flaky_tests(self, patch, flaky_results, results_archive):
+        flaky_tests = [result.filename for result in flaky_results]
+        log("report_flaky_tests: patch='%s' flaky_tests='%s' archive='%s'" % (patch.id(), flaky_tests, results_archive.filename))
+
+    def archive_last_test_results(self, patch):
+        log("archive_last_test_results: patch='%s'" % patch.id())
+        archive = Mock()
+        archive.filename = "mock-archive-%s.zip" % patch.id()
+        return archive
+
+    def build_style(self):
+        return "both"
+
+    def did_pass_testing_ews(self, patch):
+        return False
+
+
+class FailingTestCommitQueue(MockCommitQueue):
+    def __init__(self, error_plan, test_failure_plan):
+        MockCommitQueue.__init__(self, error_plan)
+        self._test_run_counter = -1  # Special value to indicate tests have never been run.
+        self._test_failure_plan = test_failure_plan
+
+    def run_command(self, command):
+        if command[0] == "build-and-test":
+            self._test_run_counter += 1
+        MockCommitQueue.run_command(self, command)
+
+    def _mock_test_result(self, testname):
+        return test_results.TestResult(testname, [test_failures.FailureTextMismatch()])
+
+    def test_results(self):
+        # Doesn't make sense to ask for the test_results until the tests have run at least once.
+        assert(self._test_run_counter >= 0)
+        failures_for_run = self._test_failure_plan[self._test_run_counter]
+        results = LayoutTestResults(map(self._mock_test_result, failures_for_run))
+        # This makes the results trustable by ExpectedFailures.
+        results.set_failure_limit_count(10)
+        return results
+
+
+# We use GoldenScriptError to make sure that the code under test throws the
+# correct (i.e., golden) exception.
+class GoldenScriptError(ScriptError):
+    pass
+
+
+class CommitQueueTaskTest(unittest.TestCase):
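+    # Each test drives CommitQueueTask.run() against a scripted delegate and
+    # compares the log output it produces; expect_retry means run() should
+    # return False (asking the queue to retry) rather than raise.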
+    def _run_through_task(self, commit_queue, expected_stderr, expected_exception=None, expect_retry=False):
+        tool = MockTool(log_executive=True)
+        patch = tool.bugs.fetch_attachment(10000)
+        task = CommitQueueTask(commit_queue, patch)
+        success = OutputCapture().assert_outputs(self, task.run, expected_stderr=expected_stderr, expected_exception=expected_exception)
+        if not expected_exception:
+            self.assertEqual(success, not expect_retry)
+        return task
+
+    def test_success_case(self):
+        commit_queue = MockCommitQueue([])
+        expected_stderr = """run_webkit_patch: ['clean']
+command_passed: success_message='Cleaned working directory' patch='10000'
+run_webkit_patch: ['update']
+command_passed: success_message='Updated working directory' patch='10000'
+run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
+command_passed: success_message='Applied patch' patch='10000'
+run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
+command_passed: success_message='ChangeLog validated' patch='10000'
+run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
+command_passed: success_message='Built patch' patch='10000'
+run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
+command_passed: success_message='Passed tests' patch='10000'
+run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
+command_passed: success_message='Landed patch' patch='10000'
+"""
+        self._run_through_task(commit_queue, expected_stderr)
+
+    def test_fast_success_case(self):
+        commit_queue = MockCommitQueue([])
+        commit_queue.did_pass_testing_ews = lambda patch: True
+        expected_stderr = """run_webkit_patch: ['clean']
+command_passed: success_message='Cleaned working directory' patch='10000'
+run_webkit_patch: ['update']
+command_passed: success_message='Updated working directory' patch='10000'
+run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
+command_passed: success_message='Applied patch' patch='10000'
+run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
+command_passed: success_message='ChangeLog validated' patch='10000'
+run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
+command_passed: success_message='Built patch' patch='10000'
+run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
+command_passed: success_message='Landed patch' patch='10000'
+"""
+        self._run_through_task(commit_queue, expected_stderr)
+
+    def test_clean_failure(self):
+        commit_queue = MockCommitQueue([
+            ScriptError("MOCK clean failure"),
+        ])
+        expected_stderr = """run_webkit_patch: ['clean']
+command_failed: failure_message='Unable to clean working directory' script_error='MOCK clean failure' patch='10000'
+"""
+        self._run_through_task(commit_queue, expected_stderr, expect_retry=True)
+
+    def test_update_failure(self):
+        commit_queue = MockCommitQueue([
+            None,
+            ScriptError("MOCK update failure"),
+        ])
+        expected_stderr = """run_webkit_patch: ['clean']
+command_passed: success_message='Cleaned working directory' patch='10000'
+run_webkit_patch: ['update']
+command_failed: failure_message='Unable to update working directory' script_error='MOCK update failure' patch='10000'
+"""
+        self._run_through_task(commit_queue, expected_stderr, expect_retry=True)
+
+    def test_apply_failure(self):
+        commit_queue = MockCommitQueue([
+            None,
+            None,
+            GoldenScriptError("MOCK apply failure"),
+        ])
+        expected_stderr = """run_webkit_patch: ['clean']
+command_passed: success_message='Cleaned working directory' patch='10000'
+run_webkit_patch: ['update']
+command_passed: success_message='Updated working directory' patch='10000'
+run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
+command_failed: failure_message='Patch does not apply' script_error='MOCK apply failure' patch='10000'
+"""
+        self._run_through_task(commit_queue, expected_stderr, GoldenScriptError)
+
+    def test_validate_changelog_failure(self):
+        commit_queue = MockCommitQueue([
+            None,
+            None,
+            None,
+            GoldenScriptError("MOCK validate failure"),
+        ])
+        expected_stderr = """run_webkit_patch: ['clean']
+command_passed: success_message='Cleaned working directory' patch='10000'
+run_webkit_patch: ['update']
+command_passed: success_message='Updated working directory' patch='10000'
+run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
+command_passed: success_message='Applied patch' patch='10000'
+run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
+command_failed: failure_message='ChangeLog did not pass validation' script_error='MOCK validate failure' patch='10000'
+"""
+        self._run_through_task(commit_queue, expected_stderr, GoldenScriptError)
+
+    def test_build_failure(self):
+        commit_queue = MockCommitQueue([
+            None,
+            None,
+            None,
+            None,
+            GoldenScriptError("MOCK build failure"),
+        ])
+        expected_stderr = """run_webkit_patch: ['clean']
+command_passed: success_message='Cleaned working directory' patch='10000'
+run_webkit_patch: ['update']
+command_passed: success_message='Updated working directory' patch='10000'
+run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
+command_passed: success_message='Applied patch' patch='10000'
+run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
+command_passed: success_message='ChangeLog validated' patch='10000'
+run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
+command_failed: failure_message='Patch does not build' script_error='MOCK build failure' patch='10000'
+run_webkit_patch: ['build', '--force-clean', '--no-update', '--build-style=both']
+command_passed: success_message='Able to build without patch' patch='10000'
+"""
+        self._run_through_task(commit_queue, expected_stderr, GoldenScriptError)
+
+    def test_red_build_failure(self):
+        commit_queue = MockCommitQueue([
+            None,
+            None,
+            None,
+            None,
+            ScriptError("MOCK build failure"),
+            ScriptError("MOCK clean build failure"),
+        ])
+        expected_stderr = """run_webkit_patch: ['clean']
+command_passed: success_message='Cleaned working directory' patch='10000'
+run_webkit_patch: ['update']
+command_passed: success_message='Updated working directory' patch='10000'
+run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
+command_passed: success_message='Applied patch' patch='10000'
+run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
+command_passed: success_message='ChangeLog validated' patch='10000'
+run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
+command_failed: failure_message='Patch does not build' script_error='MOCK build failure' patch='10000'
+run_webkit_patch: ['build', '--force-clean', '--no-update', '--build-style=both']
+command_failed: failure_message='Unable to build without patch' script_error='MOCK clean build failure' patch='10000'
+"""
+        self._run_through_task(commit_queue, expected_stderr, expect_retry=True)
+
+    def test_flaky_test_failure(self):
+        commit_queue = MockCommitQueue([
+            None,
+            None,
+            None,
+            None,
+            None,
+            ScriptError("MOCK tests failure"),
+        ])
+        # CommitQueueTask will only report flaky tests if we successfully parsed
+        # results.html and returned a LayoutTestResults object, so we fake one.
+        commit_queue.test_results = lambda: LayoutTestResults([])
+        expected_stderr = """run_webkit_patch: ['clean']
+command_passed: success_message='Cleaned working directory' patch='10000'
+run_webkit_patch: ['update']
+command_passed: success_message='Updated working directory' patch='10000'
+run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
+command_passed: success_message='Applied patch' patch='10000'
+run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
+command_passed: success_message='ChangeLog validated' patch='10000'
+run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
+command_passed: success_message='Built patch' patch='10000'
+run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
+command_failed: failure_message='Patch does not pass tests' script_error='MOCK tests failure' patch='10000'
+archive_last_test_results: patch='10000'
+run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
+command_passed: success_message='Passed tests' patch='10000'
+report_flaky_tests: patch='10000' flaky_tests='[]' archive='mock-archive-10000.zip'
+run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
+command_passed: success_message='Landed patch' patch='10000'
+"""
+        self._run_through_task(commit_queue, expected_stderr)
+
+    def test_failed_archive(self):
+        commit_queue = MockCommitQueue([
+            None,
+            None,
+            None,
+            None,
+            None,
+            ScriptError("MOCK tests failure"),
+        ])
+        commit_queue.test_results = lambda: LayoutTestResults([])
+        # It's possible for the delegate to fail to archive the layout test results;
+        # don't try to report flaky tests when that happens.
+        commit_queue.archive_last_test_results = lambda patch: None
+        expected_stderr = """run_webkit_patch: ['clean']
+command_passed: success_message='Cleaned working directory' patch='10000'
+run_webkit_patch: ['update']
+command_passed: success_message='Updated working directory' patch='10000'
+run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
+command_passed: success_message='Applied patch' patch='10000'
+run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
+command_passed: success_message='ChangeLog validated' patch='10000'
+run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
+command_passed: success_message='Built patch' patch='10000'
+run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
+command_failed: failure_message='Patch does not pass tests' script_error='MOCK tests failure' patch='10000'
+run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
+command_passed: success_message='Passed tests' patch='10000'
+run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
+command_passed: success_message='Landed patch' patch='10000'
+"""
+        self._run_through_task(commit_queue, expected_stderr)
+
+    def test_double_flaky_test_failure(self):
+        commit_queue = FailingTestCommitQueue([
+            None,
+            None,
+            None,
+            None,
+            None,
+            ScriptError("MOCK test failure"),
+            ScriptError("MOCK test failure again"),
+        ], [
+            "foo.html",
+            "bar.html",
+            "foo.html",
+        ])
+        # The (subtle) point of this test is that report_flaky_tests does not appear
+        # in the expected_stderr for this run.
+        # Note also that there is no attempt to run the tests w/o the patch.
+        expected_stderr = """run_webkit_patch: ['clean']
+command_passed: success_message='Cleaned working directory' patch='10000'
+run_webkit_patch: ['update']
+command_passed: success_message='Updated working directory' patch='10000'
+run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
+command_passed: success_message='Applied patch' patch='10000'
+run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
+command_passed: success_message='ChangeLog validated' patch='10000'
+run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
+command_passed: success_message='Built patch' patch='10000'
+run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
+command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
+archive_last_test_results: patch='10000'
+run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
+command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
+"""
+        tool = MockTool(log_executive=True)
+        patch = tool.bugs.fetch_attachment(10000)
+        task = CommitQueueTask(commit_queue, patch)
+        success = OutputCapture().assert_outputs(self, task.run, expected_stderr=expected_stderr)
+        self.assertEqual(success, False)
+
+    def test_test_failure(self):
+        commit_queue = MockCommitQueue([
+            None,
+            None,
+            None,
+            None,
+            None,
+            GoldenScriptError("MOCK test failure"),
+            ScriptError("MOCK test failure again"),
+        ])
+        expected_stderr = """run_webkit_patch: ['clean']
+command_passed: success_message='Cleaned working directory' patch='10000'
+run_webkit_patch: ['update']
+command_passed: success_message='Updated working directory' patch='10000'
+run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
+command_passed: success_message='Applied patch' patch='10000'
+run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
+command_passed: success_message='ChangeLog validated' patch='10000'
+run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
+command_passed: success_message='Built patch' patch='10000'
+run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
+command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
+archive_last_test_results: patch='10000'
+run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
+command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
+archive_last_test_results: patch='10000'
+run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive']
+command_passed: success_message='Able to pass tests without patch' patch='10000'
+"""
+        self._run_through_task(commit_queue, expected_stderr, GoldenScriptError)
+
+    def test_red_test_failure(self):
+        commit_queue = FailingTestCommitQueue([
+            None,
+            None,
+            None,
+            None,
+            None,
+            ScriptError("MOCK test failure"),
+            ScriptError("MOCK test failure again"),
+            ScriptError("MOCK clean test failure"),
+        ], [
+            "foo.html",
+            "foo.html",
+            "foo.html",
+        ])
+
+        # Tests always fail, and always return the same results, but we
+        # should still be able to land in this case!
+        expected_stderr = """run_webkit_patch: ['clean']
+command_passed: success_message='Cleaned working directory' patch='10000'
+run_webkit_patch: ['update']
+command_passed: success_message='Updated working directory' patch='10000'
+run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
+command_passed: success_message='Applied patch' patch='10000'
+run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
+command_passed: success_message='ChangeLog validated' patch='10000'
+run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
+command_passed: success_message='Built patch' patch='10000'
+run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
+command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
+archive_last_test_results: patch='10000'
+run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
+command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
+archive_last_test_results: patch='10000'
+run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive']
+command_failed: failure_message='Unable to pass tests without patch (tree is red?)' script_error='MOCK clean test failure' patch='10000'
+run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
+command_passed: success_message='Landed patch' patch='10000'
+"""
+        self._run_through_task(commit_queue, expected_stderr)
+
+    def test_very_red_tree_retry(self):
+        lots_of_failing_tests = map(lambda num: "test-%s.html" % num, range(0, 100))
+        commit_queue = FailingTestCommitQueue([
+            None,
+            None,
+            None,
+            None,
+            None,
+            ScriptError("MOCK test failure"),
+            ScriptError("MOCK test failure again"),
+            ScriptError("MOCK clean test failure"),
+        ], [
+            lots_of_failing_tests,
+            lots_of_failing_tests,
+            lots_of_failing_tests,
+        ])
+
+        # Tests always fail, and return so many failures that we do not
+        # trust the results (see ExpectedFailures._can_trust_results) so we
+        # just give up and retry the patch.
+        expected_stderr = """run_webkit_patch: ['clean']
+command_passed: success_message='Cleaned working directory' patch='10000'
+run_webkit_patch: ['update']
+command_passed: success_message='Updated working directory' patch='10000'
+run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
+command_passed: success_message='Applied patch' patch='10000'
+run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
+command_passed: success_message='ChangeLog validated' patch='10000'
+run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
+command_passed: success_message='Built patch' patch='10000'
+run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
+command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
+archive_last_test_results: patch='10000'
+run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
+command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
+archive_last_test_results: patch='10000'
+run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive']
+command_failed: failure_message='Unable to pass tests without patch (tree is red?)' script_error='MOCK clean test failure' patch='10000'
+"""
+        self._run_through_task(commit_queue, expected_stderr, expect_retry=True)
+
+    def test_red_tree_patch_rejection(self):
+        commit_queue = FailingTestCommitQueue([
+            None,
+            None,
+            None,
+            None,
+            None,
+            GoldenScriptError("MOCK test failure"),
+            ScriptError("MOCK test failure again"),
+            ScriptError("MOCK clean test failure"),
+        ], [
+            ["foo.html", "bar.html"],
+            ["foo.html", "bar.html"],
+            ["foo.html"],
+        ])
+
+        # Tests always fail, but the clean tree only fails one test
+        # while the patch fails two.  So we should reject the patch!
+        expected_stderr = """run_webkit_patch: ['clean']
+command_passed: success_message='Cleaned working directory' patch='10000'
+run_webkit_patch: ['update']
+command_passed: success_message='Updated working directory' patch='10000'
+run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
+command_passed: success_message='Applied patch' patch='10000'
+run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
+command_passed: success_message='ChangeLog validated' patch='10000'
+run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
+command_passed: success_message='Built patch' patch='10000'
+run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
+command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
+archive_last_test_results: patch='10000'
+run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
+command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
+archive_last_test_results: patch='10000'
+run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive']
+command_failed: failure_message='Unable to pass tests without patch (tree is red?)' script_error='MOCK clean test failure' patch='10000'
+"""
+        task = self._run_through_task(commit_queue, expected_stderr, GoldenScriptError)
+        self.assertEqual(task.results_from_patch_test_run(task._patch).failing_tests(), ["foo.html", "bar.html"])
+        # failure_status_id should be of the test with patch (1), not the test without patch (2).
+        self.assertEqual(task.failure_status_id, 1)
+
+    def test_land_failure(self):
+        commit_queue = MockCommitQueue([
+            None,
+            None,
+            None,
+            None,
+            None,
+            None,
+            GoldenScriptError("MOCK land failure"),
+        ])
+        expected_stderr = """run_webkit_patch: ['clean']
+command_passed: success_message='Cleaned working directory' patch='10000'
+run_webkit_patch: ['update']
+command_passed: success_message='Updated working directory' patch='10000'
+run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
+command_passed: success_message='Applied patch' patch='10000'
+run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
+command_passed: success_message='ChangeLog validated' patch='10000'
+run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
+command_passed: success_message='Built patch' patch='10000'
+run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
+command_passed: success_message='Passed tests' patch='10000'
+run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
+command_failed: failure_message='Unable to land patch' script_error='MOCK land failure' patch='10000'
+"""
+        # FIXME: This should really be expect_retry=True for a better user experience.
+        self._run_through_task(commit_queue, expected_stderr, GoldenScriptError)
+
+    def _expect_validate(self, patch, is_valid):
+        class MockDelegate(object):
+            def refetch_patch(self, patch):
+                return patch
+
+            def expected_failures(self):
+                return ExpectedFailures()
+
+        task = CommitQueueTask(MockDelegate(), patch)
+        self.assertEquals(task.validate(), is_valid)
+
+    def _mock_patch(self, attachment_dict={}, bug_dict={'bug_status': 'NEW'}, committer="fake"):
+        bug = bugzilla.Bug(bug_dict, None)
+        patch = bugzilla.Attachment(attachment_dict, bug)
+        patch._committer = committer
+        return patch
+
+    def test_validate(self):
+        self._expect_validate(self._mock_patch(), True)
+        self._expect_validate(self._mock_patch({'is_obsolete': True}), False)
+        self._expect_validate(self._mock_patch(bug_dict={'bug_status': 'CLOSED'}), False)
+        self._expect_validate(self._mock_patch(committer=None), False)
+        self._expect_validate(self._mock_patch({'review': '-'}), False)
diff --git a/Tools/Scripts/webkitpy/tool/bot/earlywarningsystemtask.py b/Tools/Scripts/webkitpy/tool/bot/earlywarningsystemtask.py
new file mode 100644
index 0000000..b66cfbc
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/bot/earlywarningsystemtask.py
@@ -0,0 +1,66 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.bot.patchanalysistask import PatchAnalysisTask, PatchAnalysisTaskDelegate, UnableToApplyPatch
+
+
+class EarlyWarningSystemTaskDelegate(PatchAnalysisTaskDelegate):
+    pass
+
+
+class EarlyWarningSystemTask(PatchAnalysisTask):
+    def __init__(self, delegate, patch, should_run_tests=True):
+        PatchAnalysisTask.__init__(self, delegate, patch)
+        self._should_run_tests = should_run_tests
+
+    def validate(self):
+        self._patch = self._delegate.refetch_patch(self._patch)
+        if self._patch.is_obsolete():
+            return False
+        if self._patch.bug().is_closed():
+            return False
+        if self._patch.review() == "-":
+            return False
+        return True
+
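+    # run() walks the patch through clean, update, apply and build; a failed
+    # build triggers a build without the patch to tell a broken patch from a
+    # broken tree, and the tests only run when should_run_tests is set.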
+    def run(self):
+        if not self.validate():
+            return False
+        if not self._clean():
+            return False
+        if not self._update():
+            return False
+        if not self._apply():
+            raise UnableToApplyPatch(self._patch)
+        if not self._build():
+            if not self._build_without_patch():
+                return False
+            return self.report_failure()
+        if not self._should_run_tests:
+            return True
+        return self._test_patch()
diff --git a/Tools/Scripts/webkitpy/tool/bot/expectedfailures.py b/Tools/Scripts/webkitpy/tool/bot/expectedfailures.py
new file mode 100644
index 0000000..c0cfe21
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/bot/expectedfailures.py
@@ -0,0 +1,60 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class ExpectedFailures(object):
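+    # Remembers which tests were failing on the last trusted run so later runs
+    # can tell pre-existing (expected) failures apart from new ones.
+    #
+    # Illustrative usage (a sketch, not taken from the callers):
+    #   expected = ExpectedFailures()
+    #   expected.update(results_without_patch)
+    #   new_failures = expected.unexpected_failures_observed(results_with_patch)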
+    def __init__(self):
+        self._failures = set()
+        self._is_trustworthy = True
+
+    @classmethod
+    def _has_failures(cls, results):
+        return bool(results and results.failing_tests())
+
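+    # Results are only trusted when they report at least one failure and the run
+    # stopped short of its failure limit; hitting the limit suggests the tree is
+    # too broken for the failure list to mean much.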
+    @classmethod
+    def _should_trust(cls, results):
+        return bool(cls._has_failures(results) and results.failure_limit_count() and len(results.failing_tests()) < results.failure_limit_count())
+
+    def failures_were_expected(self, results):
+        if not self._is_trustworthy:
+            return False
+        if not self._should_trust(results):
+            return False
+        return set(results.failing_tests()) <= self._failures
+
+    def unexpected_failures_observed(self, results):
+        if not self._is_trustworthy:
+            return None
+        if not self._has_failures(results):
+            return None
+        return set(results.failing_tests()) - self._failures
+
+    def update(self, results):
+        if results:
+            self._failures = set(results.failing_tests())
+            self._is_trustworthy = self._should_trust(results)
diff --git a/Tools/Scripts/webkitpy/tool/bot/expectedfailures_unittest.py b/Tools/Scripts/webkitpy/tool/bot/expectedfailures_unittest.py
new file mode 100644
index 0000000..4c1c3d9
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/bot/expectedfailures_unittest.py
@@ -0,0 +1,95 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.tool.bot.expectedfailures import ExpectedFailures
+
+
+class MockResults(object):
+    def __init__(self, failing_tests=[], failure_limit=10):
+        self._failing_tests = failing_tests
+        self._failure_limit_count = failure_limit
+
+    def failure_limit_count(self):
+        return self._failure_limit_count
+
+    def failing_tests(self):
+        return self._failing_tests
+
+
+class ExpectedFailuresTest(unittest.TestCase):
+    def _assert_can_trust(self, results, can_trust):
+        self.assertEquals(ExpectedFailures._should_trust(results), can_trust)
+
+    def test_can_trust_results(self):
+        self._assert_can_trust(None, False)
+        self._assert_can_trust(MockResults(failing_tests=[], failure_limit=None), False)
+        self._assert_can_trust(MockResults(failing_tests=[], failure_limit=10), False)
+        self._assert_can_trust(MockResults(failing_tests=[1], failure_limit=None), False)
+        self._assert_can_trust(MockResults(failing_tests=[1], failure_limit=2), True)
+        self._assert_can_trust(MockResults(failing_tests=[1], failure_limit=1), False)
+        self._assert_can_trust(MockResults(failing_tests=[1, 2], failure_limit=1), False)
+
+    def _assert_expected(self, expected_failures, failures, expected):
+        self.assertEqual(expected_failures.failures_were_expected(MockResults(failures)), expected)
+
+    def test_failures_were_expected(self):
+        failures = ExpectedFailures()
+        failures.update(MockResults(['foo.html']))
+        self._assert_expected(failures, ['foo.html'], True)
+        self._assert_expected(failures, ['bar.html'], False)
+        self._assert_expected(failures, ['bar.html', 'foo.html'], False)
+
+        failures.update(MockResults(['baz.html']))
+        self._assert_expected(failures, ['baz.html'], True)
+        self._assert_expected(failures, ['foo.html'], False)
+
+        failures.update(MockResults([]))
+        self._assert_expected(failures, ['baz.html'], False)
+        self._assert_expected(failures, ['foo.html'], False)
+
+    def test_unexpected_failures_observed(self):
+        failures = ExpectedFailures()
+        failures.update(MockResults(['foo.html']))
+        self.assertEquals(failures.unexpected_failures_observed(MockResults(['foo.html', 'bar.html'])), set(['bar.html']))
+        self.assertEquals(failures.unexpected_failures_observed(MockResults(['baz.html'])), set(['baz.html']))
+        unbounded_results = MockResults(['baz.html', 'qux.html', 'taco.html'], failure_limit=3)
+        self.assertEquals(failures.unexpected_failures_observed(unbounded_results), set(['baz.html', 'qux.html', 'taco.html']))
+        unbounded_results_with_existing_failure = MockResults(['foo.html', 'baz.html', 'qux.html', 'taco.html'], failure_limit=4)
+        self.assertEquals(failures.unexpected_failures_observed(unbounded_results_with_existing_failure), set(['baz.html', 'qux.html', 'taco.html']))
+
+    def test_unexpected_failures_observed_when_tree_is_hosed(self):
+        failures = ExpectedFailures()
+        failures.update(MockResults(['foo.html', 'banana.html'], failure_limit=2))
+        self.assertEquals(failures.unexpected_failures_observed(MockResults(['foo.html', 'bar.html'])), None)
+        self.assertEquals(failures.unexpected_failures_observed(MockResults(['baz.html'])), None)
+        unbounded_results = MockResults(['baz.html', 'qux.html', 'taco.html'], failure_limit=3)
+        self.assertEquals(failures.unexpected_failures_observed(unbounded_results), None)
+        unbounded_results_with_existing_failure = MockResults(['foo.html', 'baz.html', 'qux.html', 'taco.html'], failure_limit=4)
+        self.assertEquals(failures.unexpected_failures_observed(unbounded_results_with_existing_failure), None)
diff --git a/Tools/Scripts/webkitpy/tool/bot/feeders.py b/Tools/Scripts/webkitpy/tool/bot/feeders.py
new file mode 100644
index 0000000..4ba2f04
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/bot/feeders.py
@@ -0,0 +1,95 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.config.committervalidator import CommitterValidator
+from webkitpy.common.system.deprecated_logging import log
+from webkitpy.tool.grammar import pluralize
+
+
+class AbstractFeeder(object):
+    def __init__(self, tool):
+        self._tool = tool
+
+    def feed(self):
+        raise NotImplementedError("subclasses must implement")
+
+
+class CommitQueueFeeder(AbstractFeeder):
+    queue_name = "commit-queue"
+
+    def __init__(self, tool):
+        AbstractFeeder.__init__(self, tool)
+        self.committer_validator = CommitterValidator(self._tool)
+
+    def _update_work_items(self, item_ids):
+        # FIXME: This is the last use of update_work_items, the commit-queue
+        # should move to feeding patches one at a time like the EWS does.
+        self._tool.status_server.update_work_items(self.queue_name, item_ids)
+        log("Feeding %s items %s" % (self.queue_name, item_ids))
+
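+    # Collects commit-queue'd patches from Bugzilla, drops ones with invalid
+    # committers or unacceptable review flags, sorts rollouts first, and pushes
+    # the resulting ids to the status server.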
+    def feed(self):
+        patches = self._validate_patches()
+        patches = self._patches_with_acceptable_review_flag(patches)
+        patches = sorted(patches, self._patch_cmp)
+        patch_ids = [patch.id() for patch in patches]
+        self._update_work_items(patch_ids)
+
+    def _patches_for_bug(self, bug_id):
+        return self._tool.bugs.fetch_bug(bug_id).commit_queued_patches(include_invalid=True)
+
+    # Filters out patches with r? or r-; only r+ or no review are OK to land.
+    def _patches_with_acceptable_review_flag(self, patches):
+        return [patch for patch in patches if patch.review() in [None, '+']]
+
+    def _validate_patches(self):
+        # Not using BugzillaQueries.fetch_patches_from_commit_queue() so we can reject patches with invalid committers/reviewers.
+        bug_ids = self._tool.bugs.queries.fetch_bug_ids_from_commit_queue()
+        all_patches = sum([self._patches_for_bug(bug_id) for bug_id in bug_ids], [])
+        return self.committer_validator.patches_after_rejecting_invalid_commiters_and_reviewers(all_patches)
+
+    def _patch_cmp(self, a, b):
+        # Sort first by is_rollout, then by attach_date.
+        # Comparing b against a reverses the order so that rollouts sort first.
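+        # Example ordering: an old rollout, a recent rollout, an old regular
+        # patch, then a recent regular patch.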
+        rollout_cmp = cmp(b.is_rollout(), a.is_rollout())
+        if rollout_cmp != 0:
+            return rollout_cmp
+        return cmp(a.attach_date(), b.attach_date())
+
+
+class EWSFeeder(AbstractFeeder):
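+    # Remembers which attachment ids were already submitted so repeated feed()
+    # calls only send newly posted r? patches to the EWS.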
+    def __init__(self, tool):
+        self._ids_sent_to_server = set()
+        AbstractFeeder.__init__(self, tool)
+
+    def feed(self):
+        ids_needing_review = set(self._tool.bugs.queries.fetch_attachment_ids_from_review_queue())
+        new_ids = ids_needing_review.difference(self._ids_sent_to_server)
+        log("Feeding EWS (%s, %s new)" % (pluralize("r? patch", len(ids_needing_review)), len(new_ids)))
+        for attachment_id in new_ids:  # Order doesn't really matter for the EWS.
+            self._tool.status_server.submit_to_ews(attachment_id)
+            self._ids_sent_to_server.add(attachment_id)
diff --git a/Tools/Scripts/webkitpy/tool/bot/feeders_unittest.py b/Tools/Scripts/webkitpy/tool/bot/feeders_unittest.py
new file mode 100644
index 0000000..dff48ad
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/bot/feeders_unittest.py
@@ -0,0 +1,80 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from datetime import datetime
+import unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.thirdparty.mock import Mock
+from webkitpy.tool.bot.feeders import *
+from webkitpy.tool.mocktool import MockTool
+
+
+class FeedersTest(unittest.TestCase):
+    def test_commit_queue_feeder(self):
+        feeder = CommitQueueFeeder(MockTool())
+        expected_stderr = u"""Warning, attachment 10001 on bug 50000 has invalid committer (non-committer@example.com)
+Warning, attachment 10001 on bug 50000 has invalid committer (non-committer@example.com)
+MOCK setting flag 'commit-queue' to '-' on attachment '10001' with comment 'Rejecting attachment 10001 from commit-queue.' and additional comment 'non-committer@example.com does not have committer permissions according to http://trac.webkit.org/browser/trunk/Tools/Scripts/webkitpy/common/config/committers.py.
+
+- If you do not have committer rights please read http://webkit.org/coding/contributing.html for instructions on how to use bugzilla flags.
+
+- If you have committer rights please correct the error in Tools/Scripts/webkitpy/common/config/committers.py by adding yourself to the file (no review needed).  The commit-queue restarts itself every 2 hours.  After restart the commit-queue will correctly respect your committer rights.'
+MOCK: update_work_items: commit-queue [10005, 10000]
+Feeding commit-queue items [10005, 10000]
+"""
+        OutputCapture().assert_outputs(self, feeder.feed, expected_stderr=expected_stderr)
+
+    def _mock_attachment(self, is_rollout, attach_date):
+        attachment = Mock()
+        attachment.is_rollout = lambda: is_rollout
+        attachment.attach_date = lambda: attach_date
+        return attachment
+
+    def test_patch_cmp(self):
+        long_ago_date = datetime(1900, 1, 21)
+        recent_date = datetime(2010, 1, 21)
+        attachment1 = self._mock_attachment(is_rollout=False, attach_date=recent_date)
+        attachment2 = self._mock_attachment(is_rollout=False, attach_date=long_ago_date)
+        attachment3 = self._mock_attachment(is_rollout=True, attach_date=recent_date)
+        attachment4 = self._mock_attachment(is_rollout=True, attach_date=long_ago_date)
+        attachments = [attachment1, attachment2, attachment3, attachment4]
+        expected_sort = [attachment4, attachment3, attachment2, attachment1]
+        queue = CommitQueueFeeder(MockTool())
+        attachments.sort(queue._patch_cmp)
+        self.assertEqual(attachments, expected_sort)
+
+    def test_patches_with_acceptable_review_flag(self):
+        class MockPatch(object):
+            def __init__(self, patch_id, review):
+                self.id = patch_id
+                self.review = lambda: review
+
+        feeder = CommitQueueFeeder(MockTool())
+        patches = [MockPatch(1, None), MockPatch(2, '-'), MockPatch(3, "+")]
+        self.assertEquals([patch.id for patch in feeder._patches_with_acceptable_review_flag(patches)], [1, 3])
diff --git a/Tools/Scripts/webkitpy/tool/bot/flakytestreporter.py b/Tools/Scripts/webkitpy/tool/bot/flakytestreporter.py
new file mode 100644
index 0000000..7be4a4a
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/bot/flakytestreporter.py
@@ -0,0 +1,199 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import codecs
+import logging
+import os.path
+
+from webkitpy.common.net.layouttestresults import path_for_layout_test, LayoutTestResults
+from webkitpy.common.config import urls
+from webkitpy.tool.bot.botinfo import BotInfo
+from webkitpy.tool.grammar import plural, pluralize, join_with_separators
+
+_log = logging.getLogger(__name__)
+
+
+class FlakyTestReporter(object):
+    def __init__(self, tool, bot_name):
+        self._tool = tool
+        self._bot_name = bot_name
+        self._bot_info = BotInfo(tool)
+
+    def _author_emails_for_test(self, flaky_test):
+        test_path = path_for_layout_test(flaky_test)
+        commit_infos = self._tool.checkout().recent_commit_infos_for_files([test_path])
+        # This ignores authors who are not committers because we don't have their bugzilla_email.
+        return set([commit_info.author().bugzilla_email() for commit_info in commit_infos if commit_info.author()])
+
+    def _bugzilla_email(self):
+        # FIXME: This is kind of a funny way to get the bugzilla email;
+        # we could also just create a Credentials object directly
+        # but some of the Credentials logic is in bugzilla.py too...
+        self._tool.bugs.authenticate()
+        return self._tool.bugs.username
+
+    # FIXME: This should move into common.config
+    _bot_emails = set([
+        "commit-queue@webkit.org",  # commit-queue
+        "eseidel@chromium.org",  # old commit-queue
+        "webkit.review.bot@gmail.com",  # style-queue, sheriff-bot, CrLx/Gtk EWS
+        "buildbot@hotmail.com",  # Win EWS
+        # Mac EWS currently uses eric@webkit.org, but that's not normally a bot
+    ])
+
+    def _lookup_bug_for_flaky_test(self, flaky_test):
+        bugs = self._tool.bugs.queries.fetch_bugs_matching_search(search_string=flaky_test)
+        if not bugs:
+            return None
+        # Match any bugs which are from known bots or the email this bot is using.
+        allowed_emails = self._bot_emails | set([self._bugzilla_email()])
+        bugs = filter(lambda bug: bug.reporter_email() in allowed_emails, bugs)
+        if not bugs:
+            return None
+        if len(bugs) > 1:
+            # FIXME: There are probably heuristics we could use for finding
+            # the right bug instead of the first, like open vs. closed.
+            _log.warn("Found %s %s matching '%s' filed by a bot, using the first." % (pluralize('bug', len(bugs)), [bug.id() for bug in bugs], flaky_test))
+        return bugs[0]
+
+    def _view_source_url_for_test(self, test_path):
+        return urls.view_source_url("LayoutTests/%s" % test_path)
+
+    def _create_bug_for_flaky_test(self, flaky_test, author_emails, latest_flake_message):
+        format_values = {
+            'test': flaky_test,
+            'authors': join_with_separators(sorted(author_emails)),
+            'flake_message': latest_flake_message,
+            'test_url': self._view_source_url_for_test(flaky_test),
+            'bot_name': self._bot_name,
+        }
+        title = "Flaky Test: %(test)s" % format_values
+        description = """This is an automatically generated bug from the %(bot_name)s.
+%(test)s has been flaky on the %(bot_name)s.
+
+%(test)s was authored by %(authors)s.
+%(test_url)s
+
+%(flake_message)s
+
+The bots will update this with information from each new failure.
+
+If you believe this bug to be fixed or invalid, feel free to close.  The bots will re-open if the flake re-occurs.
+
+If you would like to track this test fix with another bug, please close this bug as a duplicate.  The bots will follow the duplicate chain when making future comments.
+""" % format_values
+
+        master_flake_bug = 50856  # MASTER: Flaky tests found by the commit-queue
+        return self._tool.bugs.create_bug(title, description,
+            component="Tools / Tests",
+            cc=",".join(author_emails),
+            blocked=str(master_flake_bug))
+
+    # This is over-engineered, but it makes for pretty bug messages.
+    def _optional_author_string(self, author_emails):
+        if not author_emails:
+            return ""
+        heading_string = plural('author') if len(author_emails) > 1 else 'author'
+        authors_string = join_with_separators(sorted(author_emails))
+        return " (%s: %s)" % (heading_string, authors_string)
+
+    def _latest_flake_message(self, flaky_result, patch):
+        failure_messages = [failure.message() for failure in flaky_result.failures]
+        flake_message = "The %s just saw %s flake (%s) while processing attachment %s on bug %s." % (self._bot_name, flaky_result.test_name, ", ".join(failure_messages), patch.id(), patch.bug_id())
+        return "%s\n%s" % (flake_message, self._bot_info.summary_text())
+
+    def _results_diff_path_for_test(self, test_path):
+        # FIXME: This is a big hack.  We should get this path from results.json
+        # except that old-run-webkit-tests doesn't produce a results.json
+        # so we just guess at the file path.
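+        # e.g. 'fast/js/foo.html' becomes 'fast/js/foo-diffs.txt'.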
+        (test_path_root, _) = os.path.splitext(test_path)
+        return "%s-diffs.txt" % test_path_root
+
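+    # Walks duplicate markers on closed bugs until reaching one that is still
+    # open or is not a duplicate; that is the bug that gets the new comment.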
+    def _follow_duplicate_chain(self, bug):
+        while bug.is_closed() and bug.duplicate_of():
+            bug = self._tool.bugs.fetch_bug(bug.duplicate_of())
+        return bug
+
+    # Maybe this logic should move into Bugzilla, e.g. as a reopen=True arg to post_comment?
+    def _update_bug_for_flaky_test(self, bug, latest_flake_message):
+        if bug.is_closed():
+            self._tool.bugs.reopen_bug(bug.id(), latest_flake_message)
+        else:
+            self._tool.bugs.post_comment_to_bug(bug.id(), latest_flake_message)
+
+    # This method is needed because our archive paths include a leading tmp/layout-test-results
+    def _find_in_archive(self, path, archive):
+        for archived_path in archive.namelist():
+            # Archives are currently created with full paths.
+            if archived_path.endswith(path):
+                return archived_path
+        return None
+
+    def _attach_failure_diff(self, flake_bug_id, flaky_test, results_archive_zip):
+        results_diff_path = self._results_diff_path_for_test(flaky_test)
+        # Check to make sure that the path makes sense.
+        # Since we're not actually getting this path from the results.html
+        # there is a chance it's wrong.
+        bot_id = self._tool.status_server.bot_id or "bot"
+        archive_path = self._find_in_archive(results_diff_path, results_archive_zip)
+        if archive_path:
+            results_diff = results_archive_zip.read(archive_path)
+            description = "Failure diff from %s" % bot_id
+            self._tool.bugs.add_attachment_to_bug(flake_bug_id, results_diff, description, filename="failure.diff")
+        else:
+            _log.warn("%s does not exist in results archive, uploading entire archive." % results_diff_path)
+            description = "Archive of layout-test-results from %s" % bot_id
+            # results_archive_zip is a ZipFile object; grab the underlying file object (.fp) to pass to Mechanize for uploading.
+            results_archive_file = results_archive_zip.fp
+            # Rewind the file object to start (since Mechanize won't do that automatically)
+            # See https://bugs.webkit.org/show_bug.cgi?id=54593
+            results_archive_file.seek(0)
+            self._tool.bugs.add_attachment_to_bug(flake_bug_id, results_archive_file, description, filename="layout-test-results.zip")
+
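+    # For each flaky result: find (or file) a tracking bug, attach the failure
+    # diff from the results archive, then post a single summary comment on the
+    # patch's own bug.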
+    def report_flaky_tests(self, patch, flaky_test_results, results_archive):
+        message = "The %s encountered the following flaky tests while processing attachment %s:\n\n" % (self._bot_name, patch.id())
+        for flaky_result in flaky_test_results:
+            flaky_test = flaky_result.test_name
+            bug = self._lookup_bug_for_flaky_test(flaky_test)
+            latest_flake_message = self._latest_flake_message(flaky_result, patch)
+            author_emails = self._author_emails_for_test(flaky_test)
+            if not bug:
+                _log.info("Bug does not already exist for %s, creating." % flaky_test)
+                flake_bug_id = self._create_bug_for_flaky_test(flaky_test, author_emails, latest_flake_message)
+            else:
+                bug = self._follow_duplicate_chain(bug)
+                # FIXME: Ideally we'd only make one comment per flake, not two.  But that's not possible
+                # in all cases (e.g. when reopening), so for now file attachment and comment are separate.
+                self._update_bug_for_flaky_test(bug, latest_flake_message)
+                flake_bug_id = bug.id()
+
+            self._attach_failure_diff(flake_bug_id, flaky_test, results_archive)
+            message += "%s bug %s%s\n" % (flaky_test, flake_bug_id, self._optional_author_string(author_emails))
+
+        message += "The %s is continuing to process your patch." % self._bot_name
+        self._tool.bugs.post_comment_to_bug(patch.bug_id(), message)
diff --git a/Tools/Scripts/webkitpy/tool/bot/flakytestreporter_unittest.py b/Tools/Scripts/webkitpy/tool/bot/flakytestreporter_unittest.py
new file mode 100644
index 0000000..eeb06c3
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/bot/flakytestreporter_unittest.py
@@ -0,0 +1,168 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.config.committers import Committer
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.layout_tests.models import test_results
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.thirdparty.mock import Mock
+from webkitpy.tool.bot.flakytestreporter import FlakyTestReporter
+from webkitpy.tool.mocktool import MockTool
+from webkitpy.common.net.statusserver_mock import MockStatusServer
+
+
+# Creating fake CommitInfos is a pain, so we use a mock one here.
+class MockCommitInfo(object):
+    def __init__(self, author_email):
+        self._author_email = author_email
+
+    def author(self):
+        # It's definitely possible to have commits with authors who
+        # are not in our committers.py list.
+        if not self._author_email:
+            return None
+        return Committer("Mock Committer", self._author_email)
+
+
+class FlakyTestReporterTest(unittest.TestCase):
+    def _mock_test_result(self, testname):
+        return test_results.TestResult(testname, [test_failures.FailureTextMismatch()])
+
+    def _assert_emails_for_test(self, emails):
+        tool = MockTool()
+        reporter = FlakyTestReporter(tool, 'dummy-queue')
+        commit_infos = [MockCommitInfo(email) for email in emails]
+        tool.checkout().recent_commit_infos_for_files = lambda paths: set(commit_infos)
+        self.assertEqual(reporter._author_emails_for_test([]), set(emails))
+
+    def test_author_emails_for_test(self):
+        self._assert_emails_for_test([])
+        self._assert_emails_for_test(["test1@test.com", "test1@test.com"])
+        self._assert_emails_for_test(["test1@test.com", "test2@test.com"])
+
+    def test_create_bug_for_flaky_test(self):
+        reporter = FlakyTestReporter(MockTool(), 'dummy-queue')
+        expected_stderr = """MOCK create_bug
+bug_title: Flaky Test: foo/bar.html
+bug_description: This is an automatically generated bug from the dummy-queue.
+foo/bar.html has been flaky on the dummy-queue.
+
+foo/bar.html was authored by test@test.com.
+http://trac.webkit.org/browser/trunk/LayoutTests/foo/bar.html
+
+FLAKE_MESSAGE
+
+The bots will update this with information from each new failure.
+
+If you believe this bug to be fixed or invalid, feel free to close.  The bots will re-open if the flake re-occurs.
+
+If you would like to track this test fix with another bug, please close this bug as a duplicate.  The bots will follow the duplicate chain when making future comments.
+
+component: Tools / Tests
+cc: test@test.com
+blocked: 50856
+"""
+        OutputCapture().assert_outputs(self, reporter._create_bug_for_flaky_test, ['foo/bar.html', ['test@test.com'], 'FLAKE_MESSAGE'], expected_stderr=expected_stderr)
+
+    def test_follow_duplicate_chain(self):
+        tool = MockTool()
+        reporter = FlakyTestReporter(tool, 'dummy-queue')
+        bug = tool.bugs.fetch_bug(50004)
+        self.assertEqual(reporter._follow_duplicate_chain(bug).id(), 50002)
+
+    def test_report_flaky_tests_creating_bug(self):
+        tool = MockTool()
+        tool.filesystem = MockFileSystem({"/mock-results/foo/bar-diffs.txt": "mock"})
+        tool.status_server = MockStatusServer(bot_id="mock-bot-id")
+        reporter = FlakyTestReporter(tool, 'dummy-queue')
+        reporter._lookup_bug_for_flaky_test = lambda bug_id: None
+        patch = tool.bugs.fetch_attachment(10000)
+        expected_stderr = """MOCK create_bug
+bug_title: Flaky Test: foo/bar.html
+bug_description: This is an automatically generated bug from the dummy-queue.
+foo/bar.html has been flaky on the dummy-queue.
+
+foo/bar.html was authored by abarth@webkit.org.
+http://trac.webkit.org/browser/trunk/LayoutTests/foo/bar.html
+
+The dummy-queue just saw foo/bar.html flake (text diff) while processing attachment 10000 on bug 50000.
+Bot: mock-bot-id  Port: MockPort  Platform: MockPlatform 1.0
+
+The bots will update this with information from each new failure.
+
+If you believe this bug to be fixed or invalid, feel free to close.  The bots will re-open if the flake re-occurs.
+
+If you would like to track this test fix with another bug, please close this bug as a duplicate.  The bots will follow the duplicate chain when making future comments.
+
+component: Tools / Tests
+cc: abarth@webkit.org
+blocked: 50856
+MOCK add_attachment_to_bug: bug_id=60001, description=Failure diff from mock-bot-id filename=failure.diff mimetype=None
+MOCK bug comment: bug_id=50000, cc=None
+--- Begin comment ---
+The dummy-queue encountered the following flaky tests while processing attachment 10000:
+
+foo/bar.html bug 60001 (author: abarth@webkit.org)
+The dummy-queue is continuing to process your patch.
+--- End comment ---
+
+"""
+        test_results = [self._mock_test_result('foo/bar.html')]
+
+        class MockZipFile(object):
+            def read(self, path):
+                return ""
+
+            def namelist(self):
+                return ['foo/bar-diffs.txt']
+
+        OutputCapture().assert_outputs(self, reporter.report_flaky_tests, [patch, test_results, MockZipFile()], expected_stderr=expected_stderr)
+
+    def test_optional_author_string(self):
+        reporter = FlakyTestReporter(MockTool(), 'dummy-queue')
+        self.assertEqual(reporter._optional_author_string([]), "")
+        self.assertEqual(reporter._optional_author_string(["foo@bar.com"]), " (author: foo@bar.com)")
+        self.assertEqual(reporter._optional_author_string(["a@b.com", "b@b.com"]), " (authors: a@b.com and b@b.com)")
+
+    def test_results_diff_path_for_test(self):
+        reporter = FlakyTestReporter(MockTool(), 'dummy-queue')
+        self.assertEqual(reporter._results_diff_path_for_test("test.html"), "test-diffs.txt")
+
+    def test_find_in_archive(self):
+        reporter = FlakyTestReporter(MockTool(), 'dummy-queue')
+
+        class MockZipFile(object):
+            def namelist(self):
+                return ["tmp/layout-test-results/foo/bar-diffs.txt"]
+
+        self.assertEqual(reporter._find_in_archive("foo/bar-diffs.txt", MockZipFile()), "tmp/layout-test-results/foo/bar-diffs.txt")
+        # This is not ideal, but suffix matching means even a bare "txt" matches the archived diff path.
+        self.assertEqual(reporter._find_in_archive("txt", MockZipFile()), "tmp/layout-test-results/foo/bar-diffs.txt")
diff --git a/Tools/Scripts/webkitpy/tool/bot/irc_command.py b/Tools/Scripts/webkitpy/tool/bot/irc_command.py
new file mode 100644
index 0000000..1c061a8
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/bot/irc_command.py
@@ -0,0 +1,250 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import itertools
+import random
+import re
+
+from webkitpy.common.config import irc as config_irc
+from webkitpy.common.config import urls
+from webkitpy.common.config.committers import CommitterList
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.tool.bot.queueengine import TerminateQueue
+from webkitpy.tool.grammar import join_with_separators
+
+
+def _post_error_and_check_for_bug_url(tool, nicks_string, exception):
+    tool.irc().post("%s" % exception)
+    bug_id = urls.parse_bug_id(exception.output)
+    if bug_id:
+        bug_url = tool.bugs.bug_url_for_bug_id(bug_id)
+        tool.irc().post("%s: Ugg...  Might have created %s" % (nicks_string, bug_url))
+
+
+# FIXME: Merge with Command?
+class IRCCommand(object):
+    def execute(self, nick, args, tool, sheriff):
+        raise NotImplementedError("subclasses must implement")
+
+
+class Restart(IRCCommand):
+    def execute(self, nick, args, tool, sheriff):
+        tool.irc().post("Restarting...")
+        raise TerminateQueue()
+
+
+class Rollout(IRCCommand):
+    def _extract_revisions(self, arg):
+        revision_list = []
+        possible_revisions = arg.split(",")
+        for revision in possible_revisions:
+            revision = revision.strip()
+            if not revision:
+                continue
+            revision = revision.lstrip("r")
+            # If one part of the arg isn't in the correct format,
+            # then none of the arg should be considered a revision.
+            if not revision.isdigit():
+                return None
+            revision_list.append(int(revision))
+        return revision_list
+
+    def _parse_args(self, args):
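+        # e.g. ["r1234,r1235", "5678", "broke", "the", "build"] -> ([1234, 1235, 5678], "broke the build")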
+        if not args:
+            return (None, None)
+
+        svn_revision_list = []
+        remaining_args = args[:]
+        # First process all revisions.
+        while remaining_args:
+            new_revisions = self._extract_revisions(remaining_args[0])
+            if not new_revisions:
+                break
+            svn_revision_list += new_revisions
+            remaining_args = remaining_args[1:]
+
+        # Was there a revision number?
+        if not svn_revision_list:
+            return (None, None)
+
+        # Everything left is the reason.
+        rollout_reason = " ".join(remaining_args)
+        return svn_revision_list, rollout_reason
+
+    def _responsible_nicknames_from_revisions(self, tool, sheriff, svn_revision_list):
+        commit_infos = map(tool.checkout().commit_info_for_revision, svn_revision_list)
+        nickname_lists = map(sheriff.responsible_nicknames_from_commit_info, commit_infos)
+        return sorted(set(itertools.chain(*nickname_lists)))
+
+    def _nicks_string(self, tool, sheriff, requester_nick, svn_revision_list):
+        # FIXME: _parse_args guarantees that our svn_revision_list is all numbers.
+        # However, it's possible our checkout will not include one of the revisions,
+        # so we may need to catch exceptions from commit_info_for_revision here.
+        target_nicks = [requester_nick] + self._responsible_nicknames_from_revisions(tool, sheriff, svn_revision_list)
+        return ", ".join(target_nicks)
+
+    def _update_working_copy(self, tool):
+        tool.scm().ensure_clean_working_directory(force_clean=True)
+        tool.executive.run_and_throw_if_fail(tool.port().update_webkit_command(), quiet=True, cwd=tool.scm().checkout_root)
+
+    def execute(self, nick, args, tool, sheriff):
+        svn_revision_list, rollout_reason = self._parse_args(args)
+
+        if not svn_revision_list or not rollout_reason:
+            # Returning the message (rather than posting it via irc().post() ourselves) makes unit testing easier.
+            return "%s: Usage: rollout SVN_REVISION [SVN_REVISIONS] REASON" % nick
+
+        revision_urls_string = join_with_separators([urls.view_revision_url(revision) for revision in svn_revision_list])
+        tool.irc().post("%s: Preparing rollout for %s ..." % (nick, revision_urls_string))
+
+        self._update_working_copy(tool)
+
+        # FIXME: IRCCommand should bind to a tool and have a self._tool like Command objects do.
+        # Likewise we should probably have a self._sheriff.
+        nicks_string = self._nicks_string(tool, sheriff, nick, svn_revision_list)
+
+        try:
+            complete_reason = "%s (Requested by %s on %s)." % (
+                rollout_reason, nick, config_irc.channel)
+            bug_id = sheriff.post_rollout_patch(svn_revision_list, complete_reason)
+            bug_url = tool.bugs.bug_url_for_bug_id(bug_id)
+            tool.irc().post("%s: Created rollout: %s" % (nicks_string, bug_url))
+        except ScriptError, e:
+            tool.irc().post("%s: Failed to create rollout patch:" % nicks_string)
+            _post_error_and_check_for_bug_url(tool, nicks_string, e)
+
+
+class RollChromiumDEPS(IRCCommand):
+    def _parse_args(self, args):
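+        # Returns the revision as a string (e.g. "1234"), or None, which means roll to the last-known good revision.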
+        if not args:
+            return
+        revision = args[0].lstrip("r")
+        if not revision.isdigit():
+            return
+        return revision
+
+    def execute(self, nick, args, tool, sheriff):
+        revision = self._parse_args(args)
+
+        roll_target = "r%s" % revision if revision else "last-known good revision"
+        tool.irc().post("%s: Rolling Chromium DEPS to %s" % (nick, roll_target))
+
+        try:
+            bug_id = sheriff.post_chromium_deps_roll(revision, roll_target)
+            bug_url = tool.bugs.bug_url_for_bug_id(bug_id)
+            tool.irc().post("%s: Created DEPS roll: %s" % (nick, bug_url))
+        except ScriptError, e:
+            match = re.search(r"Current Chromium DEPS revision \d+ is newer than \d+\.", e.output)
+            if match:
+                tool.irc().post("%s: %s" % (nick, match.group(0)))
+                return
+            tool.irc().post("%s: Failed to create DEPS roll:" % nick)
+            _post_error_and_check_for_bug_url(tool, nick, e)
+
+
+class Help(IRCCommand):
+    def execute(self, nick, args, tool, sheriff):
+        return "%s: Available commands: %s" % (nick, ", ".join(sorted(visible_commands.keys())))
+
+
+class Hi(IRCCommand):
+    def execute(self, nick, args, tool, sheriff):
+        quips = tool.bugs.quips()
+        quips.append('"Only you can prevent forest fires." -- Smokey the Bear')
+        return random.choice(quips)
+
+
+class Whois(IRCCommand):
+    def _nick_or_full_record(self, contributor):
+        if contributor.irc_nicknames:
+            return ', '.join(contributor.irc_nicknames)
+        return unicode(contributor)
+
+    def execute(self, nick, args, tool, sheriff):
+        if len(args) != 1:
+            return "%s: Usage: whois SEARCH_STRING" % nick
+        search_string = args[0]
+        # FIXME: We should get the ContributorList off the tool somewhere.
+        contributors = CommitterList().contributors_by_search_string(search_string)
+        if not contributors:
+            return "%s: Sorry, I don't know any contributors matching '%s'." % (nick, search_string)
+        if len(contributors) > 5:
+            return "%s: More than 5 contributors match '%s', could you be more specific?" % (nick, search_string)
+        if len(contributors) == 1:
+            contributor = contributors[0]
+            if not contributor.irc_nicknames:
+                return "%s: %s hasn't told me their nick. Boo hoo :-(" % (nick, contributor)
+            if contributor.emails and search_string.lower() not in map(lambda email: email.lower(), contributor.emails):
+                formatted_emails = ', '.join(contributor.emails)
+                return "%s: %s is %s (%s). Why do you ask?" % (nick, search_string, self._nick_or_full_record(contributor), formatted_emails)
+            else:
+                return "%s: %s is %s. Why do you ask?" % (nick, search_string, self._nick_or_full_record(contributor))
+        contributor_nicks = map(self._nick_or_full_record, contributors)
+        contributors_string = join_with_separators(contributor_nicks, only_two_separator=" or ", last_separator=', or ')
+        return "%s: I'm not sure who you mean?  %s could be '%s'." % (nick, contributors_string, search_string)
+
+
+class CreateBug(IRCCommand):
+    def execute(self, nick, args, tool, sheriff):
+        if not args:
+            return "%s: Usage: create-bug BUG_TITLE" % nick
+
+        bug_title = " ".join(args)
+        bug_description = "%s\nRequested by %s on %s." % (bug_title, nick, config_irc.channel)
+
+        # There happens to be a committers list hung off of Bugzilla, so
+        # re-using that one makes things easiest for now.
+        requester = tool.bugs.committers.contributor_by_irc_nickname(nick)
+        requester_email = requester.bugzilla_email() if requester else None
+
+        try:
+            bug_id = tool.bugs.create_bug(bug_title, bug_description, cc=requester_email, assignee=requester_email)
+            bug_url = tool.bugs.bug_url_for_bug_id(bug_id)
+            return "%s: Created bug: %s" % (nick, bug_url)
+        except Exception, e:
+            return "%s: Failed to create bug:\n%s" % (nick, e)
+
+
+# FIXME: Lame.  We should have an auto-registering CommandCenter.
+visible_commands = {
+    "help": Help,
+    "hi": Hi,
+    "restart": Restart,
+    "rollout": Rollout,
+    "whois": Whois,
+    "create-bug": CreateBug,
+    "roll-chromium-deps": RollChromiumDEPS,
+}
+
+# Add revert as an "easter egg" command. Why?
+# revert is the same as rollout and it would be confusing to list both when
+# they do the same thing. However, this command is a very natural thing for
+# people to use and it seems silly to have them hunt around for "rollout" instead.
+commands = visible_commands.copy()
+commands["revert"] = Rollout
diff --git a/Tools/Scripts/webkitpy/tool/bot/irc_command_unittest.py b/Tools/Scripts/webkitpy/tool/bot/irc_command_unittest.py
new file mode 100644
index 0000000..4dec669
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/bot/irc_command_unittest.py
@@ -0,0 +1,117 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.tool.bot.irc_command import *
+from webkitpy.tool.mocktool import MockTool
+from webkitpy.common.system.executive_mock import MockExecutive
+
+
+class IRCCommandTest(unittest.TestCase):
+    def test_whois(self):
+        whois = Whois()
+        self.assertEquals("tom: Usage: whois SEARCH_STRING",
+                          whois.execute("tom", [], None, None))
+        self.assertEquals("tom: Usage: whois SEARCH_STRING",
+                          whois.execute("tom", ["Adam", "Barth"], None, None))
+        self.assertEquals("tom: Sorry, I don't know any contributors matching 'unknown@example.com'.",
+                          whois.execute("tom", ["unknown@example.com"], None, None))
+        self.assertEquals("tom: tonyg@chromium.org is tonyg-cr. Why do you ask?",
+                          whois.execute("tom", ["tonyg@chromium.org"], None, None))
+        self.assertEquals("tom: TonyG@Chromium.org is tonyg-cr. Why do you ask?",
+                          whois.execute("tom", ["TonyG@Chromium.org"], None, None))
+        self.assertEquals("tom: rniwa is rniwa (rniwa@webkit.org). Why do you ask?",
+                          whois.execute("tom", ["rniwa"], None, None))
+        self.assertEquals("tom: lopez is xan (xan.lopez@gmail.com, xan@gnome.org, xan@webkit.org, xlopez@igalia.com). Why do you ask?",
+                          whois.execute("tom", ["lopez"], None, None))
+        self.assertEquals('tom: "Vicki Murley" <vicki@apple.com> hasn\'t told me their nick. Boo hoo :-(',
+                          whois.execute("tom", ["vicki@apple.com"], None, None))
+        self.assertEquals('tom: I\'m not sure who you mean?  gavinp or gbarra could be \'Gavin\'.',
+                          whois.execute("tom", ["Gavin"], None, None))
+        self.assertEquals('tom: More than 5 contributors match \'david\', could you be more specific?',
+                          whois.execute("tom", ["david"], None, None))
+
+    def test_create_bug(self):
+        create_bug = CreateBug()
+        self.assertEquals("tom: Usage: create-bug BUG_TITLE",
+                          create_bug.execute("tom", [], None, None))
+
+        example_args = ["sheriff-bot", "should", "have", "a", "create-bug", "command"]
+        tool = MockTool()
+
+        # MockBugzilla has a create_bug, but it logs to stderr; overriding it here avoids any logging.
+        tool.bugs.create_bug = lambda a, b, cc=None, assignee=None: 50004
+        self.assertEquals("tom: Created bug: http://example.com/50004",
+                          create_bug.execute("tom", example_args, tool, None))
+
+        def mock_create_bug(title, description, cc=None, assignee=None):
+            raise Exception("Exception from bugzilla!")
+        tool.bugs.create_bug = mock_create_bug
+        self.assertEquals("tom: Failed to create bug:\nException from bugzilla!",
+                          create_bug.execute("tom", example_args, tool, None))
+
+    def test_roll_chromium_deps(self):
+        roll = RollChromiumDEPS()
+        self.assertEquals(None, roll._parse_args([]))
+        self.assertEquals("1234", roll._parse_args(["1234"]))
+
+    def test_rollout_updates_working_copy(self):
+        rollout = Rollout()
+        tool = MockTool()
+        tool.executive = MockExecutive(should_log=True)
+        expected_stderr = "MOCK run_and_throw_if_fail: ['mock-update-webkit'], cwd=/mock-checkout\n"
+        OutputCapture().assert_outputs(self, rollout._update_working_copy, [tool], expected_stderr=expected_stderr)
+
+    def test_rollout(self):
+        rollout = Rollout()
+        self.assertEquals(([1234], "testing foo"),
+                          rollout._parse_args(["1234", "testing", "foo"]))
+
+        self.assertEquals(([554], "testing foo"),
+                          rollout._parse_args(["r554", "testing", "foo"]))
+
+        self.assertEquals(([556, 792], "testing foo"),
+                          rollout._parse_args(["r556", "792", "testing", "foo"]))
+
+        self.assertEquals(([128, 256], "testing foo"),
+                          rollout._parse_args(["r128,r256", "testing", "foo"]))
+
+        self.assertEquals(([512, 1024, 2048], "testing foo"),
+                          rollout._parse_args(["512,", "1024,2048", "testing", "foo"]))
+
+        # Test invalid argument parsing:
+        self.assertEquals((None, None), rollout._parse_args([]))
+        self.assertEquals((None, None), rollout._parse_args(["--bar", "1234"]))
+
+        # Invalid arguments result in the USAGE message.
+        self.assertEquals("tom: Usage: rollout SVN_REVISION [SVN_REVISIONS] REASON",
+                          rollout.execute("tom", [], None, None))
+
+        # FIXME: We need a better way to test IRCCommands which call tool.irc().post()
diff --git a/Tools/Scripts/webkitpy/tool/bot/ircbot.py b/Tools/Scripts/webkitpy/tool/bot/ircbot.py
new file mode 100644
index 0000000..0c45b97
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/bot/ircbot.py
@@ -0,0 +1,105 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.bot.queueengine import TerminateQueue
+from webkitpy.tool.bot.irc_command import IRCCommand
+from webkitpy.common.net.irc.ircbot import IRCBotDelegate
+from webkitpy.common.thread.threadedmessagequeue import ThreadedMessageQueue
+
+
+class _IRCThreadTearoff(IRCBotDelegate):
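+    # IRCBotDelegate implementation handed to the IRC thread: each received message is posted
+    # onto the shared queue and the wakeup event is set so that it gets processed.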
+    def __init__(self, name, password, message_queue, wakeup_event):
+        self._name = name
+        self._password = password
+        self._message_queue = message_queue
+        self._wakeup_event = wakeup_event
+
+    # IRCBotDelegate methods
+
+    def irc_message_received(self, nick, message):
+        self._message_queue.post([nick, message])
+        self._wakeup_event.set()
+
+    def irc_nickname(self):
+        return self._name
+
+    def irc_password(self):
+        return self._password
+
+
+class Eliza(IRCCommand):
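+    # Fallback handler used when the first token of a request is not a recognized command
+    # (see IRCBot._parse_command_and_args); lazily loads the auto-installed eliza module.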
+    therapist = None
+
+    def __init__(self):
+        if not self.therapist:
+            import webkitpy.thirdparty.autoinstalled.eliza as eliza
+            Eliza.therapist = eliza.eliza()
+
+    def execute(self, nick, args, tool, sheriff):
+        return "%s: %s" % (nick, self.therapist.respond(" ".join(args)))
+
+
+class IRCBot(object):
+    def __init__(self, name, tool, agent, commands):
+        self._name = name
+        self._tool = tool
+        self._agent = agent
+        self._message_queue = ThreadedMessageQueue()
+        self._commands = commands
+
+    def irc_delegate(self):
+        return _IRCThreadTearoff(self._name, self._tool.irc_password,
+            self._message_queue, self._tool.wakeup_event)
+
+    def _parse_command_and_args(self, request):
+        tokenized_request = request.strip().split(" ")
+        command = self._commands.get(tokenized_request[0])
+        args = tokenized_request[1:]
+        if not command:
+            # Give the peoples someone to talk with.
+            command = Eliza
+            args = tokenized_request
+        return (command, args)
+
+    def process_message(self, requester_nick, request):
+        command, args = self._parse_command_and_args(request)
+        try:
+            response = command().execute(requester_nick, args, self._tool, self._agent)
+            if response:
+                self._tool.irc().post(response)
+        except TerminateQueue:
+            raise
+        # This will catch everything else. SystemExit and KeyboardInterrupt are not subclasses of Exception, so we won't catch those.
+        except Exception, e:
+            self._tool.irc().post("Exception executing command: %s" % e)
+
+    def process_pending_messages(self):
+        (messages, is_running) = self._message_queue.take_all()
+        for message in messages:
+            (nick, request) = message
+            self.process_message(nick, request)
diff --git a/Tools/Scripts/webkitpy/tool/bot/ircbot_unittest.py b/Tools/Scripts/webkitpy/tool/bot/ircbot_unittest.py
new file mode 100644
index 0000000..ce9a76b
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/bot/ircbot_unittest.py
@@ -0,0 +1,163 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+import random
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.tool.bot import irc_command
+from webkitpy.tool.bot.queueengine import TerminateQueue
+from webkitpy.tool.bot.sheriff import Sheriff
+from webkitpy.tool.bot.ircbot import IRCBot
+from webkitpy.tool.bot.ircbot import Eliza
+from webkitpy.tool.bot.sheriff_unittest import MockSheriffBot
+from webkitpy.tool.mocktool import MockTool
+
+
+def run(message):
+    tool = MockTool()
+    tool.ensure_irc_connected(None)
+    bot = IRCBot("sheriffbot", tool, Sheriff(tool, MockSheriffBot()), irc_command.commands)
+    bot._message_queue.post(["mock_nick", message])
+    bot.process_pending_messages()
+
+
+class IRCBotTest(unittest.TestCase):
+    def test_eliza(self):
+        eliza = Eliza()
+        eliza.execute("tom", "hi", None, None)
+        eliza.execute("tom", "bye", None, None)
+
+    def test_parse_command_and_args(self):
+        tool = MockTool()
+        bot = IRCBot("sheriffbot", tool, Sheriff(tool, MockSheriffBot()), irc_command.commands)
+        self.assertEqual(bot._parse_command_and_args(""), (Eliza, [""]))
+        self.assertEqual(bot._parse_command_and_args("   "), (Eliza, [""]))
+        self.assertEqual(bot._parse_command_and_args(" hi "), (irc_command.Hi, []))
+        self.assertEqual(bot._parse_command_and_args(" hi there "), (irc_command.Hi, ["there"]))
+
+    def test_exception_during_command(self):
+        tool = MockTool()
+        tool.ensure_irc_connected(None)
+        bot = IRCBot("sheriffbot", tool, Sheriff(tool, MockSheriffBot()), irc_command.commands)
+
+        class CommandWithException(object):
+            def execute(self, nick, args, tool, sheriff):
+                raise Exception("mock_exception")
+
+        bot._parse_command_and_args = lambda request: (CommandWithException, [])
+        expected_stderr = 'MOCK: irc.post: Exception executing command: mock_exception\n'
+        OutputCapture().assert_outputs(self, bot.process_message, args=["mock_nick", "ignored message"], expected_stderr=expected_stderr)
+
+        class CommandWithKeyboardInterrupt(object):
+            def execute(self, nick, args, tool, sheriff):
+                raise KeyboardInterrupt()
+
+        bot._parse_command_and_args = lambda request: (CommandWithKeyboardInterrupt, [])
+        # KeyboardInterrupt and SystemExit are not subclasses of Exception and thus correctly will not be caught.
+        OutputCapture().assert_outputs(self, bot.process_message, args=["mock_nick", "ignored message"], expected_exception=KeyboardInterrupt)
+
+    def test_hi(self):
+        random.seed(23324)
+        expected_stderr = 'MOCK: irc.post: "Only you can prevent forest fires." -- Smokey the Bear\n'
+        OutputCapture().assert_outputs(self, run, args=["hi"], expected_stderr=expected_stderr)
+
+    def test_help(self):
+        expected_stderr = "MOCK: irc.post: mock_nick: Available commands: create-bug, help, hi, restart, roll-chromium-deps, rollout, whois\n"
+        OutputCapture().assert_outputs(self, run, args=["help"], expected_stderr=expected_stderr)
+
+    def test_restart(self):
+        expected_stderr = "MOCK: irc.post: Restarting...\n"
+        OutputCapture().assert_outputs(self, run, args=["restart"], expected_stderr=expected_stderr, expected_exception=TerminateQueue)
+
+    def test_rollout(self):
+        expected_stderr = "MOCK: irc.post: mock_nick: Preparing rollout for http://trac.webkit.org/changeset/21654 ...\nMOCK: irc.post: mock_nick, abarth, darin, eseidel: Created rollout: http://example.com/36936\n"
+        OutputCapture().assert_outputs(self, run, args=["rollout 21654 This patch broke the world"], expected_stderr=expected_stderr)
+
+    def test_revert(self):
+        expected_stderr = "MOCK: irc.post: mock_nick: Preparing rollout for http://trac.webkit.org/changeset/21654 ...\nMOCK: irc.post: mock_nick, abarth, darin, eseidel: Created rollout: http://example.com/36936\n"
+        OutputCapture().assert_outputs(self, run, args=["revert 21654 This patch broke the world"], expected_stderr=expected_stderr)
+
+    def test_roll_chromium_deps(self):
+        expected_stderr = "MOCK: irc.post: mock_nick: Rolling Chromium DEPS to r21654\nMOCK: irc.post: mock_nick: Created DEPS roll: http://example.com/36936\n"
+        OutputCapture().assert_outputs(self, run, args=["roll-chromium-deps 21654"], expected_stderr=expected_stderr)
+
+    def test_roll_chromium_deps_to_lkgr(self):
+        expected_stderr = "MOCK: irc.post: mock_nick: Rolling Chromium DEPS to last-known good revision\nMOCK: irc.post: mock_nick: Created DEPS roll: http://example.com/36936\n"
+        OutputCapture().assert_outputs(self, run, args=["roll-chromium-deps"], expected_stderr=expected_stderr)
+
+    def test_multi_rollout(self):
+        expected_stderr = "MOCK: irc.post: mock_nick: Preparing rollout for http://trac.webkit.org/changeset/21654, http://trac.webkit.org/changeset/21655, and http://trac.webkit.org/changeset/21656 ...\nMOCK: irc.post: mock_nick, abarth, darin, eseidel: Created rollout: http://example.com/36936\n"
+        OutputCapture().assert_outputs(self, run, args=["rollout 21654 21655 21656 This 21654 patch broke the world"], expected_stderr=expected_stderr)
+
+    def test_rollout_with_r_in_svn_revision(self):
+        expected_stderr = "MOCK: irc.post: mock_nick: Preparing rollout for http://trac.webkit.org/changeset/21654 ...\nMOCK: irc.post: mock_nick, abarth, darin, eseidel: Created rollout: http://example.com/36936\n"
+        OutputCapture().assert_outputs(self, run, args=["rollout r21654 This patch broke the world"], expected_stderr=expected_stderr)
+
+    def test_multi_rollout_with_r_in_svn_revision(self):
+        expected_stderr = "MOCK: irc.post: mock_nick: Preparing rollout for http://trac.webkit.org/changeset/21654, http://trac.webkit.org/changeset/21655, and http://trac.webkit.org/changeset/21656 ...\nMOCK: irc.post: mock_nick, abarth, darin, eseidel: Created rollout: http://example.com/36936\n"
+        OutputCapture().assert_outputs(self, run, args=["rollout r21654 21655 r21656 This r21654 patch broke the world"], expected_stderr=expected_stderr)
+
+    def test_rollout_bananas(self):
+        expected_stderr = "MOCK: irc.post: mock_nick: Usage: rollout SVN_REVISION [SVN_REVISIONS] REASON\n"
+        OutputCapture().assert_outputs(self, run, args=["rollout bananas"], expected_stderr=expected_stderr)
+
+    def test_rollout_invalidate_revision(self):
+        # When folks pass junk arguments, we should just spit the usage back at them.
+        expected_stderr = "MOCK: irc.post: mock_nick: Usage: rollout SVN_REVISION [SVN_REVISIONS] REASON\n"
+        OutputCapture().assert_outputs(self, run,
+                                       args=["rollout --component=Tools 21654"],
+                                       expected_stderr=expected_stderr)
+
+    def test_rollout_invalidate_reason(self):
+        # FIXME: I'm slightly confused as to why this doesn't return the USAGE message.
+        expected_stderr = """MOCK: irc.post: mock_nick: Preparing rollout for http://trac.webkit.org/changeset/21654 ...
+MOCK: irc.post: mock_nick, abarth, darin, eseidel: Failed to create rollout patch:
+MOCK: irc.post: The rollout reason may not begin with - (\"-bad (Requested by mock_nick on #webkit).\").
+"""
+        OutputCapture().assert_outputs(self, run,
+                                       args=["rollout 21654 -bad"],
+                                       expected_stderr=expected_stderr)
+
+    def test_multi_rollout_invalidate_reason(self):
+        expected_stderr = """MOCK: irc.post: mock_nick: Preparing rollout for http://trac.webkit.org/changeset/21654, http://trac.webkit.org/changeset/21655, and http://trac.webkit.org/changeset/21656 ...
+MOCK: irc.post: mock_nick, abarth, darin, eseidel: Failed to create rollout patch:
+MOCK: irc.post: The rollout reason may not begin with - (\"-bad (Requested by mock_nick on #webkit).\").
+"""
+        OutputCapture().assert_outputs(self, run,
+                                       args=["rollout 21654 21655 r21656 -bad"],
+                                       expected_stderr=expected_stderr)
+
+    def test_rollout_no_reason(self):
+        expected_stderr = "MOCK: irc.post: mock_nick: Usage: rollout SVN_REVISION [SVN_REVISIONS] REASON\n"
+        OutputCapture().assert_outputs(self, run, args=["rollout 21654"], expected_stderr=expected_stderr)
+
+    def test_multi_rollout_no_reason(self):
+        expected_stderr = "MOCK: irc.post: mock_nick: Usage: rollout SVN_REVISION [SVN_REVISIONS] REASON\n"
+        OutputCapture().assert_outputs(self, run, args=["rollout 21654 21655 r21656"], expected_stderr=expected_stderr)
diff --git a/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader.py b/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader.py
new file mode 100644
index 0000000..94a70b2
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader.py
@@ -0,0 +1,101 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.net.layouttestresults import LayoutTestResults
+from webkitpy.common.net.unittestresults import UnitTestResults
+from webkitpy.common.system.deprecated_logging import error, log
+from webkitpy.tool.steps.runtests import RunTests
+
+
+class LayoutTestResultsReader(object):
+    def __init__(self, tool, archive_directory):
+        self._tool = tool
+        self._archive_directory = archive_directory
+
+    # FIXME: This exists for mocking, but should instead be mocked via
+    # tool.filesystem.read_text_file.  They have different error handling at the moment.
+    def _read_file_contents(self, path):
+        try:
+            return self._tool.filesystem.read_text_file(path)
+        except IOError, e:  # File does not exist or can't be read.
+            return None
+
+    # FIXME: This logic should move to the port object.
+    def _create_layout_test_results(self):
+        results_path = self._tool.port().layout_tests_results_path()
+        results_html = self._read_file_contents(results_path)
+        if not results_html:
+            return None
+        return LayoutTestResults.results_from_string(results_html)
+
+    def _create_unit_test_results(self):
+        results_path = self._tool.port().unit_tests_results_path()
+        if not results_path:
+            return None
+        results_xml = self._read_file_contents(results_path)
+        if not results_xml:
+            return None
+        return UnitTestResults.results_from_string(results_xml)
+
+    def results(self):
+        layout_test_results = self._create_layout_test_results()
+        unit_test_results = self._create_unit_test_results()
+        if layout_test_results:
+            # FIXME: We should not have to set failure_limit_count, but we
+            # do until run-webkit-tests can be updated to save off the value
+            # of --exit-after-N-failures in results.html/results.json.
+            # https://bugs.webkit.org/show_bug.cgi?id=58481
+            layout_test_results.set_failure_limit_count(RunTests.NON_INTERACTIVE_FAILURE_LIMIT_COUNT)
+            if unit_test_results:
+                layout_test_results.add_unit_test_failures(unit_test_results)
+        return layout_test_results
+
+    def _results_directory(self):
+        results_path = self._tool.port().layout_tests_results_path()
+        # FIXME: This is wrong in two ways:
+        # 1. It assumes that results.html is at the top level of the results tree.
+        # 2. This uses the "old" ports.py infrastructure instead of the new layout_tests/port
+        # which will not support Chromium.  However the new arch doesn't work with old-run-webkit-tests
+        # so we have to use this for now.
+        return self._tool.filesystem.dirname(results_path)
+
+    def archive(self, patch):
+        results_directory = self._results_directory()
+        results_name, _ = self._tool.filesystem.splitext(self._tool.filesystem.basename(results_directory))
+        # Note: We name the zip with the bug_id instead of patch_id to match work_item_log_path().
+        zip_path = self._tool.workspace.find_unused_filename(self._archive_directory, "%s-%s" % (patch.bug_id(), results_name), "zip")
+        if not zip_path:
+            return None
+        if not self._tool.filesystem.isdir(results_directory):
+            log("%s does not exist, not archiving." % results_directory)
+            return None
+        archive = self._tool.workspace.create_zip(zip_path, results_directory)
+        # Remove the results directory to prevent http logs, etc. from getting huge between runs.
+        # We could have create_zip remove the original, but this is more explicit.
+        self._tool.filesystem.rmtree(results_directory)
+        return archive
diff --git a/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader_unittest.py b/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader_unittest.py
new file mode 100644
index 0000000..0eb3482
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader_unittest.py
@@ -0,0 +1,105 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.net.layouttestresults import LayoutTestResults
+from webkitpy.tool.bot.layouttestresultsreader import *
+from webkitpy.tool.mocktool import MockTool
+
+
+class LayoutTestResultsReaderTest(unittest.TestCase):
+    def test_missing_layout_test_results(self):
+        tool = MockTool()
+        reader = LayoutTestResultsReader(tool, "/var/logs")
+        layout_tests_results_path = '/mock-results/full_results.json'
+        unit_tests_results_path = '/mock-results/webkit_unit_tests_output.xml'
+        tool.filesystem = MockFileSystem({layout_tests_results_path: None,
+                                          unit_tests_results_path: None})
+        # Make sure that our filesystem mock functions as we expect.
+        self.assertRaises(IOError, tool.filesystem.read_text_file, layout_tests_results_path)
+        self.assertRaises(IOError, tool.filesystem.read_text_file, unit_tests_results_path)
+        # layout_test_results shouldn't raise even if the results.html file is missing.
+        self.assertEquals(reader.results(), None)
+
+    def test_create_unit_test_results(self):
+        tool = MockTool()
+        reader = LayoutTestResultsReader(tool, "/var/logs")
+        unit_tests_results_path = '/mock-results/webkit_unit_tests_output.xml'
+        no_failures_xml = """<?xml version="1.0" encoding="UTF-8"?>
+<testsuites tests="3" failures="0" disabled="0" errors="0" time="11.35" name="AllTests">
+  <testsuite name="RenderTableCellDeathTest" tests="3" failures="0" disabled="0" errors="0" time="0.677">
+    <testcase name="CanSetColumn" status="run" time="0.168" classname="RenderTableCellDeathTest" />
+    <testcase name="CrashIfSettingUnsetColumnIndex" status="run" time="0.129" classname="RenderTableCellDeathTest" />
+    <testcase name="CrashIfSettingUnsetRowIndex" status="run" time="0.123" classname="RenderTableCellDeathTest" />
+  </testsuite>
+</testsuites>"""
+        tool.filesystem = MockFileSystem({unit_tests_results_path: no_failures_xml})
+        self.assertEquals(reader._create_unit_test_results(), [])
+
+    def test_missing_unit_test_results_path(self):
+        tool = MockTool()
+        tool.port().unit_tests_results_path = lambda: None
+        reader = LayoutTestResultsReader(tool, "/var/logs")
+        reader._create_layout_test_results = lambda: LayoutTestResults([])
+        # layout_test_results shouldn't raise even if the unit tests xml file is missing.
+        self.assertNotEquals(reader.results(), None)
+        self.assertEquals(reader.results().failing_tests(), [])
+
+    def test_layout_test_results(self):
+        reader = LayoutTestResultsReader(MockTool(), "/var/logs")
+        reader._read_file_contents = lambda path: None
+        self.assertEquals(reader.results(), None)
+        reader._read_file_contents = lambda path: ""
+        self.assertEquals(reader.results(), None)
+        reader._create_layout_test_results = lambda: LayoutTestResults([])
+        results = reader.results()
+        self.assertNotEquals(results, None)
+        self.assertEquals(results.failure_limit_count(), 30)  # This value matches RunTests.NON_INTERACTIVE_FAILURE_LIMIT_COUNT
+
+    def test_archive_last_layout_test_results(self):
+        tool = MockTool()
+        reader = LayoutTestResultsReader(tool, "/var/logs")
+        patch = tool.bugs.fetch_attachment(10001)
+        tool.filesystem = MockFileSystem()
+        # Should fail because the results_directory does not exist.
+        expected_stderr = "/mock-results does not exist, not archiving.\n"
+        archive = OutputCapture().assert_outputs(self, reader.archive, [patch], expected_stderr=expected_stderr)
+        self.assertEqual(archive, None)
+
+        results_directory = "/mock-results"
+        # Sanity check what we assume our mock results directory is.
+        self.assertEqual(reader._results_directory(), results_directory)
+        tool.filesystem.maybe_make_directory(results_directory)
+        self.assertTrue(tool.filesystem.exists(results_directory))
+
+        self.assertNotEqual(reader.archive(patch), None)
+        self.assertFalse(tool.filesystem.exists(results_directory))
diff --git a/Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py b/Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py
new file mode 100644
index 0000000..cde1c84
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py
@@ -0,0 +1,253 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.common.net.layouttestresults import LayoutTestResults
+
+
+class UnableToApplyPatch(Exception):
+    def __init__(self, patch):
+        Exception.__init__(self)
+        self.patch = patch
+
+
+class PatchAnalysisTaskDelegate(object):
+    def parent_command(self):
+        raise NotImplementedError("subclasses must implement")
+
+    def run_command(self, command):
+        raise NotImplementedError("subclasses must implement")
+
+    def command_passed(self, message, patch):
+        raise NotImplementedError("subclasses must implement")
+
+    def command_failed(self, message, script_error, patch):
+        raise NotImplementedError("subclasses must implement")
+
+    def refetch_patch(self, patch):
+        raise NotImplementedError("subclasses must implement")
+
+    def expected_failures(self):
+        raise NotImplementedError("subclasses must implement")
+
+    def test_results(self):
+        raise NotImplementedError("subclasses must implement")
+
+    def archive_last_test_results(self, patch):
+        raise NotImplementedError("subclasses must implement")
+
+    def build_style(self):
+        raise NotImplementedError("subclasses must implement")
+
+    # We could make results_archive optional, but for now it's required.
+    def report_flaky_tests(self, patch, flaky_tests, results_archive):
+        raise NotImplementedError("subclasses must implement")
+
+
+class PatchAnalysisTask(object):
+    def __init__(self, delegate, patch):
+        self._delegate = delegate
+        self._patch = patch
+        self._script_error = None
+        self._results_archive_from_patch_test_run = None
+        self._results_from_patch_test_run = None
+        self._expected_failures = delegate.expected_failures()
+
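+    # Helper: run a single command through the delegate, report the outcome via the
+    # delegate's command_passed/command_failed callbacks, and remember the failure
+    # status id and ScriptError for later reporting.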
+    def _run_command(self, command, success_message, failure_message):
+        try:
+            self._delegate.run_command(command)
+            self._delegate.command_passed(success_message, patch=self._patch)
+            return True
+        except ScriptError, e:
+            self._script_error = e
+            self.failure_status_id = self._delegate.command_failed(failure_message, script_error=self._script_error, patch=self._patch)
+            return False
+
+    def _clean(self):
+        return self._run_command([
+            "clean",
+        ],
+        "Cleaned working directory",
+        "Unable to clean working directory")
+
+    def _update(self):
+        # FIXME: Ideally the status server log message should include which revision we updated to.
+        return self._run_command([
+            "update",
+        ],
+        "Updated working directory",
+        "Unable to update working directory")
+
+    def _apply(self):
+        return self._run_command([
+            "apply-attachment",
+            "--no-update",
+            "--non-interactive",
+            self._patch.id(),
+        ],
+        "Applied patch",
+        "Patch does not apply")
+
+    def _build(self):
+        return self._run_command([
+            "build",
+            "--no-clean",
+            "--no-update",
+            "--build-style=%s" % self._delegate.build_style(),
+        ],
+        "Built patch",
+        "Patch does not build")
+
+    def _build_without_patch(self):
+        return self._run_command([
+            "build",
+            "--force-clean",
+            "--no-update",
+            "--build-style=%s" % self._delegate.build_style(),
+        ],
+        "Able to build without patch",
+        "Unable to build without patch")
+
+    def _test(self):
+        return self._run_command([
+            "build-and-test",
+            "--no-clean",
+            "--no-update",
+            # Notice that we don't pass --build, which means we won't build!
+            "--test",
+            "--non-interactive",
+        ],
+        "Passed tests",
+        "Patch does not pass tests")
+
+    def _build_and_test_without_patch(self):
+        return self._run_command([
+            "build-and-test",
+            "--force-clean",
+            "--no-update",
+            "--build",
+            "--test",
+            "--non-interactive",
+        ],
+        "Able to pass tests without patch",
+        "Unable to pass tests without patch (tree is red?)")
+
+    def _land(self):
+        # Unclear if this should pass --quiet or not.  If --parent-command always does the reporting, then it should.
+        return self._run_command([
+            "land-attachment",
+            "--force-clean",
+            "--non-interactive",
+            "--parent-command=" + self._delegate.parent_command(),
+            self._patch.id(),
+        ],
+        "Landed patch",
+        "Unable to land patch")
+
+    def _report_flaky_tests(self, flaky_test_results, results_archive):
+        self._delegate.report_flaky_tests(self._patch, flaky_test_results, results_archive)
+
+    def _results_failed_different_tests(self, first, second):
+        first_failing_tests = [] if not first else first.failing_tests()
+        second_failing_tests = [] if not second else second.failing_tests()
+        return first_failing_tests != second_failing_tests
+
+    def _test_patch(self):
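+        # Run the tests with the patch applied.  On failure, retry once to detect
+        # flakiness, then compare against a clean-tree run before blaming the patch.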
+        if self._test():
+            return True
+
+        # Note: archive_last_test_results deletes the results directory, making these calls order-sensitive.
+        # We could remove this dependency by building the test_results from the archive.
+        first_results = self._delegate.test_results()
+        first_results_archive = self._delegate.archive_last_test_results(self._patch)
+        first_script_error = self._script_error
+        first_failure_status_id = self.failure_status_id
+
+        if self._expected_failures.failures_were_expected(first_results):
+            return True
+
+        if self._test():
+            # Only report flaky tests if we were successful at parsing results.html and archiving results.
+            if first_results and first_results_archive:
+                self._report_flaky_tests(first_results.failing_test_results(), first_results_archive)
+            return True
+
+        second_results = self._delegate.test_results()
+        if self._results_failed_different_tests(first_results, second_results):
+            # We could report flaky tests here, but we would need to be careful
+            # to use similar checks to ExpectedFailures._can_trust_results
+            # to make sure we don't report constant failures as flakes when
+            # we happen to hit the --exit-after-N-failures limit.
+            # See https://bugs.webkit.org/show_bug.cgi?id=51272
+            return False
+
+        # Archive (and remove) second results so test_results() after
+        # build_and_test_without_patch won't use second results instead of the clean-tree results.
+        second_results_archive = self._delegate.archive_last_test_results(self._patch)
+
+        if self._build_and_test_without_patch():
+            # The error from the previous ._test() run is real, report it.
+            return self.report_failure(first_results_archive, first_results, first_script_error)
+
+        clean_tree_results = self._delegate.test_results()
+        self._expected_failures.update(clean_tree_results)
+
+        # Re-check whether the original failures are now expected, to avoid a full retry.
+        if self._expected_failures.failures_were_expected(first_results):
+            return True
+
+        # Now that we have updated information about failing tests with a clean checkout, we can
+        # tell if our original failures were unexpected and fail the patch if necessary.
+        if self._expected_failures.unexpected_failures_observed(first_results):
+            self.failure_status_id = first_failure_status_id
+            return self.report_failure(first_results_archive, first_results, first_script_error)
+
+        # We don't know what's going on.  The tree is likely very red (beyond our layout-test-results
+        # failure limit), so just keep retrying the patch until someone fixes the tree.
+        return False
+
+    def results_archive_from_patch_test_run(self, patch):
+        assert(self._patch.id() == patch.id())  # PatchAnalysisTask is not currently reusable.
+        return self._results_archive_from_patch_test_run
+
+    def results_from_patch_test_run(self, patch):
+        assert(self._patch.id() == patch.id())  # PatchAnalysisTask is not currently reusable.
+        return self._results_from_patch_test_run
+
+    def report_failure(self, results_archive=None, results=None, script_error=None):
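+        # After re-validating the patch, stash the failing run's results for later
+        # retrieval and re-raise the recorded ScriptError.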
+        if not self.validate():
+            return False
+        self._results_archive_from_patch_test_run = results_archive
+        self._results_from_patch_test_run = results
+        raise script_error or self._script_error
+
+    def validate(self):
+        raise NotImplementedError("subclasses must implement")
+
+    def run(self):
+        raise NotImplementedError("subclasses must implement")
diff --git a/Tools/Scripts/webkitpy/tool/bot/queueengine.py b/Tools/Scripts/webkitpy/tool/bot/queueengine.py
new file mode 100644
index 0000000..1d75359
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/bot/queueengine.py
@@ -0,0 +1,159 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+import traceback
+
+from datetime import datetime, timedelta
+
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.common.system.deprecated_logging import log, OutputTee
+
+
+# FIXME: This will be caught by "except Exception:" blocks, we should consider
+# making this inherit from SystemExit instead (or BaseException, except that's not recommended).
+class TerminateQueue(Exception):
+    pass
+
+
+class QueueEngineDelegate:
+    def queue_log_path(self):
+        raise NotImplementedError, "subclasses must implement"
+
+    def work_item_log_path(self, work_item):
+        raise NotImplementedError, "subclasses must implement"
+
+    def begin_work_queue(self):
+        raise NotImplementedError, "subclasses must implement"
+
+    def should_continue_work_queue(self):
+        raise NotImplementedError, "subclasses must implement"
+
+    def next_work_item(self):
+        raise NotImplementedError, "subclasses must implement"
+
+    def process_work_item(self, work_item):
+        raise NotImplementedError, "subclasses must implement"
+
+    def handle_unexpected_error(self, work_item, message):
+        raise NotImplementedError, "subclasses must implement"
+
+
+class QueueEngine:
+    def __init__(self, name, delegate, wakeup_event):
+        self._name = name
+        self._delegate = delegate
+        self._wakeup_event = wakeup_event
+        self._output_tee = OutputTee()
+
+    log_date_format = "%Y-%m-%d %H:%M:%S"
+    sleep_duration_text = "2 mins"  # This could be generated from seconds_to_sleep
+    seconds_to_sleep = 120
+    handled_error_code = 2
+
+    # Child processes exit with a special code so the parent queue process can detect that the error was handled.
+    @classmethod
+    def exit_after_handled_error(cls, error):
+        log(error)
+        sys.exit(cls.handled_error_code)
+
+    def run(self):
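+        # Main loop: poll the delegate for work items, process each one under its own
+        # work log, sleep when there is no work, and stop on TerminateQueue or
+        # KeyboardInterrupt.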
+        self._begin_logging()
+
+        self._delegate.begin_work_queue()
+        while (self._delegate.should_continue_work_queue()):
+            try:
+                self._ensure_work_log_closed()
+                work_item = self._delegate.next_work_item()
+                if not work_item:
+                    self._sleep("No work item.")
+                    continue
+
+                # FIXME: Work logs should not depend on bug_id specifically.
+                #        This looks fixed, no?
+                self._open_work_log(work_item)
+                try:
+                    if not self._delegate.process_work_item(work_item):
+                        log("Unable to process work item.")
+                        continue
+                except ScriptError, e:
+                    # Use a special exit code to indicate that the error was already
+                    # handled in the child process and we should just keep looping.
+                    if e.exit_code == self.handled_error_code:
+                        continue
+                    message = "Unexpected failure when processing patch!  Please file a bug against webkit-patch.\n%s" % e.message_with_output()
+                    self._delegate.handle_unexpected_error(work_item, message)
+            except TerminateQueue, e:
+                self._stopping("TerminateQueue exception received.")
+                return 0
+            except KeyboardInterrupt, e:
+                self._stopping("User terminated queue.")
+                return 1
+            except Exception, e:
+                traceback.print_exc()
+                # Don't try to tell the status bot, in case telling it causes an exception.
+                self._sleep("Exception while preparing queue")
+        self._stopping("Delegate terminated queue.")
+        return 0
+
+    def _stopping(self, message):
+        log("\n%s" % message)
+        self._delegate.stop_work_queue(message)
+        # Be careful to shut down our OutputTee or the unit tests will be unhappy.
+        self._ensure_work_log_closed()
+        self._output_tee.remove_log(self._queue_log)
+
+    def _begin_logging(self):
+        self._queue_log = self._output_tee.add_log(self._delegate.queue_log_path())
+        self._work_log = None
+
+    def _open_work_log(self, work_item):
+        work_item_log_path = self._delegate.work_item_log_path(work_item)
+        if not work_item_log_path:
+            return
+        self._work_log = self._output_tee.add_log(work_item_log_path)
+
+    def _ensure_work_log_closed(self):
+        # If we still have a work log open, close it.
+        if self._work_log:
+            self._output_tee.remove_log(self._work_log)
+            self._work_log = None
+
+    def _now(self):
+        """Overriden by the unit tests to allow testing _sleep_message"""
+        return datetime.now()
+
+    def _sleep_message(self, message):
+        wake_time = self._now() + timedelta(seconds=self.seconds_to_sleep)
+        return "%s Sleeping until %s (%s)." % (message, wake_time.strftime(self.log_date_format), self.sleep_duration_text)
+
+    def _sleep(self, message):
+        log(self._sleep_message(message))
+        self._wakeup_event.wait(self.seconds_to_sleep)
+        self._wakeup_event.clear()
diff --git a/Tools/Scripts/webkitpy/tool/bot/queueengine_unittest.py b/Tools/Scripts/webkitpy/tool/bot/queueengine_unittest.py
new file mode 100644
index 0000000..f959ee1
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/bot/queueengine_unittest.py
@@ -0,0 +1,187 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import datetime
+import os
+import shutil
+import tempfile
+import threading
+import unittest
+
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.tool.bot.queueengine import QueueEngine, QueueEngineDelegate, TerminateQueue
+
+
+class LoggingDelegate(QueueEngineDelegate):
+    def __init__(self, test):
+        self._test = test
+        self._callbacks = []
+        self._run_before = False
+        self.stop_message = None
+
+    expected_callbacks = [
+        'queue_log_path',
+        'begin_work_queue',
+        'should_continue_work_queue',
+        'next_work_item',
+        'work_item_log_path',
+        'process_work_item',
+        'should_continue_work_queue',
+        'stop_work_queue',
+    ]
+
+    def record(self, method_name):
+        self._callbacks.append(method_name)
+
+    def queue_log_path(self):
+        self.record("queue_log_path")
+        return os.path.join(self._test.temp_dir, "queue_log_path")
+
+    def work_item_log_path(self, work_item):
+        self.record("work_item_log_path")
+        return os.path.join(self._test.temp_dir, "work_log_path", "%s.log" % work_item)
+
+    def begin_work_queue(self):
+        self.record("begin_work_queue")
+
+    def should_continue_work_queue(self):
+        self.record("should_continue_work_queue")
+        if not self._run_before:
+            self._run_before = True
+            return True
+        return False
+
+    def next_work_item(self):
+        self.record("next_work_item")
+        return "work_item"
+
+    def process_work_item(self, work_item):
+        self.record("process_work_item")
+        self._test.assertEquals(work_item, "work_item")
+        return True
+
+    def handle_unexpected_error(self, work_item, message):
+        self.record("handle_unexpected_error")
+        self._test.assertEquals(work_item, "work_item")
+
+    def stop_work_queue(self, message):
+        self.record("stop_work_queue")
+        self.stop_message = message
+
+
+class RaisingDelegate(LoggingDelegate):
+    def __init__(self, test, exception):
+        LoggingDelegate.__init__(self, test)
+        self._exception = exception
+
+    def process_work_item(self, work_item):
+        self.record("process_work_item")
+        raise self._exception
+
+
+class FastQueueEngine(QueueEngine):
+    def __init__(self, delegate):
+        QueueEngine.__init__(self, "fast-queue", delegate, threading.Event())
+
+    # No sleep for the wicked.
+    seconds_to_sleep = 0
+
+    def _sleep(self, message):
+        pass
+
+
+class QueueEngineTest(unittest.TestCase):
+    def test_trivial(self):
+        delegate = LoggingDelegate(self)
+        self._run_engine(delegate)
+        self.assertEquals(delegate.stop_message, "Delegate terminated queue.")
+        self.assertEquals(delegate._callbacks, LoggingDelegate.expected_callbacks)
+        self.assertTrue(os.path.exists(os.path.join(self.temp_dir, "queue_log_path")))
+        self.assertTrue(os.path.exists(os.path.join(self.temp_dir, "work_log_path", "work_item.log")))
+
+    def test_unexpected_error(self):
+        delegate = RaisingDelegate(self, ScriptError(exit_code=3))
+        self._run_engine(delegate)
+        expected_callbacks = LoggingDelegate.expected_callbacks[:]
+        work_item_index = expected_callbacks.index('process_work_item')
+        # The unexpected error should be handled right after process_work_item starts
+        # but before any other callback.  Otherwise callbacks should be normal.
+        expected_callbacks.insert(work_item_index + 1, 'handle_unexpected_error')
+        self.assertEquals(delegate._callbacks, expected_callbacks)
+
+    def test_handled_error(self):
+        delegate = RaisingDelegate(self, ScriptError(exit_code=QueueEngine.handled_error_code))
+        self._run_engine(delegate)
+        self.assertEquals(delegate._callbacks, LoggingDelegate.expected_callbacks)
+
+    def _run_engine(self, delegate, engine=None, termination_message=None):
+        if not engine:
+            engine = QueueEngine("test-queue", delegate, threading.Event())
+        if not termination_message:
+            termination_message = "Delegate terminated queue."
+        expected_stderr = "\n%s\n" % termination_message
+        OutputCapture().assert_outputs(self, engine.run, expected_stderr=expected_stderr)
+
+    def _test_terminating_queue(self, exception, termination_message):
+        work_item_index = LoggingDelegate.expected_callbacks.index('process_work_item')
+        # The terminating error should be handled right after process_work_item.
+        # There should be no other callbacks after stop_work_queue.
+        expected_callbacks = LoggingDelegate.expected_callbacks[:work_item_index + 1]
+        expected_callbacks.append("stop_work_queue")
+
+        delegate = RaisingDelegate(self, exception)
+        self._run_engine(delegate, termination_message=termination_message)
+
+        self.assertEquals(delegate._callbacks, expected_callbacks)
+        self.assertEquals(delegate.stop_message, termination_message)
+
+    def test_terminating_error(self):
+        self._test_terminating_queue(KeyboardInterrupt(), "User terminated queue.")
+        self._test_terminating_queue(TerminateQueue(), "TerminateQueue exception received.")
+
+    def test_now(self):
+        """Make sure there are no typos in the QueueEngine.now() method."""
+        engine = QueueEngine("test", None, None)
+        self.assertTrue(isinstance(engine._now(), datetime.datetime))
+
+    def test_sleep_message(self):
+        engine = QueueEngine("test", None, None)
+        engine._now = lambda: datetime.datetime(2010, 1, 1)
+        expected_sleep_message = "MESSAGE Sleeping until 2010-01-01 00:02:00 (2 mins)."
+        self.assertEqual(engine._sleep_message("MESSAGE"), expected_sleep_message)
+
+    def setUp(self):
+        self.temp_dir = tempfile.mkdtemp(suffix="work_queue_test_logs")
+
+    def tearDown(self):
+        shutil.rmtree(self.temp_dir)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/tool/bot/sheriff.py b/Tools/Scripts/webkitpy/tool/bot/sheriff.py
new file mode 100644
index 0000000..a8c928c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/bot/sheriff.py
@@ -0,0 +1,115 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.config import urls
+from webkitpy.common.system.deprecated_logging import log
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.tool.grammar import join_with_separators
+
+
+class Sheriff(object):
+    def __init__(self, tool, sheriffbot):
+        self._tool = tool
+        self._sheriffbot = sheriffbot
+
+    def responsible_nicknames_from_commit_info(self, commit_info):
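+        # Flatten the per-party lists of IRC nicknames into a single list.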
+        nestedList = [party.irc_nicknames for party in commit_info.responsible_parties() if party.irc_nicknames]
+        return reduce(lambda list, childList: list + childList, nestedList)
+
+    def post_irc_warning(self, commit_info, builders):
+        irc_nicknames = sorted(self.responsible_nicknames_from_commit_info(commit_info))
+        irc_prefix = ": " if irc_nicknames else ""
+        irc_message = "%s%s%s might have broken %s" % (
+            ", ".join(irc_nicknames),
+            irc_prefix,
+            urls.view_revision_url(commit_info.revision()),
+            join_with_separators([builder.name() for builder in builders]))
+
+        self._tool.irc().post(irc_message)
+
+    def post_irc_summary(self, failure_map):
+        failing_tests = failure_map.failing_tests()
+        if not failing_tests:
+            return
+        test_list_limit = 5
+        irc_message = "New failures: %s" % ", ".join(sorted(failing_tests)[:test_list_limit])
+        failure_count = len(failing_tests)
+        if failure_count > test_list_limit:
+            irc_message += " (and %s more...)" % (failure_count - test_list_limit)
+        self._tool.irc().post(irc_message)
+
+    def post_rollout_patch(self, svn_revision_list, rollout_reason):
+        # Ensure that svn revisions are numbers (and not options to
+        # create-rollout).
+        try:
+            svn_revisions = " ".join([str(int(revision)) for revision in svn_revision_list])
+        except:
+            raise ScriptError(message="Invalid svn revision number \"%s\"."
+                              % " ".join(svn_revision_list))
+
+        if rollout_reason.startswith("-"):
+            raise ScriptError(message="The rollout reason may not begin "
+                              "with - (\"%s\")." % rollout_reason)
+
+        output = self._sheriffbot.run_webkit_patch([
+            "create-rollout",
+            "--force-clean",
+            # In principle, we should pass --non-interactive here, but it
+            # turns out that create-rollout doesn't need it yet.  We can't
+            # pass it prophylactically because we reject unrecognized command
+            # line switches.
+            "--parent-command=sheriff-bot",
+            svn_revisions,
+            rollout_reason,
+        ])
+        return urls.parse_bug_id(output)
+
+    def post_chromium_deps_roll(self, revision, revision_name):
+        args = [
+            "post-chromium-deps-roll",
+            "--force-clean",
+            "--non-interactive",
+            "--parent-command=sheriff-bot",
+        ]
+        # revision can be None, but revision_name is always something meaningful.
+        args += [revision, revision_name]
+        output = self._sheriffbot.run_webkit_patch(args)
+        return urls.parse_bug_id(output)
+
+    def post_blame_comment_on_bug(self, commit_info, builders, tests):
+        if not commit_info.bug_id():
+            return
+        comment = "%s might have broken %s" % (
+            urls.view_revision_url(commit_info.revision()),
+            join_with_separators([builder.name() for builder in builders]))
+        if tests:
+            comment += "\nThe following tests are not passing:\n"
+            comment += "\n".join(tests)
+        self._tool.bugs.post_comment_to_bug(commit_info.bug_id(),
+                                            comment,
+                                            cc=self._sheriffbot.watchers)
diff --git a/Tools/Scripts/webkitpy/tool/bot/sheriff_unittest.py b/Tools/Scripts/webkitpy/tool/bot/sheriff_unittest.py
new file mode 100644
index 0000000..3ff5082
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/bot/sheriff_unittest.py
@@ -0,0 +1,89 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.net.buildbot import Builder
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.thirdparty.mock import Mock
+from webkitpy.tool.bot.sheriff import Sheriff
+from webkitpy.tool.mocktool import MockTool
+
+
+class MockSheriffBot(object):
+    name = "mock-sheriff-bot"
+    watchers = [
+        "watcher@example.com",
+    ]
+
+    def run_webkit_patch(self, args):
+        return "Created bug https://bugs.webkit.org/show_bug.cgi?id=36936\n"
+
+
+class SheriffTest(unittest.TestCase):
+    def test_post_blame_comment_on_bug(self):
+        def run():
+            sheriff = Sheriff(MockTool(), MockSheriffBot())
+            builders = [
+                Builder("Foo", None),
+                Builder("Bar", None),
+            ]
+            commit_info = Mock()
+            commit_info.bug_id = lambda: None
+            commit_info.revision = lambda: 4321
+            # Should do nothing with no bug_id
+            sheriff.post_blame_comment_on_bug(commit_info, builders, [])
+            sheriff.post_blame_comment_on_bug(commit_info, builders, ["mock-test-1", "mock-test-2"])
+            # Should try to post a comment to the bug, but MockTool.bugs does nothing.
+            commit_info.bug_id = lambda: 1234
+            sheriff.post_blame_comment_on_bug(commit_info, builders, [])
+            sheriff.post_blame_comment_on_bug(commit_info, builders, ["mock-test-1"])
+            sheriff.post_blame_comment_on_bug(commit_info, builders, ["mock-test-1", "mock-test-2"])
+
+        expected_stderr = u"""MOCK bug comment: bug_id=1234, cc=['watcher@example.com']
+--- Begin comment ---
+http://trac.webkit.org/changeset/4321 might have broken Foo and Bar
+--- End comment ---
+
+MOCK bug comment: bug_id=1234, cc=['watcher@example.com']
+--- Begin comment ---
+http://trac.webkit.org/changeset/4321 might have broken Foo and Bar
+The following tests are not passing:
+mock-test-1
+--- End comment ---
+
+MOCK bug comment: bug_id=1234, cc=['watcher@example.com']
+--- Begin comment ---
+http://trac.webkit.org/changeset/4321 might have broken Foo and Bar
+The following tests are not passing:
+mock-test-1
+mock-test-2
+--- End comment ---
+
+"""
+        OutputCapture().assert_outputs(self, run, expected_stderr=expected_stderr)
diff --git a/Tools/Scripts/webkitpy/tool/bot/stylequeuetask.py b/Tools/Scripts/webkitpy/tool/bot/stylequeuetask.py
new file mode 100644
index 0000000..01f7f72
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/bot/stylequeuetask.py
@@ -0,0 +1,75 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.bot.patchanalysistask import PatchAnalysisTask, PatchAnalysisTaskDelegate, UnableToApplyPatch
+
+
+class StyleQueueTaskDelegate(PatchAnalysisTaskDelegate):
+    def parent_command(self):
+        return "style-queue"
+
+
+class StyleQueueTask(PatchAnalysisTask):
+    def validate(self):
+        self._patch = self._delegate.refetch_patch(self._patch)
+        if self._patch.is_obsolete():
+            return False
+        if self._patch.bug().is_closed():
+            return False
+        if self._patch.review() == "-":
+            return False
+        return True
+
+    def _check_style(self):
+        return self._run_command([
+            "check-style-local",
+            "--non-interactive",
+            "--quiet",
+        ],
+        "Style checked",
+        "Patch did not pass style check")
+
+    def _apply_watch_list(self):
+        return self._run_command([
+            "apply-watchlist-local",
+            self._patch.bug_id(),
+        ],
+        "Watchlist applied",
+        "Unabled to apply watchlist")
+
+    def run(self):
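+        # Clean and update the checkout, apply the patch and the watchlist, then run
+        # the style check; a style failure is reported via report_failure.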
+        if not self._clean():
+            return False
+        if not self._update():
+            return False
+        if not self._apply():
+            raise UnableToApplyPatch(self._patch)
+        self._apply_watch_list()
+        if not self._check_style():
+            return self.report_failure()
+        return True
diff --git a/Tools/Scripts/webkitpy/tool/commands/__init__.py b/Tools/Scripts/webkitpy/tool/commands/__init__.py
new file mode 100644
index 0000000..4e8eb62
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/__init__.py
@@ -0,0 +1,24 @@
+# Required for Python to search this directory for module files
+
+from webkitpy.tool.commands.adduserstogroups import AddUsersToGroups
+from webkitpy.tool.commands.analyzechangelog import AnalyzeChangeLog
+from webkitpy.tool.commands.applywatchlistlocal import ApplyWatchListLocal
+from webkitpy.tool.commands.bugfortest import BugForTest
+from webkitpy.tool.commands.bugsearch import BugSearch
+from webkitpy.tool.commands.chromechannels import ChromeChannels
+from webkitpy.tool.commands.download import *
+from webkitpy.tool.commands.earlywarningsystem import *
+from webkitpy.tool.commands.expectations import OptimizeExpectations
+from webkitpy.tool.commands.findusers import FindUsers
+from webkitpy.tool.commands.gardenomatic import GardenOMatic
+from webkitpy.tool.commands.openbugs import OpenBugs
+from webkitpy.tool.commands.perfalizer import Perfalizer
+from webkitpy.tool.commands.prettydiff import PrettyDiff
+from webkitpy.tool.commands.queries import *
+from webkitpy.tool.commands.queues import *
+from webkitpy.tool.commands.rebaseline import Rebaseline
+from webkitpy.tool.commands.rebaselineserver import RebaselineServer
+from webkitpy.tool.commands.roll import *
+from webkitpy.tool.commands.sheriffbot import *
+from webkitpy.tool.commands.upload import *
+from webkitpy.tool.commands.suggestnominations import *
diff --git a/Tools/Scripts/webkitpy/tool/commands/abstractlocalservercommand.py b/Tools/Scripts/webkitpy/tool/commands/abstractlocalservercommand.py
new file mode 100644
index 0000000..6c54da2
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/abstractlocalservercommand.py
@@ -0,0 +1,57 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from optparse import make_option
+import threading
+
+from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+
+
+class AbstractLocalServerCommand(AbstractDeclarativeCommand):
+    server = None
+    launch_path = "/"
+
+    def __init__(self):
+        options = [
+            make_option("--httpd-port", action="store", type="int", default=8127, help="Port to use for the HTTP server"),
+            make_option("--no-show-results", action="store_false", default=True, dest="show_results", help="Don't launch a browser with the rebaseline server"),
+        ]
+        AbstractDeclarativeCommand.__init__(self, options=options)
+
+    def _prepare_config(self, options, args, tool):
+        return None
+
+    def execute(self, options, args, tool):
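+        # Start the configured server on the requested port and, unless
+        # --no-show-results was passed, open the UI in the user's browser.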
+        config = self._prepare_config(options, args, tool)
+
+        server_url = "http://localhost:%d%s" % (options.httpd_port, self.launch_path)
+        print "Starting server at %s" % server_url
+        print "Use the 'Exit' link in the UI, %squitquitquit or Ctrl-C to stop" % server_url
+
+        if options.show_results:
+            # FIXME: This seems racy.
+            threading.Timer(0.1, lambda: self._tool.user.open_url(server_url)).start()
+
+        httpd = self.server(httpd_port=options.httpd_port, config=config)
+        httpd.serve_forever()
diff --git a/Tools/Scripts/webkitpy/tool/commands/abstractsequencedcommand.py b/Tools/Scripts/webkitpy/tool/commands/abstractsequencedcommand.py
new file mode 100644
index 0000000..5eaf249
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/abstractsequencedcommand.py
@@ -0,0 +1,51 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.common.system.deprecated_logging import log
+from webkitpy.tool.commands.stepsequence import StepSequence
+from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+
+
+class AbstractSequencedCommand(AbstractDeclarativeCommand):
+    steps = None
+    def __init__(self):
+        self._sequence = StepSequence(self.steps)
+        AbstractDeclarativeCommand.__init__(self, self._sequence.options())
+
+    def _prepare_state(self, options, args, tool):
+        return None
+
+    def execute(self, options, args, tool):
+        try:
+            state = self._prepare_state(options, args, tool)
+        except ScriptError, e:
+            log(e.message_with_output())
+            self._exit(e.exit_code or 2)
+
+        self._sequence.run_and_handle_errors(tool, options, state)
diff --git a/Tools/Scripts/webkitpy/tool/commands/adduserstogroups.py b/Tools/Scripts/webkitpy/tool/commands/adduserstogroups.py
new file mode 100644
index 0000000..2286958
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/adduserstogroups.py
@@ -0,0 +1,65 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+
+
+class AddUsersToGroups(AbstractDeclarativeCommand):
+    name = "add-users-to-groups"
+    help_text = "Add users matching subtring to specified groups"
+
+    # This probably belongs in bugzilla.py
+    known_groups = ['canconfirm', 'editbugs']
+
+    def execute(self, options, args, tool):
+        search_string = args[0]
+        # FIXME: We could allow users to specify groups on the command line.
+        list_title = 'Add users matching "%s" which groups?' % search_string
+        # FIXME: Need a way to specify that "none" is not allowed.
+        # FIXME: We could lookup what groups the current user is able to grant from bugzilla.
+        groups = tool.user.prompt_with_list(list_title, self.known_groups, can_choose_multiple=True)
+        if not groups:
+            print "No groups specified."
+            return
+
+        login_userid_pairs = tool.bugs.queries.fetch_login_userid_pairs_matching_substring(search_string)
+        if not login_userid_pairs:
+            print "No users found matching '%s'" % search_string
+            return
+
+        print "Found %s users matching %s:" % (len(login_userid_pairs), search_string)
+        for (login, user_id) in login_userid_pairs:
+            print "%s (%s)" % (login, user_id)
+
+        confirm_message = "Are you sure you want add %s users to groups %s?  (This action cannot be undone using webkit-patch.)" % (len(login_userid_pairs), groups)
+        if not tool.user.confirm(confirm_message):
+            return
+
+        for (login, user_id) in login_userid_pairs:
+            print "Adding %s to %s" % (login, groups)
+            tool.bugs.add_user_to_groups(user_id, groups)
diff --git a/Tools/Scripts/webkitpy/tool/commands/analyzechangelog.py b/Tools/Scripts/webkitpy/tool/commands/analyzechangelog.py
new file mode 100644
index 0000000..b88b61f
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/analyzechangelog.py
@@ -0,0 +1,206 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import json
+import re
+import time
+
+from webkitpy.common.checkout.scm.detection import SCMDetector
+from webkitpy.common.checkout.changelog import ChangeLog
+from webkitpy.common.config.contributionareas import ContributionAreas
+from webkitpy.common.system.filesystem import FileSystem
+from webkitpy.common.system.executive import Executive
+from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+from webkitpy.tool import steps
+
+
+class AnalyzeChangeLog(AbstractDeclarativeCommand):
+    name = "analyze-changelog"
+    help_text = "Experimental command for analyzing change logs."
+    long_help = "This command parses changelogs in a specified directory and summarizes the result as JSON files."
+
+    def __init__(self):
+        options = [
+            steps.Options.changelog_count,
+        ]
+        AbstractDeclarativeCommand.__init__(self, options=options)
+
+    @staticmethod
+    def _enumerate_changelogs(filesystem, dirname, changelog_count):
+        changelogs = [filesystem.join(dirname, filename) for filename in filesystem.listdir(dirname) if re.match('^ChangeLog(-(\d{4}-\d{2}-\d{2}))?$', filename)]
+        # Make sure ChangeLog shows up before ChangeLog-2011-01-01
+        changelogs = sorted(changelogs, key=lambda filename: filename + 'X', reverse=True)
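+        # (Appending 'X' makes the bare "ChangeLog" compare greater than the dated
+        # names, so reverse=True sorts it first.)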
+        return changelogs[:changelog_count]
+
+    @staticmethod
+    def _generate_jsons(filesystem, jsons, output_dir):
+        for filename in jsons:
+            print '    Generating', filename
+            filesystem.write_text_file(filesystem.join(output_dir, filename), json.dumps(jsons[filename], indent=2))
+
+    def execute(self, options, args, tool):
+        filesystem = self._tool.filesystem
+        if len(args) < 1 or not filesystem.exists(args[0]):
+            print "Need the directory name to look for changelog as the first argument"
+            return
+        changelog_dir = filesystem.abspath(args[0])
+
+        if len(args) < 2 or not filesystem.exists(args[1]):
+            print "Need the output directory name as the second argument"
+            return
+        output_dir = args[1]
+
+        startTime = time.time()
+
+        print 'Enumerating ChangeLog files...'
+        changelogs = AnalyzeChangeLog._enumerate_changelogs(filesystem, changelog_dir, options.changelog_count)
+
+        analyzer = ChangeLogAnalyzer(tool, changelogs)
+        analyzer.analyze()
+
+        print 'Generating json files...'
+        json_files = {
+            'summary.json': analyzer.summary(),
+            'contributors.json': analyzer.contributors_statistics(),
+            'areas.json': analyzer.areas_statistics(),
+        }
+        AnalyzeChangeLog._generate_jsons(filesystem, json_files, output_dir)
+        commands_dir = filesystem.dirname(filesystem.path_to_module(self.__module__))
+        print commands_dir
+        filesystem.copyfile(filesystem.join(commands_dir, 'data/summary.html'), filesystem.join(output_dir, 'summary.html'))
+
+        tick = time.time() - startTime
+        print 'Finished in %02dm:%02ds' % (int(tick / 60), int(tick % 60))
+
+
+class ChangeLogAnalyzer(object):
+    def __init__(self, host, changelog_paths):
+        self._changelog_paths = changelog_paths
+        self._filesystem = host.filesystem
+        self._contribution_areas = ContributionAreas(host.filesystem)
+        self._scm = host.scm()
+        self._parsed_revisions = {}
+
+        self._contributors_statistics = {}
+        self._areas_statistics = dict([(area, {'reviewed': 0, 'unreviewed': 0, 'contributors': {}}) for area in self._contribution_areas.names()])
+        self._summary = {'reviewed': 0, 'unreviewed': 0}
+
+        self._longest_filename = max([len(path) - len(self._scm.checkout_root) for path in changelog_paths])
+        self._filename = ''
+        self._length_of_previous_output = 0
+
+    def contributors_statistics(self):
+        return self._contributors_statistics
+
+    def areas_statistics(self):
+        return self._areas_statistics
+
+    def summary(self):
+        return self._summary
+
+    def _print_status(self, status):
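+        # Overwrite the previously printed status line in place using carriage returns.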
+        if self._length_of_previous_output:
+            print "\r" + " " * self._length_of_previous_output,
+        new_output = ('%' + str(self._longest_filename) + 's: %s') % (self._filename, status)
+        print "\r" + new_output,
+        self._length_of_previous_output = len(new_output)
+
+    def _set_filename(self, filename):
+        if self._filename:
+            print
+        self._filename = filename
+
+    def analyze(self):
+        for path in self._changelog_paths:
+            self._set_filename(self._filesystem.relpath(path, self._scm.checkout_root))
+            with self._filesystem.open_text_file_for_reading(path) as changelog:
+                self._print_status('Parsing entries...')
+                number_of_parsed_entries = self._analyze_entries(ChangeLog.parse_entries_from_file(changelog), path)
+            self._print_status('Done (%d entries)' % number_of_parsed_entries)
+        print
+        self._summary['contributors'] = len(self._contributors_statistics)
+        self._summary['contributors_with_reviews'] = sum([1 for contributor in self._contributors_statistics.values() if contributor['reviews']['total']])
+        self._summary['contributors_without_reviews'] = self._summary['contributors'] - self._summary['contributors_with_reviews']
+
+    def _collect_statistics_for_contributor_area(self, area, contributor, contribution_type, reviewed):
+        area_contributors = self._areas_statistics[area]['contributors']
+        if contributor not in area_contributors:
+            area_contributors[contributor] = {'reviews': 0, 'reviewed': 0, 'unreviewed': 0}
+        if contribution_type == 'patches':
+            contribution_type = 'reviewed' if reviewed else 'unreviewed'
+        area_contributors[contributor][contribution_type] += 1
+
+    def _collect_statistics_for_contributor(self, contributor, contribution_type, areas, touched_files, reviewed):
+        if contributor not in self._contributors_statistics:
+            self._contributors_statistics[contributor] = {
+                'reviews': {'total': 0, 'areas': {}, 'files': {}},
+                'patches': {'reviewed': 0, 'unreviewed': 0, 'areas': {}, 'files': {}}}
+        statistics = self._contributors_statistics[contributor][contribution_type]
+
+        if contribution_type == 'reviews':
+            statistics['total'] += 1
+        elif reviewed:
+            statistics['reviewed'] += 1
+        else:
+            statistics['unreviewed'] += 1
+
+        for area in areas:
+            self._increment_dictionary_value(statistics['areas'], area)
+            self._collect_statistics_for_contributor_area(area, contributor, contribution_type, reviewed)
+        for touchedfile in touched_files:
+            self._increment_dictionary_value(statistics['files'], touchedfile)
+
+    def _increment_dictionary_value(self, dictionary, key):
+        dictionary[key] = dictionary.get(key, 0) + 1
+
+    def _analyze_entries(self, entries, changelog_path):
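+        # Aggregate per-contributor and per-area statistics for every entry in this
+        # ChangeLog, crediting reviewers and authors separately.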
+        dirname = self._filesystem.dirname(changelog_path)
+        for i, entry in enumerate(entries):
+            self._print_status('(%s) entries' % i)
+            assert(entry.authors())
+
+            touchedfiles_for_entry = [self._filesystem.relpath(self._filesystem.join(dirname, name), self._scm.checkout_root) for name in entry.touched_files()]
+            areas_for_entry = self._contribution_areas.areas_for_touched_files(touchedfiles_for_entry)
+            authors_for_entry = entry.authors()
+            reviewers_for_entry = entry.reviewers()
+
+            for reviewer in reviewers_for_entry:
+                self._collect_statistics_for_contributor(reviewer.full_name, 'reviews', areas_for_entry, touchedfiles_for_entry, reviewed=True)
+
+            for author in authors_for_entry:
+                self._collect_statistics_for_contributor(author['name'], 'patches', areas_for_entry, touchedfiles_for_entry,
+                    reviewed=bool(reviewers_for_entry))
+
+            for area in areas_for_entry:
+                self._areas_statistics[area]['reviewed' if reviewers_for_entry else 'unreviewed'] += 1
+
+            self._summary['reviewed' if reviewers_for_entry else 'unreviewed'] += 1
+
+            i += 1
+        self._print_status('(%s) entries' % i)
+        return i
diff --git a/Tools/Scripts/webkitpy/tool/commands/analyzechangelog_unittest.py b/Tools/Scripts/webkitpy/tool/commands/analyzechangelog_unittest.py
new file mode 100644
index 0000000..661d2d8
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/analyzechangelog_unittest.py
@@ -0,0 +1,185 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import json
+import sys
+from webkitpy.common.config.contributionareas import ContributionAreas
+from webkitpy.common.host_mock import MockHost
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.executive import Executive
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.tool.commands.analyzechangelog import AnalyzeChangeLog
+from webkitpy.tool.commands.analyzechangelog import ChangeLogAnalyzer
+from webkitpy.tool.commands.commandtest import CommandsTest
+from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+
+
+class AnalyzeChangeLogTest(CommandsTest):
+    def test_enumerate_changelogs(self):
+        filesystem = MockFileSystem({
+            'foo/ChangeLog': '',
+            'foo/ChangeLog-2010-06-23': '',
+            'foo/ChangeLog-2010-12-31': '',
+            'foo/ChangeLog-x': '',
+            'foo/ChangeLog-2011-01-01': '',
+        })
+        changelogs = AnalyzeChangeLog._enumerate_changelogs(filesystem, 'foo/', None)
+        self.assertEqual(changelogs, ['foo/ChangeLog', 'foo/ChangeLog-2011-01-01', 'foo/ChangeLog-2010-12-31', 'foo/ChangeLog-2010-06-23'])
+
+        changelogs = AnalyzeChangeLog._enumerate_changelogs(filesystem, 'foo/', 2)
+        self.assertEqual(changelogs, ['foo/ChangeLog', 'foo/ChangeLog-2011-01-01'])
+
+    def test_generate_jsons(self):
+        filesystem = MockFileSystem()
+        test_json = {'array.json': [1, 2, 3, {'key': 'value'}], 'dictionary.json': {'somekey': 'somevalue', 'array': [4, 5]}}
+
+        capture = OutputCapture()
+        capture.capture_output()
+
+        AnalyzeChangeLog._generate_jsons(filesystem, test_json, 'bar')
+        self.assertEqual(set(filesystem.files.keys()), set(['bar/array.json', 'bar/dictionary.json']))
+
+        capture.restore_output()
+
+        self.assertEqual(json.loads(filesystem.files['bar/array.json']), test_json['array.json'])
+        self.assertEqual(json.loads(filesystem.files['bar/dictionary.json']), test_json['dictionary.json'])
+
+
+class ChangeLogAnalyzerTest(CommandsTest):
+    def test_analyze_one_changelog(self):
+        host = MockHost()
+        host.filesystem.files['mock-checkout/foo/ChangeLog'] = u"""2011-11-17  Mark Rowe  <mrowe@apple.com>
+
+    <http://webkit.org/b/72646> Disable deprecation warnings around code where we cannot easily
+    switch away from the deprecated APIs.
+
+    Reviewed by Sam Weinig.
+
+    * platform/mac/WebCoreNSStringExtras.mm:
+    * platform/network/cf/SocketStreamHandleCFNet.cpp:
+    (WebCore::SocketStreamHandle::reportErrorToClient):
+
+2011-11-19  Kevin Ollivier  <kevino@theolliviers.com>
+
+    [wx] C++ bindings build fix for move of array classes to WTF.
+
+    * bindings/scripts/CodeGeneratorCPP.pm:
+    (GetCPPTypeGetter):
+    (GetNamespaceForClass):
+    (GenerateHeader):
+    (GenerateImplementation):
+
+2011-10-27  Philippe Normand  <pnormand@igalia.com> and Zan Dobersek  <zandobersek@gmail.com>
+
+        [GStreamer] WebAudio AudioFileReader implementation
+        https://bugs.webkit.org/show_bug.cgi?id=69834
+
+        Reviewed by Martin Robinson.
+
+        Basic FileReader implementation, supporting one or 2 audio
+        channels. An empty AudioDestination is also provided, its complete
+        implementation is handled in bug 69835.
+
+        * GNUmakefile.am:
+        * GNUmakefile.list.am:
+        * platform/audio/gstreamer/AudioDestinationGStreamer.cpp: Added.
+        (WebCore::AudioDestination::create):
+        (WebCore::AudioDestination::hardwareSampleRate):
+        (WebCore::AudioDestinationGStreamer::AudioDestinationGStreamer):
+        (WebCore::AudioDestinationGStreamer::~AudioDestinationGStreamer):
+        (WebCore::AudioDestinationGStreamer::start):
+        (WebCore::AudioDestinationGStreamer::stop):
+        * platform/audio/gstreamer/AudioDestinationGStreamer.h: Added.
+        (WebCore::AudioDestinationGStreamer::isPlaying):
+        (WebCore::AudioDestinationGStreamer::sampleRate):
+        (WebCore::AudioDestinationGStreamer::sourceProvider):
+        * platform/audio/gstreamer/AudioFileReaderGStreamer.cpp: Added.
+        (WebCore::getGStreamerAudioCaps):
+        (WebCore::getFloatFromByteReader):
+        (WebCore::copyGstreamerBuffersToAudioChannel):
+        (WebCore::onAppsinkNewBufferCallback):
+        (WebCore::messageCallback):
+        (WebCore::onGStreamerDeinterleavePadAddedCallback):
+        (WebCore::onGStreamerDeinterleaveReadyCallback):
+        (WebCore::onGStreamerDecodebinPadAddedCallback):
+        (WebCore::AudioFileReader::AudioFileReader):
+        (WebCore::AudioFileReader::~AudioFileReader):
+        (WebCore::AudioFileReader::handleBuffer):
+        (WebCore::AudioFileReader::handleMessage):
+        (WebCore::AudioFileReader::handleNewDeinterleavePad):
+        (WebCore::AudioFileReader::deinterleavePadsConfigured):
+        (WebCore::AudioFileReader::plugDeinterleave):
+        (WebCore::AudioFileReader::createBus):
+        (WebCore::createBusFromAudioFile):
+        (WebCore::createBusFromInMemoryAudioFile):
+        * platform/audio/gtk/AudioBusGtk.cpp: Added.
+        (WebCore::AudioBus::loadPlatformResource):
+"""
+
+        capture = OutputCapture()
+        capture.capture_output()
+
+        analyzer = ChangeLogAnalyzer(host, ['mock-checkout/foo/ChangeLog'])
+        analyzer.analyze()
+
+        capture.restore_output()
+
+        self.assertEqual(analyzer.summary(),
+            {'reviewed': 2, 'unreviewed': 1, 'contributors': 6, 'contributors_with_reviews': 2, 'contributors_without_reviews': 4})
+
+        self.assertEqual(set(analyzer.contributors_statistics().keys()),
+            set(['Sam Weinig', u'Mark Rowe', u'Kevin Ollivier', 'Martin Robinson', u'Philippe Normand', u'Zan Dobersek']))
+
+        self.assertEqual(analyzer.contributors_statistics()['Sam Weinig'],
+            {'reviews': {'files': {u'foo/platform/mac/WebCoreNSStringExtras.mm': 1, u'foo/platform/network/cf/SocketStreamHandleCFNet.cpp': 1},
+            'total': 1, 'areas': {'Network': 1}}, 'patches': {'files': {}, 'areas': {}, 'unreviewed': 0, 'reviewed': 0}})
+        self.assertEqual(analyzer.contributors_statistics()[u'Mark Rowe'],
+            {'reviews': {'files': {}, 'total': 0, 'areas': {}}, 'patches': {'files': {u'foo/platform/mac/WebCoreNSStringExtras.mm': 1,
+            u'foo/platform/network/cf/SocketStreamHandleCFNet.cpp': 1}, 'areas': {'Network': 1}, 'unreviewed': 0, 'reviewed': 1}})
+        self.assertEqual(analyzer.contributors_statistics()[u'Kevin Ollivier'],
+            {'reviews': {'files': {}, 'total': 0, 'areas': {}}, 'patches': {'files': {u'foo/bindings/scripts/CodeGeneratorCPP.pm': 1},
+                'areas': {'Bindings': 1}, 'unreviewed': 1, 'reviewed': 0}})
+
+        files_for_audio_patch = {u'foo/GNUmakefile.am': 1, u'foo/GNUmakefile.list.am': 1, 'foo/platform/audio/gstreamer/AudioDestinationGStreamer.cpp': 1,
+            'foo/platform/audio/gstreamer/AudioDestinationGStreamer.h': 1, 'foo/platform/audio/gstreamer/AudioFileReaderGStreamer.cpp': 1,
+            'foo/platform/audio/gtk/AudioBusGtk.cpp': 1}
+        author_expectation_for_audio_patch = {'reviews': {'files': {}, 'total': 0, 'areas': {}},
+            'patches': {'files': files_for_audio_patch, 'areas': {'The WebKitGTK+ Port': 1}, 'unreviewed': 0, 'reviewed': 1}}
+        self.assertEqual(analyzer.contributors_statistics()[u'Martin Robinson'],
+            {'reviews': {'files': files_for_audio_patch, 'total': 1, 'areas': {'The WebKitGTK+ Port': 1}},
+                'patches': {'files': {}, 'areas': {}, 'unreviewed': 0, 'reviewed': 0}})
+        self.assertEqual(analyzer.contributors_statistics()[u'Philippe Normand'], author_expectation_for_audio_patch)
+        self.assertEqual(analyzer.contributors_statistics()[u'Zan Dobersek'], author_expectation_for_audio_patch)
+
+        areas_statistics = analyzer.areas_statistics()
+        areas_with_patches = [area for area in areas_statistics if areas_statistics[area]['reviewed'] or areas_statistics[area]['unreviewed']]
+        self.assertEqual(set(areas_with_patches), set(['Bindings', 'Network', 'The WebKitGTK+ Port']))
+        self.assertEqual(areas_statistics['Bindings'], {'unreviewed': 1, 'reviewed': 0, 'contributors':
+            {u'Kevin Ollivier': {'reviews': 0, 'unreviewed': 1, 'reviewed': 0}}})
+        self.assertEqual(areas_statistics['Network'], {'unreviewed': 0, 'reviewed': 1, 'contributors':
+            {'Sam Weinig': {'reviews': 1, 'unreviewed': 0, 'reviewed': 0}, u'Mark Rowe': {'reviews': 0, 'unreviewed': 0, 'reviewed': 1}}})
diff --git a/Tools/Scripts/webkitpy/tool/commands/applywatchlistlocal.py b/Tools/Scripts/webkitpy/tool/commands/applywatchlistlocal.py
new file mode 100644
index 0000000..6735d48
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/applywatchlistlocal.py
@@ -0,0 +1,50 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.commands.abstractsequencedcommand import AbstractSequencedCommand
+from webkitpy.tool import steps
+
+
+class ApplyWatchListLocal(AbstractSequencedCommand):
+    name = "apply-watchlist-local"
+    help_text = "Applies the watchlist to local changes"
+    argument_names = "[BUGID]"
+    steps = [
+        steps.ApplyWatchList,
+    ]
+    long_help = """Applies the watchlist to local changes.
+The results are logged if no bug is given. This may be used to try out a watchlist change."""
+
+    def _prepare_state(self, options, args, tool):
+        if len(args) > 1:
+            raise Exception("Too many arguments given: %s" % (' '.join(args)))
+        if not args:
+            return {}
+        return {
+            "bug_id": args[0],
+        }
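+
+# Example invocations (sketch; assumes this command is run through the
+# webkit-patch tool, and the bug number is hypothetical):
+#
+#     webkit-patch apply-watchlist-local          # just log the watchlist results
+#     webkit-patch apply-watchlist-local 50002    # also post the results to bug 50002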
diff --git a/Tools/Scripts/webkitpy/tool/commands/applywatchlistlocal_unittest.py b/Tools/Scripts/webkitpy/tool/commands/applywatchlistlocal_unittest.py
new file mode 100644
index 0000000..91818d1
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/applywatchlistlocal_unittest.py
@@ -0,0 +1,50 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+from webkitpy.tool.commands.commandtest import CommandsTest
+from webkitpy.tool.commands.applywatchlistlocal import ApplyWatchListLocal
+
+
+class ApplyWatchListLocalTest(CommandsTest):
+    def test_args_parsing(self):
+        expected_stderr = 'MockWatchList: determine_cc_and_messages\n'
+        self.assert_execute_outputs(ApplyWatchListLocal(), [''], expected_stderr=expected_stderr)
+
+    def test_args_parsing_with_bug(self):
+        expected_stderr = """MockWatchList: determine_cc_and_messages
+MOCK bug comment: bug_id=50002, cc=set(['eric@webkit.org', 'levin@chromium.org', 'abarth@webkit.org'])
+--- Begin comment ---
+Message1.
+
+Message2.
+--- End comment ---\n\n"""
+        self.assert_execute_outputs(ApplyWatchListLocal(), ['50002'], expected_stderr=expected_stderr)
+
+    def test_args_parsing_with_two_bugs(self):
+        self._assertRaisesRegexp(Exception, 'Too many arguments given: 1234 5678', self.assert_execute_outputs, ApplyWatchListLocal(), ['1234', '5678'])
diff --git a/Tools/Scripts/webkitpy/tool/commands/bugfortest.py b/Tools/Scripts/webkitpy/tool/commands/bugfortest.py
new file mode 100644
index 0000000..36aa6b5
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/bugfortest.py
@@ -0,0 +1,48 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+from webkitpy.tool.bot.flakytestreporter import FlakyTestReporter
+
+
+# This is mostly a command for testing FlakyTestReporter; however, it could
+# easily be expanded to auto-create bugs, etc., if another command outside of
+# webkitpy wanted to use it.
+class BugForTest(AbstractDeclarativeCommand):
+    name = "bug-for-test"
+    help_text = "Finds the bugzilla bug for a given test"
+
+    def execute(self, options, args, tool):
+        reporter = FlakyTestReporter(tool, "webkitpy")
+        search_string = args[0]
+        bug = reporter._lookup_bug_for_flaky_test(search_string)
+        if bug:
+            bug = reporter._follow_duplicate_chain(bug)
+            print "%5s %s" % (bug.id(), bug.title())
+        else:
+            print "No bugs found matching '%s'" % search_string
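+
+# Example invocation (sketch; assumes this command is run through the
+# webkit-patch tool, and the test path is hypothetical):
+#
+#     webkit-patch bug-for-test fast/dom/some-flaky-test.html
+#
+# This prints "<bug id> <bug title>" for the flaky-test bug (following any
+# duplicate chain), or a "No bugs found" message.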
diff --git a/Tools/Scripts/webkitpy/tool/commands/bugsearch.py b/Tools/Scripts/webkitpy/tool/commands/bugsearch.py
new file mode 100644
index 0000000..a1d74c5
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/bugsearch.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+
+
+class BugSearch(AbstractDeclarativeCommand):
+    name = "bug-search"
+    help_text = "List bugs matching a query"
+    argument_names = "QUERY"
+    long_help = \
+"""Runs the bugzilla quicksearch QUERY on bugs.webkit.org, and lists all bugs
+returned. QUERY can be as simple as a bug number or a comma delimited list of
+bug numbers.
+See https://bugzilla.mozilla.org/page.cgi?id=quicksearch.html for full
+documentation on the query format."""
+
+    def execute(self, options, args, tool):
+        search_string = args[0]
+        bugs = tool.bugs.queries.fetch_bugs_matching_quicksearch(search_string)
+        for bug in bugs:
+            print "%5s %s" % (bug.id(), bug.title())
+        if not bugs:
+            print "No bugs found matching '%s'" % search_string
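+
+# Example invocations (sketch; assumes this command is run through the
+# webkit-patch tool, and the bug numbers are hypothetical):
+#
+#     webkit-patch bug-search 50001
+#     webkit-patch bug-search "50001,50002"
+#
+# Each matching bug is printed as "<bug id> <bug title>".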
diff --git a/Tools/Scripts/webkitpy/tool/commands/chromechannels.py b/Tools/Scripts/webkitpy/tool/commands/chromechannels.py
new file mode 100644
index 0000000..da093b4
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/chromechannels.py
@@ -0,0 +1,104 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from optparse import make_option
+
+from webkitpy.common.net.omahaproxy import OmahaProxy
+from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+
+import re
+
+
+class ChromeChannels(AbstractDeclarativeCommand):
+    name = "chrome-channels"
+    help_text = "List which Chrome channels include the patches in bugs returned by QUERY."
+    argument_names = "QUERY"
+    long_help = """Retrieves the current list of Chrome releases from omahaproxy.appspot.com,
+and then runs the bugzilla quicksearch QUERY on bugs.webkit.org. For each bug
+returned by the query, a single svn commit is deduced, and a short summary is
+printed for each bug listing which Chrome channels contain that bug's associated
+commit.
+
+The QUERY can be as simple as a bug number, or a comma delimited list of bug
+numbers. See https://bugzilla.mozilla.org/page.cgi?id=quicksearch.html for full
+documentation on the query format."""
+
+    chrome_channels = OmahaProxy.chrome_channels
+    committed_pattern = "Committed r([0-9]+): <http://trac.webkit.org/changeset/\\1>"
+    rollout_pattern = "Rolled out in http://trac.webkit.org/changeset/[0-9]+"
+
+    def __init__(self):
+        AbstractDeclarativeCommand.__init__(self)
+        self._re_committed = re.compile(self.committed_pattern)
+        self._re_rollout = re.compile(self.rollout_pattern)
+        self._omahaproxy = OmahaProxy()
+
+    def _channels_for_bug(self, revisions, bug):
+        comments = bug.comments()
+        commit = None
+
+        # Scan the comments, looking for a sane list of commits and rollbacks.
+        for comment in comments:
+            commit_match = self._re_committed.search(comment['text'])
+            if commit_match:
+                if commit:
+                    return "%5s %s\n... has too confusing a commit history to parse, skipping\n" % (bug.id(), bug.title())
+                commit = int(commit_match.group(1))
+            if self._re_rollout.search(comment['text']):
+                commit = None
+        if not commit:
+            return "%5s %s\n... does not appear to have an associated commit.\n" % (bug.id(), bug.title())
+
+        # We now know that we have a commit, so gather up the list of platforms
+        # by channel, then print.
+        by_channel = {}
+        for revision in revisions:
+            channel = revision['channel']
+            if revision['commit'] < commit:
+                continue
+            if channel not in by_channel:
+                by_channel[channel] = " %6s:" % channel
+            by_channel[channel] += " %s," % revision['platform']
+        if not by_channel:
+            return "%5s %s (r%d)\n... not yet released in any Chrome channels.\n" % (bug.id(), bug.title(), commit)
+        retval = "%5s %s (r%d)\n" % (bug.id(), bug.title(), commit)
+        for channel in self.chrome_channels:
+            if channel in by_channel:
+                retval += by_channel[channel][:-1]
+                retval += "\n"
+        return retval
+
+    def execute(self, options, args, tool):
+        search_string = args[0]
+        revisions = self._omahaproxy.get_revisions()
+        bugs = tool.bugs.queries.fetch_bugs_matching_quicksearch(search_string)
+        if not bugs:
+            print "No bugs found matching '%s'" % search_string
+            return
+        for bug in bugs:
+            print self._channels_for_bug(revisions, bug),
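+
+# Example invocation and output shape (sketch; assumes this command is run
+# through the webkit-patch tool; the bug number and title mirror the mock data
+# in chromechannels_unittest.py):
+#
+#     webkit-patch chrome-channels 50004
+#
+#     50004 The fifth bug (r15)
+#      canary: Mac, Windows
+#         dev: Mac, Windows, Linux
+#        beta: Windows, Linux
+#      stable: Linux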
diff --git a/Tools/Scripts/webkitpy/tool/commands/chromechannels_unittest.py b/Tools/Scripts/webkitpy/tool/commands/chromechannels_unittest.py
new file mode 100644
index 0000000..037aebb
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/chromechannels_unittest.py
@@ -0,0 +1,99 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.commands.chromechannels import ChromeChannels
+from webkitpy.tool.commands.commandtest import CommandsTest
+from webkitpy.tool.mocktool import MockTool
+from webkitpy.common.net.omahaproxy import OmahaProxy
+
+
+class MockOmahaProxy(OmahaProxy):
+    revisions = [{"commit": 20, "channel": "canary", "platform": "Mac", "date": "07/04/76"},
+                 {"commit": 20, "channel": "canary", "platform": "Windows", "date": "07/04/76"},
+                 {"commit": 25, "channel": "dev", "platform": "Mac", "date": "07/01/76"},
+                 {"commit": 30, "channel": "dev", "platform": "Windows", "date": "03/29/82"},
+                 {"commit": 30, "channel": "dev", "platform": "Linux", "date": "03/29/82"},
+                 {"commit": 15, "channel": "beta", "platform": "Windows", "date": "07/04/67"},
+                 {"commit": 15, "channel": "beta", "platform": "Linux", "date": "07/04/67"},
+                 {"commit": 10, "channel": "stable", "platform": "Windows", "date": "07/01/67"},
+                 {"commit": 20, "channel": "stable", "platform": "Linux", "date": "09/16/10"},
+                 ]
+
+    def get_revisions(self):
+        return self.revisions
+
+
+class TestableChromeChannels(ChromeChannels):
+    def __init__(self):
+        ChromeChannels.__init__(self)
+        self._omahaproxy = MockOmahaProxy()
+
+
+class ChromeChannelsTest(CommandsTest):
+
+    single_bug_expectations = {
+        50001: """50001 Bug with a patch needing review. (r35)
+... not yet released in any Chrome channels.
+""",
+        50002: """50002 The third bug
+... has too confusing a commit history to parse, skipping
+""",
+        50003: """50003 The fourth bug
+... does not appear to have an associated commit.
+""",
+        50004: """50004 The fifth bug (r15)
+ canary: Mac, Windows
+    dev: Mac, Windows, Linux
+   beta: Windows, Linux
+ stable: Linux
+"""}
+
+    def test_single_bug(self):
+        testable_chrome_channels = TestableChromeChannels()
+        tool = MockTool()
+        testable_chrome_channels.bind_to_tool(tool)
+        revisions = testable_chrome_channels._omahaproxy.get_revisions()
+        for bug_id, expectation in self.single_bug_expectations.items():
+            self.assertEqual(testable_chrome_channels._channels_for_bug(revisions, testable_chrome_channels._tool.bugs.fetch_bug(bug_id)),
+                             expectation)
+
+    def test_with_query(self):
+        expected_stdout = \
+"""50001 Bug with a patch needing review. (r35)
+... not yet released in any Chrome channels.
+50002 The third bug
+... has too confusing a commit history to parse, skipping
+50003 The fourth bug
+... does not appear to have an associated commit.
+50004 The fifth bug (r15)
+ canary: Mac, Windows
+    dev: Mac, Windows, Linux
+   beta: Windows, Linux
+ stable: Linux
+"""
+        self.assert_execute_outputs(TestableChromeChannels(), ["foo"], expected_stdout=expected_stdout)
diff --git a/Tools/Scripts/webkitpy/tool/commands/commandtest.py b/Tools/Scripts/webkitpy/tool/commands/commandtest.py
new file mode 100644
index 0000000..eea0a61
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/commandtest.py
@@ -0,0 +1,48 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.webkitunittest import TestCase
+from webkitpy.tool.mocktool import MockOptions, MockTool
+
+
+class CommandsTest(TestCase):
+    def assert_execute_outputs(self, command, args=[], expected_stdout="", expected_stderr="", expected_exception=None, options=MockOptions(), tool=MockTool()):
+        options.blocks = None
+        options.cc = 'MOCK cc'
+        options.component = 'MOCK component'
+        options.confirm = True
+        options.email = 'MOCK email'
+        options.git_commit = 'MOCK git commit'
+        options.obsolete_patches = True
+        options.open_bug = True
+        options.port = 'MOCK port'
+        options.quiet = True
+        options.reviewer = 'MOCK reviewer'
+        command.bind_to_tool(tool)
+        OutputCapture().assert_outputs(self, command.execute, [options, args, tool], expected_stdout=expected_stdout, expected_stderr=expected_stderr, expected_exception=expected_exception)
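+
+# Typical usage from a command test (sketch; SomeCommand is hypothetical, and
+# the pattern mirrors the unit tests elsewhere in this directory):
+#
+#     class SomeCommandTest(CommandsTest):
+#         def test_execute(self):
+#             self.assert_execute_outputs(SomeCommand(), ['arg'], expected_stdout='...\n')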
diff --git a/Tools/Scripts/webkitpy/tool/commands/data/summary.html b/Tools/Scripts/webkitpy/tool/commands/data/summary.html
new file mode 100644
index 0000000..abf80d8
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/data/summary.html
@@ -0,0 +1,455 @@
+<!DOCTYPE html>
+<html>
+<head>
+<title>ChangeLog Analysis</title>
+<style type="text/css">
+
+body {
+    font-family: 'Helvetica', 'Segoe UI Light', sans-serif;
+    font-weight: 200;
+    padding: 20px;
+    min-width: 1200px;
+}
+
+* {
+    padding: 0px;
+    margin: 0px;
+    border: 0px;
+}
+
+h1, h2, h3 {
+    font-weight: 200;
+}
+
+h1 {
+    margin: 0 0 1em 0;
+}
+
+h2 {
+    font-size: 1.2em;
+    text-align: center;
+    margin-bottom: 1em;
+}
+
+h3 {
+    font-size: 1em;
+}
+
+.view {
+    margin: 0px;
+    width: 600px;
+    float: left;
+}
+
+.graph-container p {
+    width: 200px;
+    text-align: right;
+    margin: 20px 0 20px 0;
+    padding: 5px;
+    border-right: solid 1px black;
+}
+
+.graph-container table {
+    width: 100%;
+}
+
+.graph-container table, .graph-container td {
+    border-collapse: collapse;
+    border: none;
+}
+
+.graph-container td {
+    padding: 5px;
+    vertical-align: middle;
+}
+
+.graph-container td:first-child {
+    width: 200px;
+    text-align: right;
+    border-right: solid 1px black;
+}
+
+.graph-container .selected {
+    background: #eee;
+}
+
+#reviewers .selected td:first-child {
+    border-radius: 10px 0px 0px 10px;
+}
+
+#areas .selected td:last-child {
+    border-radius: 0px 10px 10px 0px;
+}
+
+.graph-container .bar {
+    display: inline-block;
+    min-height: 1em;
+    background: #9f6;
+    margin-right: 0.4ex;
+}
+
+.graph-container .reviewed-patches {
+    background: #3cf;
+    margin-right: 1px;
+}
+
+.graph-container .unreviewed-patches {
+    background: #f99;
+}
+
+.constrained {
+    background: #eee;
+    border-radius: 10px;
+}
+
+.constrained .vertical-bar {
+    border-right: solid 1px #eee;
+}
+
+#header {
+    border-spacing: 5px;
+}
+
+#header section {
+    display: table-cell;
+    width: 200px;
+    vertical-align: top;
+    border: solid 2px #ccc;
+    border-collapse: collapse;
+    padding: 5px;
+    font-size: 0.8em;
+}
+
+#header dt {
+    float: left;
+}
+
+#header dt:after {
+    content: ': ';
+}
+
+#header .legend {
+    width: 600px;
+}
+
+.legend .bar {
+    width: 15ex;
+    padding: 2px;
+}
+
+.legend .reviews {
+    width: 25ex;
+}
+
+.legend td:first-child {
+    width: 18ex;
+}
+
+</style>
+</head>
+<body>
+<h1>ChangeLog Analysis</h1>
+
+<section id="header">
+<section id="summary">
+<h2>Summary</h2>
+</section>
+
+<section class="legend">
+<h2>Legend</h2>
+<div class="graph-container">
+<table>
+<tbody>
+<tr><td>Contributor's name</td>
+<td><span class="bar reviews">Reviews</span> <span class="value-container">(# of reviews)</span><br>
+<span class="bar reviewed-patches">Reviewed</span><span class="bar unreviewed-patches">Unreviewed</span>
+<span class="value-container">(# of reviewed):(# of unreviewed)</span></td></tr>
+</tbody>
+</table>
+</div>
+</section>
+</section>
+
+<section id="contributors" class="view">
+<h2 id="contributors-title">Contributors</h2>
+<div class="graph-container"></div>
+</section>
+
+<section id="areas" class="view">
+<h2 id="areas-title">Areas of contributions</h2>
+<div class="graph-container"></div>
+</section>
+
+<script>
+
+// Naive implementation of element extensions discussed on public-webapps
+
+if (!Element.prototype.append) {
+    Element.prototype.append = function () {
+        for (var i = 0; i < arguments.length; i++) {
+            // FIXME: Take care of other node types
+            if (arguments[i] instanceof Element || arguments[i] instanceof CharacterData)
+                this.appendChild(arguments[i]);
+            else
+                this.appendChild(document.createTextNode(arguments[i]));
+        }
+        return this;
+    }
+}
+
+if (!Node.prototype.remove) {
+    Node.prototype.remove = function () {
+        this.parentNode.removeChild(this);
+        return this;
+    }
+}
+
+if (!Element.create) {
+    Element.create = function () {
+        if (arguments.length < 1)
+            return null;
+        var element = document.createElement(arguments[0]);
+        if (arguments.length == 1)
+            return element;
+
+        // FIXME: the second argument can be content or IDL attributes
+        var attributes = arguments[1];
+        for (var attribute in attributes)
+            element.setAttribute(attribute, attributes[attribute]);
+
+        if (arguments.length >= 3)
+            element.append.apply(element, arguments[2]);
+
+        return element;
+    }
+}
+
+if (!Node.prototype.removeAllChildren) {
+    Node.prototype.removeAllChildren = function () {
+        while (this.firstChild)
+            this.firstChild.remove();
+        return this;
+    }
+}
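+
+// Usage sketch for the helpers above (the 'bar reviews' class comes from the
+// graph-building code further down in this file):
+//
+//     var cell = Element.create('td', {}, [
+//         Element.create('span', {'class': 'bar reviews'}, ['12'])]);
+//     cell.removeAllChildren();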
+
+Element.prototype.removeClassNameFromAllElements = function (className) {
+    var elements = this.getElementsByClassName(className);
+    for (var i = 0; i < elements.length; i++)
+        elements[i].classList.remove(className);
+}
+
+function getJSON(url, callback) {
+    var xhr = new XMLHttpRequest();
+    xhr.open('GET', url, true);
+    xhr.onreadystatechange = function () {
+        if (this.readyState == 4)
+            callback(JSON.parse(xhr.responseText));
+    }
+    xhr.send();
+}
+
+function GraphView(container) {
+    this._container = container;
+    this._defaultData = null;
+}
+
+GraphView.prototype.setData = function(data, constrained) {
+    if (constrained)
+        this._container.classList.add('constrained');
+    else
+        this._container.classList.remove('constrained');
+    this._clearGraph();
+    this._constructGraph(data);
+}
+
+GraphView.prototype.setDefaultData = function(data) {
+    this._defaultData = data;
+    this.setData(data);
+}
+
+GraphView.prototype.reset = function () {
+    this.setMarginTop();
+    this.setData(this._defaultData);
+}
+
+GraphView.prototype.isConstrained = function () { return this._container.classList.contains('constrained'); }
+
+GraphView.prototype.targetRow = function (node) {
+    var target = null;
+
+    while (node && node != this._container) {
+        if (node.localName == 'tr')
+            target = node;
+        node = node.parentNode;
+    }
+
+    return node && target;
+}
+
+GraphView.prototype.selectRow = function (row) {
+    this._container.removeClassNameFromAllElements('selected');
+    row.classList.add('selected');
+}
+
+GraphView.prototype.setMarginTop = function (y) { this._container.style.marginTop = y ? y + 'px' : null; }
+GraphView.prototype._graphContainer = function () { return this._container.getElementsByClassName('graph-container')[0]; }
+GraphView.prototype._clearGraph = function () { return this._graphContainer().removeAllChildren(); }
+
+GraphView.prototype._numberOfPatches = function (dataItem) {
+    return dataItem.numberOfReviewedPatches + (dataItem.numberOfUnreviewedPatches !== undefined ? dataItem.numberOfUnreviewedPatches : 0);
+}
+
+GraphView.prototype._maximumValue = function (labels, data) {
+    var numberOfPatches = this._numberOfPatches;
+    return Math.max.apply(null, labels.map(function (label) {
+        return Math.max(numberOfPatches(data[label]), data[label].numberOfReviews !== undefined ? data[label].numberOfReviews : 0);
+    }));
+}
+
+GraphView.prototype._sortLabelsByNumberOfReviewsAndReviewedPatches = function(data) {
+    var labels = Object.keys(data);
+    if (!labels.length)
+        return null;
+    var numberOfPatches = this._numberOfPatches;
+    var computeValue = function (dataItem) {
+        return numberOfPatches(dataItem) + (dataItem.numberOfReviews !== undefined ? dataItem.numberOfReviews : 0);
+    }
+    labels.sort(function (a, b) { return computeValue(data[b]) - computeValue(data[a]); });
+    return labels;
+}
+
+GraphView.prototype._constructGraph = function (data) {
+    var element = this._graphContainer();
+    var labels = this._sortLabelsByNumberOfReviewsAndReviewedPatches(data);
+    if (!labels) {
+        element.append(Element.create('p', {}, ['None']));
+        return;
+    }
+
+    var maxValue = this._maximumValue(labels, data);
+    var computeStyleForBar = function (value) { return 'width:' + (value * 85.0 / maxValue) + '%' }
+
+    var table = Element.create('table', {}, [Element.create('tbody')]);
+    for (var i = 0; i < labels.length; i++) {
+        var label = labels[i];
+        var item = data[label];
+        var row = Element.create('tr', {}, [Element.create('td', {}, [label]), Element.create('td', {})]);
+        var valueCell = row.lastChild;
+
+        if (item.numberOfReviews != undefined) {
+            valueCell.append(
+                Element.create('span', {'class': 'bar reviews', 'style': computeStyleForBar(item.numberOfReviews) }),
+                Element.create('span', {'class': 'value-container'}, [item.numberOfReviews]),
+                Element.create('br')
+            );
+        }
+
+        valueCell.append(Element.create('span', {'class': 'bar reviewed-patches', 'style': computeStyleForBar(item.numberOfReviewedPatches) }));
+        if (item.numberOfUnreviewedPatches !== undefined)
+            valueCell.append(Element.create('span', {'class': 'bar unreviewed-patches', 'style': computeStyleForBar(item.numberOfUnreviewedPatches) }));
+
+        valueCell.append(Element.create('span', {'class': 'value-container'},
+            [item.numberOfReviewedPatches + (item.numberOfUnreviewedPatches !== undefined ? ':' + item.numberOfUnreviewedPatches : '')]));
+
+        table.firstChild.append(row);
+        row.label = label;
+        row.data = item;
+    }
+    element.append(table);
+}
+
+var contributorsView = new GraphView(document.querySelector('#contributors'));
+var areasView = new GraphView(document.querySelector('#areas'));
+
+getJSON('summary.json',
+    function (summary) {
+        var summaryContainer = document.querySelector('#summary');
+        summaryContainer.append(Element.create('dl', {}, [
+            Element.create('dt', {}, ['Total entries (reviewed)']),
+            Element.create('dd', {}, [(summary['reviewed'] + summary['unreviewed']) + ' (' + summary['reviewed'] + ')']),
+            Element.create('dt', {}, ['Total contributors']),
+            Element.create('dd', {}, [summary['contributors']]),
+            Element.create('dt', {}, ['Contributors who reviewed']),
+            Element.create('dd', {}, [summary['contributors_with_reviews']]),
+        ]));
+    });
+
+getJSON('contributors.json',
+    function (contributors) {
+        for (var contributor in contributors) {
+            contributor = contributors[contributor];
+            contributor.numberOfReviews = contributor.reviews ? contributor.reviews.total : 0;
+            contributor.numberOfReviewedPatches = contributor.patches ? contributor.patches.reviewed : 0;
+            contributor.numberOfUnreviewedPatches = contributor.patches ? contributor.patches.unreviewed : 0;
+        }
+        contributorsView.setDefaultData(contributors);
+    });
+
+getJSON('areas.json',
+    function (areas) {
+        for (var area in areas) {
+            areas[area].numberOfReviewedPatches = areas[area].reviewed;
+            areas[area].numberOfUnreviewedPatches = areas[area].unreviewed;
+        }
+        areasView.setDefaultData(areas);
+    });
+
+function contributorAreas(contributorData) {
+    var areas = new Object;
+    for (var area in contributorData.reviews.areas) {
+        if (!areas[area])
+            areas[area] = {'numberOfReviewedPatches': 0};
+        areas[area].numberOfReviews = contributorData.reviews.areas[area];
+    }
+    for (var area in contributorData.patches.areas) {
+        if (!areas[area])
+            areas[area] = {'numberOfReviews': 0};
+        areas[area].numberOfReviewedPatches = contributorData.patches.areas[area];
+    }
+    return areas;
+}
+
+function areaContributors(areaData) {
+    var contributors = areaData['contributors'];
+    for (var contributor in contributors) {
+        contributor = contributors[contributor];
+        contributor.numberOfReviews = contributor.reviews;
+        contributor.numberOfReviewedPatches = contributor.reviewed;
+        contributor.numberOfUnreviewedPatches = contributor.unreviewed;
+    }
+    return contributors;
+}
+
+var mouseTimer = 0;
+window.onmouseover = function (event) {
+    clearTimeout(mouseTimer);
+
+    var row = contributorsView.targetRow(event.target);
+    if (row) {
+        if (!contributorsView.isConstrained()) {
+            contributorsView.selectRow(row);
+            areasView.setMarginTop(row.firstChild.offsetTop);
+            areasView.setData(contributorAreas(row.data), 'constrained');
+        }
+        return;
+    }
+
+    row = areasView.targetRow(event.target);
+    if (row) {
+        if (!areasView.isConstrained()) {
+            areasView.selectRow(row);
+            contributorsView.setMarginTop(row.firstChild.offsetTop);
+            contributorsView.setData(areaContributors(row.data), 'constrained');
+        }
+        return;
+    }
+
+    mouseTimer = setTimeout(function () {
+        contributorsView.reset();
+        areasView.reset();
+    }, 500);
+}
+
+</script>
+</body>
+</html>
diff --git a/Tools/Scripts/webkitpy/tool/commands/download.py b/Tools/Scripts/webkitpy/tool/commands/download.py
new file mode 100644
index 0000000..1f73d18
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/download.py
@@ -0,0 +1,477 @@
+# Copyright (c) 2009, 2011 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool import steps
+
+from webkitpy.common.checkout.changelog import ChangeLog
+from webkitpy.common.config import urls
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.tool.commands.abstractsequencedcommand import AbstractSequencedCommand
+from webkitpy.tool.commands.stepsequence import StepSequence
+from webkitpy.tool.comments import bug_comment_from_commit_text
+from webkitpy.tool.grammar import pluralize
+from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+from webkitpy.common.system.deprecated_logging import error, log
+
+
+class Clean(AbstractSequencedCommand):
+    name = "clean"
+    help_text = "Clean the working copy"
+    steps = [
+        steps.CleanWorkingDirectory,
+    ]
+
+    def _prepare_state(self, options, args, tool):
+        options.force_clean = True
+
+
+class Update(AbstractSequencedCommand):
+    name = "update"
+    help_text = "Update working copy (used internally)"
+    steps = [
+        steps.CleanWorkingDirectory,
+        steps.Update,
+    ]
+
+
+class Build(AbstractSequencedCommand):
+    name = "build"
+    help_text = "Update working copy and build"
+    steps = [
+        steps.CleanWorkingDirectory,
+        steps.Update,
+        steps.Build,
+    ]
+
+    def _prepare_state(self, options, args, tool):
+        options.build = True
+
+
+class BuildAndTest(AbstractSequencedCommand):
+    name = "build-and-test"
+    help_text = "Update working copy, build, and run the tests"
+    steps = [
+        steps.CleanWorkingDirectory,
+        steps.Update,
+        steps.Build,
+        steps.RunTests,
+    ]
+
+
+class Land(AbstractSequencedCommand):
+    name = "land"
+    help_text = "Land the current working directory diff and update the associated bug if any"
+    argument_names = "[BUGID]"
+    show_in_main_help = True
+    steps = [
+        steps.AddSvnMimetypeForPng,
+        steps.UpdateChangeLogsWithReviewer,
+        steps.ValidateReviewer,
+        steps.ValidateChangeLogs, # We do this after UpdateChangeLogsWithReviewer to avoid having to cache the diff twice.
+        steps.Build,
+        steps.RunTests,
+        steps.Commit,
+        steps.CloseBugForLandDiff,
+    ]
+    long_help = """land commits the current working copy diff (just as svn or git commit would).
+land will NOT build and run the tests before committing, but you can use the --build option for that.
+If a bug id is provided, or one can be found in the ChangeLog, land will update the bug after committing."""
+
+    def _prepare_state(self, options, args, tool):
+        changed_files = self._tool.scm().changed_files(options.git_commit)
+        return {
+            "changed_files": changed_files,
+            "bug_id": (args and args[0]) or tool.checkout().bug_id_for_this_commit(options.git_commit, changed_files),
+        }
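+
+# Example invocation (sketch; assumes this command is run through the
+# webkit-patch tool; the bug id is hypothetical and optional, since land can
+# also find it in the ChangeLog):
+#
+#     webkit-patch land 50001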
+
+
+class LandCowboy(AbstractSequencedCommand):
+    name = "land-cowboy"
+    help_text = "Prepares a ChangeLog and lands the current working directory diff."
+    steps = [
+        steps.PrepareChangeLog,
+        steps.EditChangeLog,
+        steps.CheckStyle,
+        steps.ConfirmDiff,
+        steps.Build,
+        steps.RunTests,
+        steps.Commit,
+        steps.CloseBugForLandDiff,
+    ]
+
+    def _prepare_state(self, options, args, tool):
+        options.check_style_filter = "-changelog"
+
+
+class LandCowhand(LandCowboy):
+    # Gender-blind term for cowboy, see: http://en.wiktionary.org/wiki/cowhand
+    name = "land-cowhand"
+
+
+class CheckStyleLocal(AbstractSequencedCommand):
+    name = "check-style-local"
+    help_text = "Run check-webkit-style on the current working directory diff"
+    steps = [
+        steps.CheckStyle,
+    ]
+
+
+class AbstractPatchProcessingCommand(AbstractDeclarativeCommand):
+    # Subclasses must implement the methods below.  We don't declare them here
+    # because we want to be able to implement them with mix-ins.
+    #
+    # def _fetch_list_of_patches_to_process(self, options, args, tool):
+    # def _prepare_to_process(self, options, args, tool):
+
+    @staticmethod
+    def _collect_patches_by_bug(patches):
+        bugs_to_patches = {}
+        for patch in patches:
+            bugs_to_patches[patch.bug_id()] = bugs_to_patches.get(patch.bug_id(), []) + [patch]
+        return bugs_to_patches
+
+    def execute(self, options, args, tool):
+        self._prepare_to_process(options, args, tool)
+        patches = self._fetch_list_of_patches_to_process(options, args, tool)
+
+        # It's nice to print out total statistics.
+        bugs_to_patches = self._collect_patches_by_bug(patches)
+        log("Processing %s from %s." % (pluralize("patch", len(patches)), pluralize("bug", len(bugs_to_patches))))
+
+        for patch in patches:
+            self._process_patch(patch, options, args, tool)
+
+
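+# How the pieces below fit together (sketch; ExampleCommand is hypothetical):
+# a concrete command pairs a sequencing base class with one of the mix-ins,
+# which supplies _fetch_list_of_patches_to_process(), e.g.
+#
+#     class ExampleCommand(AbstractPatchSequencingCommand, ProcessAttachmentsMixin):
+#         name = "example-command"
+#         main_steps = [steps.CleanWorkingDirectory, steps.Update, steps.ApplyPatch]
+#
+# CheckStyle, BuildAttachment, and the landing commands below all follow this pattern.
+
+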
+class AbstractPatchSequencingCommand(AbstractPatchProcessingCommand):
+    prepare_steps = None
+    main_steps = None
+
+    def __init__(self):
+        options = []
+        self._prepare_sequence = StepSequence(self.prepare_steps)
+        self._main_sequence = StepSequence(self.main_steps)
+        options = sorted(set(self._prepare_sequence.options() + self._main_sequence.options()))
+        AbstractPatchProcessingCommand.__init__(self, options)
+
+    def _prepare_to_process(self, options, args, tool):
+        self._prepare_sequence.run_and_handle_errors(tool, options)
+
+    def _process_patch(self, patch, options, args, tool):
+        state = { "patch" : patch }
+        self._main_sequence.run_and_handle_errors(tool, options, state)
+
+
+class ProcessAttachmentsMixin(object):
+    def _fetch_list_of_patches_to_process(self, options, args, tool):
+        return map(lambda patch_id: tool.bugs.fetch_attachment(patch_id), args)
+
+
+class ProcessBugsMixin(object):
+    def _fetch_list_of_patches_to_process(self, options, args, tool):
+        all_patches = []
+        for bug_id in args:
+            patches = tool.bugs.fetch_bug(bug_id).reviewed_patches()
+            log("%s found on bug %s." % (pluralize("reviewed patch", len(patches)), bug_id))
+            all_patches += patches
+        if not all_patches:
+            log("No reviewed patches found, looking for unreviewed patches.")
+            for bug_id in args:
+                patches = tool.bugs.fetch_bug(bug_id).patches()
+                log("%s found on bug %s." % (pluralize("patch", len(patches)), bug_id))
+                all_patches += patches
+        return all_patches
+
+
+class ProcessURLsMixin(object):
+    def _fetch_list_of_patches_to_process(self, options, args, tool):
+        all_patches = []
+        for url in args:
+            bug_id = urls.parse_bug_id(url)
+            if bug_id:
+                patches = tool.bugs.fetch_bug(bug_id).patches()
+                log("%s found on bug %s." % (pluralize("patch", len(patches)), bug_id))
+                all_patches += patches
+
+            attachment_id = urls.parse_attachment_id(url)
+            if attachment_id:
+                all_patches.append(tool.bugs.fetch_attachment(attachment_id))
+
+        return all_patches
+
+
+class CheckStyle(AbstractPatchSequencingCommand, ProcessAttachmentsMixin):
+    name = "check-style"
+    help_text = "Run check-webkit-style on the specified attachments"
+    argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
+    main_steps = [
+        steps.CleanWorkingDirectory,
+        steps.Update,
+        steps.ApplyPatch,
+        steps.CheckStyle,
+    ]
+
+
+class BuildAttachment(AbstractPatchSequencingCommand, ProcessAttachmentsMixin):
+    name = "build-attachment"
+    help_text = "Apply and build patches from bugzilla"
+    argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
+    main_steps = [
+        steps.CleanWorkingDirectory,
+        steps.Update,
+        steps.ApplyPatch,
+        steps.Build,
+    ]
+
+
+class BuildAndTestAttachment(AbstractPatchSequencingCommand, ProcessAttachmentsMixin):
+    name = "build-and-test-attachment"
+    help_text = "Apply, build, and test patches from bugzilla"
+    argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
+    main_steps = [
+        steps.CleanWorkingDirectory,
+        steps.Update,
+        steps.ApplyPatch,
+        steps.Build,
+        steps.RunTests,
+    ]
+
+
+class AbstractPatchApplyingCommand(AbstractPatchSequencingCommand):
+    prepare_steps = [
+        steps.EnsureLocalCommitIfNeeded,
+        steps.CleanWorkingDirectoryWithLocalCommits,
+        steps.Update,
+    ]
+    main_steps = [
+        steps.ApplyPatchWithLocalCommit,
+    ]
+    long_help = """Updates the working copy.
+Downloads and applies the patches, creating local commits if necessary."""
+
+
+class ApplyAttachment(AbstractPatchApplyingCommand, ProcessAttachmentsMixin):
+    name = "apply-attachment"
+    help_text = "Apply an attachment to the local working directory"
+    argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
+    show_in_main_help = True
+
+
+class ApplyFromBug(AbstractPatchApplyingCommand, ProcessBugsMixin):
+    name = "apply-from-bug"
+    help_text = "Apply reviewed patches from provided bugs to the local working directory"
+    argument_names = "BUGID [BUGIDS]"
+    show_in_main_help = True
+
+
+class ApplyWatchList(AbstractPatchSequencingCommand, ProcessAttachmentsMixin):
+    name = "apply-watchlist"
+    help_text = "Applies the watchlist to the specified attachments"
+    argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
+    main_steps = [
+        steps.CleanWorkingDirectory,
+        steps.Update,
+        steps.ApplyPatch,
+        steps.ApplyWatchList,
+    ]
+    long_help = """"Applies the watchlist to the specified attachments.
+Downloads the attachment, applies it locally, runs the watchlist against it, and updates the bug with the result."""
+
+
+class AbstractPatchLandingCommand(AbstractPatchSequencingCommand):
+    main_steps = [
+        steps.CleanWorkingDirectory,
+        steps.Update,
+        steps.ApplyPatch,
+        steps.ValidateChangeLogs,
+        steps.ValidateReviewer,
+        steps.Build,
+        steps.RunTests,
+        steps.Commit,
+        steps.ClosePatch,
+        steps.CloseBug,
+    ]
+    long_help = """Checks to make sure builders are green.
+Updates the working copy.
+Applies the patch.
+Builds.
+Runs the layout tests.
+Commits the patch.
+Clears the flags on the patch.
+Closes the bug if no patches are marked for review."""
+
+
+class LandAttachment(AbstractPatchLandingCommand, ProcessAttachmentsMixin):
+    name = "land-attachment"
+    help_text = "Land patches from bugzilla, optionally building and testing them first"
+    argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
+    show_in_main_help = True
+
+
+class LandFromBug(AbstractPatchLandingCommand, ProcessBugsMixin):
+    name = "land-from-bug"
+    help_text = "Land all patches on the given bugs, optionally building and testing them first"
+    argument_names = "BUGID [BUGIDS]"
+    show_in_main_help = True
+
+
+class LandFromURL(AbstractPatchLandingCommand, ProcessURLsMixin):
+    name = "land-from-url"
+    help_text = "Land all patches on the given URLs, optionally building and testing them first"
+    argument_names = "URL [URLS]"
+
+
+class ValidateChangelog(AbstractSequencedCommand):
+    name = "validate-changelog"
+    help_text = "Validate that the ChangeLogs and reviewers look reasonable"
+    long_help = """Examines the current diff to see whether the ChangeLogs
+and the reviewers listed in the ChangeLogs look reasonable.
+"""
+    steps = [
+        steps.ValidateChangeLogs,
+        steps.ValidateReviewer,
+    ]
+
+
+class AbstractRolloutPrepCommand(AbstractSequencedCommand):
+    argument_names = "REVISION [REVISIONS] REASON"
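+    # args[0] is a whitespace-separated list of SVN revision numbers and
+    # args[1] is the rollout reason; see _prepare_state below.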
+
+    def _commit_info(self, revision):
+        commit_info = self._tool.checkout().commit_info_for_revision(revision)
+        if commit_info and commit_info.bug_id():
+            # Note: Don't print a bug URL here because it would confuse
+            #       SheriffBot, which just greps the output of create-rollout
+            #       for bug URLs.  It should do better parsing instead.
+            log("Preparing rollout for bug %s." % commit_info.bug_id())
+        else:
+            log("Unable to parse bug number from diff.")
+        return commit_info
+
+    def _prepare_state(self, options, args, tool):
+        revision_list = []
+        for revision in str(args[0]).split():
+            if revision.isdigit():
+                revision_list.append(int(revision))
+            else:
+                raise ScriptError(message="Invalid svn revision number: " + revision)
+        revision_list.sort()
+
+        # We use the earliest revision for the bug info
+        earliest_revision = revision_list[0]
+        state = {
+            "revision": earliest_revision,
+            "revision_list": revision_list,
+            "reason": args[1],
+        }
+        commit_info = self._commit_info(earliest_revision)
+        if commit_info:
+            state["bug_id"] = commit_info.bug_id()
+            cc_list = sorted([party.bugzilla_email()
+                            for party in commit_info.responsible_parties()
+                            if party.bugzilla_email()])
+            # FIXME: We should use the list as the canonical representation.
+            state["bug_cc"] = ",".join(cc_list)
+        return state
+
+
+class PrepareRollout(AbstractRolloutPrepCommand):
+    name = "prepare-rollout"
+    help_text = "Revert the given revision(s) in the working copy and prepare ChangeLogs with revert reason"
+    long_help = """Updates the working copy.
+Applies the inverse diff for the provided revision(s).
+Creates an appropriate rollout ChangeLog, including a trac link and bug link.
+"""
+    steps = [
+        steps.CleanWorkingDirectory,
+        steps.Update,
+        steps.RevertRevision,
+        steps.PrepareChangeLogForRevert,
+    ]
+
+
+class CreateRollout(AbstractRolloutPrepCommand):
+    name = "create-rollout"
+    help_text = "Creates a bug to track the broken SVN revision(s) and uploads a rollout patch."
+    steps = [
+        steps.CleanWorkingDirectory,
+        steps.Update,
+        steps.RevertRevision,
+        steps.CreateBug,
+        steps.PrepareChangeLogForRevert,
+        steps.PostDiffForRevert,
+    ]
+
+    def _prepare_state(self, options, args, tool):
+        state = AbstractRolloutPrepCommand._prepare_state(self, options, args, tool)
+        # Currently, state["bug_id"] points to the bug that caused the
+        # regression.  We want to create a new bug that blocks the old bug
+        # so we move state["bug_id"] to state["bug_blocked"] and delete the
+        # old state["bug_id"] so that steps.CreateBug will actually create
+        # the new bug that we want (and subsequently store its bug id into
+        # state["bug_id"])
+        state["bug_blocked"] = state["bug_id"]
+        del state["bug_id"]
+        state["bug_title"] = "REGRESSION(r%s): %s" % (state["revision"], state["reason"])
+        state["bug_description"] = "%s broke the build:\n%s" % (urls.view_revision_url(state["revision"]), state["reason"])
+        # FIXME: If we had more context here, we could link to other open bugs
+        #        that mention the test that regressed.
+        if options.parent_command == "sheriff-bot":
+            state["bug_description"] += """
+
+This is an automatic bug report generated by the sheriff-bot. If this bug
+report was created because of a flaky test, please file a bug for the flaky
+test (if we don't already have one on file) and dup this bug against that bug
+so that we can track how often these flaky tests cause pain.
+
+"Only you can prevent forest fires." -- Smokey the Bear
+"""
+        return state
+
+
+class Rollout(AbstractRolloutPrepCommand):
+    name = "rollout"
+    show_in_main_help = True
+    help_text = "Revert the given revision(s) in the working copy and optionally commit the revert and re-open the original bug"
+    long_help = """Updates the working copy.
+Applies the inverse diff for the provided revision.
+Creates an appropriate rollout ChangeLog, including a trac link and bug link.
+Opens the generated ChangeLogs in $EDITOR.
+Shows the prepared diff for confirmation.
+Commits the revert and updates the bug (including re-opening the bug if necessary)."""
+    steps = [
+        steps.CleanWorkingDirectory,
+        steps.Update,
+        steps.RevertRevision,
+        steps.PrepareChangeLogForRevert,
+        steps.EditChangeLog,
+        steps.ConfirmDiff,
+        steps.Build,
+        steps.Commit,
+        steps.ReopenBugAfterRollout,
+    ]
diff --git a/Tools/Scripts/webkitpy/tool/commands/download_unittest.py b/Tools/Scripts/webkitpy/tool/commands/download_unittest.py
new file mode 100644
index 0000000..b71f3da
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/download_unittest.py
@@ -0,0 +1,316 @@
+# Copyright (C) 2009, 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.thirdparty.mock import Mock
+from webkitpy.tool.commands.commandtest import CommandsTest
+from webkitpy.tool.commands.download import *
+from webkitpy.tool.mocktool import MockOptions, MockTool
+from webkitpy.common.checkout.checkout_mock import MockCheckout
+
+
+class AbstractRolloutPrepCommandTest(unittest.TestCase):
+    def test_commit_info(self):
+        command = AbstractRolloutPrepCommand()
+        tool = MockTool()
+        command.bind_to_tool(tool)
+        output = OutputCapture()
+
+        expected_stderr = "Preparing rollout for bug 50000.\n"
+        commit_info = output.assert_outputs(self, command._commit_info, [1234], expected_stderr=expected_stderr)
+        self.assertTrue(commit_info)
+
+        mock_commit_info = Mock()
+        mock_commit_info.bug_id = lambda: None
+        tool._checkout.commit_info_for_revision = lambda revision: mock_commit_info
+        expected_stderr = "Unable to parse bug number from diff.\n"
+        commit_info = output.assert_outputs(self, command._commit_info, [1234], expected_stderr=expected_stderr)
+        self.assertEqual(commit_info, mock_commit_info)
+
+    def test_prepare_state(self):
+        command = AbstractRolloutPrepCommand()
+        mock_commit_info = MockCheckout().commit_info_for_revision(123)
+        command._commit_info = lambda revision: mock_commit_info
+
+        state = command._prepare_state(None, ["124 123 125", "Reason"], None)
+        self.assertEqual(123, state["revision"])
+        self.assertEqual([123, 124, 125], state["revision_list"])
+
+        self.assertRaises(ScriptError, command._prepare_state, options=None, args=["125 r122  123", "Reason"], tool=None)
+        self.assertRaises(ScriptError, command._prepare_state, options=None, args=["125 foo 123", "Reason"], tool=None)
+
+        command._commit_info = lambda revision: None
+        state = command._prepare_state(None, ["124 123 125", "Reason"], None)
+        self.assertEqual(123, state["revision"])
+        self.assertEqual([123, 124, 125], state["revision_list"])
+
+
+class DownloadCommandsTest(CommandsTest):
+    def _default_options(self):
+        options = MockOptions()
+        options.build = True
+        options.build_style = True
+        options.check_style = True
+        options.check_style_filter = None
+        options.clean = True
+        options.close_bug = True
+        options.force_clean = False
+        options.non_interactive = False
+        options.parent_command = 'MOCK parent command'
+        options.quiet = False
+        options.test = True
+        options.update = True
+        return options
+
+    def test_build(self):
+        expected_stderr = "Updating working directory\nBuilding WebKit\n"
+        self.assert_execute_outputs(Build(), [], options=self._default_options(), expected_stderr=expected_stderr)
+
+    def test_build_and_test(self):
+        expected_stderr = "Updating working directory\nBuilding WebKit\nRunning Python unit tests\nRunning Perl unit tests\nRunning JavaScriptCore tests\nRunning WebKit unit tests\nRunning run-webkit-tests\n"
+        self.assert_execute_outputs(BuildAndTest(), [], options=self._default_options(), expected_stderr=expected_stderr)
+
+    def test_apply_attachment(self):
+        options = self._default_options()
+        options.update = True
+        options.local_commit = True
+        expected_stderr = "Updating working directory\nProcessing 1 patch from 1 bug.\nProcessing patch 10000 from bug 50000.\n"
+        self.assert_execute_outputs(ApplyAttachment(), [10000], options=options, expected_stderr=expected_stderr)
+
+    def test_apply_from_bug(self):
+        options = self._default_options()
+        options.update = True
+        options.local_commit = True
+
+        expected_stderr = "Updating working directory\n0 reviewed patches found on bug 50001.\nNo reviewed patches found, looking for unreviewed patches.\n1 patch found on bug 50001.\nProcessing 1 patch from 1 bug.\nProcessing patch 10002 from bug 50001.\n"
+        self.assert_execute_outputs(ApplyFromBug(), [50001], options=options, expected_stderr=expected_stderr)
+
+        expected_stderr = "Updating working directory\n2 reviewed patches found on bug 50000.\nProcessing 2 patches from 1 bug.\nProcessing patch 10000 from bug 50000.\nProcessing patch 10001 from bug 50000.\n"
+        self.assert_execute_outputs(ApplyFromBug(), [50000], options=options, expected_stderr=expected_stderr)
+
+    def test_apply_watch_list(self):
+        expected_stderr = """Processing 1 patch from 1 bug.
+Updating working directory
+MOCK run_and_throw_if_fail: ['mock-update-webkit'], cwd=/mock-checkout\nProcessing patch 10000 from bug 50000.
+MockWatchList: determine_cc_and_messages
+"""
+        self.assert_execute_outputs(ApplyWatchList(), [10000], options=self._default_options(), expected_stderr=expected_stderr, tool=MockTool(log_executive=True))
+
+    def test_land(self):
+        expected_stderr = "Building WebKit\nRunning Python unit tests\nRunning Perl unit tests\nRunning JavaScriptCore tests\nRunning WebKit unit tests\nRunning run-webkit-tests\nCommitted r49824: <http://trac.webkit.org/changeset/49824>\nUpdating bug 50000\n"
+        mock_tool = MockTool()
+        mock_tool.scm().create_patch = Mock(return_value="Patch1\nMockPatch\n")
+        mock_tool.checkout().modified_changelogs = Mock(return_value=[])
+        self.assert_execute_outputs(Land(), [50000], options=self._default_options(), expected_stderr=expected_stderr, tool=mock_tool)
+        # Make sure we're not making expensive calls too often.
+        self.assertEqual(mock_tool.scm().create_patch.call_count, 0)
+        self.assertEqual(mock_tool.checkout().modified_changelogs.call_count, 1)
+
+    def test_land_cowboy(self):
+        expected_stderr = """MOCK run_and_throw_if_fail: ['mock-prepare-ChangeLog', '--email=MOCK email', '--merge-base=None', 'MockFile1'], cwd=/mock-checkout
+MOCK run_and_throw_if_fail: ['mock-check-webkit-style', '--git-commit', 'MOCK git commit', '--diff-files', 'MockFile1', '--filter', '-changelog'], cwd=/mock-checkout
+MOCK run_command: ['ruby', '-I', '/mock-checkout/Websites/bugs.webkit.org/PrettyPatch', '/mock-checkout/Websites/bugs.webkit.org/PrettyPatch/prettify.rb'], cwd=None, input=Patch1
+MOCK: user.open_url: file://...
+Was that diff correct?
+Building WebKit
+MOCK run_and_throw_if_fail: ['mock-build-webkit'], cwd=/mock-checkout, env={'LC_ALL': 'C', 'MOCK_ENVIRON_COPY': '1'}
+Running Python unit tests
+MOCK run_and_throw_if_fail: ['mock-test-webkitpy'], cwd=/mock-checkout
+Running Perl unit tests
+MOCK run_and_throw_if_fail: ['mock-test-webkitperl'], cwd=/mock-checkout
+Running JavaScriptCore tests
+MOCK run_and_throw_if_fail: ['mock-run-javacriptcore-tests'], cwd=/mock-checkout
+Running WebKit unit tests
+MOCK run_and_throw_if_fail: ['mock-run-webkit-unit-tests'], cwd=/mock-checkout
+Running run-webkit-tests
+MOCK run_and_throw_if_fail: ['mock-run-webkit-tests', '--quiet'], cwd=/mock-checkout
+Committed r49824: <http://trac.webkit.org/changeset/49824>
+Committed r49824: <http://trac.webkit.org/changeset/49824>
+No bug id provided.
+"""
+        mock_tool = MockTool(log_executive=True)
+        self.assert_execute_outputs(LandCowboy(), [50000], options=self._default_options(), expected_stderr=expected_stderr, tool=mock_tool)
+
+    def test_land_red_builders(self):
+        expected_stderr = 'Building WebKit\nRunning Python unit tests\nRunning Perl unit tests\nRunning JavaScriptCore tests\nRunning WebKit unit tests\nRunning run-webkit-tests\nCommitted r49824: <http://trac.webkit.org/changeset/49824>\nUpdating bug 50000\n'
+        mock_tool = MockTool()
+        mock_tool.buildbot.light_tree_on_fire()
+        self.assert_execute_outputs(Land(), [50000], options=self._default_options(), expected_stderr=expected_stderr, tool=mock_tool)
+
+    def test_check_style(self):
+        expected_stderr = """Processing 1 patch from 1 bug.
+Updating working directory
+MOCK run_and_throw_if_fail: ['mock-update-webkit'], cwd=/mock-checkout
+Processing patch 10000 from bug 50000.
+MOCK run_and_throw_if_fail: ['mock-check-webkit-style', '--git-commit', 'MOCK git commit', '--diff-files', 'MockFile1'], cwd=/mock-checkout
+"""
+        self.assert_execute_outputs(CheckStyle(), [10000], options=self._default_options(), expected_stderr=expected_stderr, tool=MockTool(log_executive=True))
+
+    def test_build_attachment(self):
+        expected_stderr = "Processing 1 patch from 1 bug.\nUpdating working directory\nProcessing patch 10000 from bug 50000.\nBuilding WebKit\n"
+        self.assert_execute_outputs(BuildAttachment(), [10000], options=self._default_options(), expected_stderr=expected_stderr)
+
+    def test_land_attachment(self):
+        # FIXME: This expected result is imperfect; note that the same patch still appears to be present even though the command should already have cleared its flags.
+        expected_stderr = """Processing 1 patch from 1 bug.
+Updating working directory
+Processing patch 10000 from bug 50000.
+Building WebKit
+Running Python unit tests
+Running Perl unit tests
+Running JavaScriptCore tests
+Running WebKit unit tests
+Running run-webkit-tests
+Committed r49824: <http://trac.webkit.org/changeset/49824>
+Not closing bug 50000 as attachment 10000 has review=+.  Assuming there are more patches to land from this bug.
+"""
+        self.assert_execute_outputs(LandAttachment(), [10000], options=self._default_options(), expected_stderr=expected_stderr)
+
+    def test_land_from_bug(self):
+        # FIXME: This expected result is imperfect; note that the same patch still appears to be present even though the command should already have cleared its flags.
+        expected_stderr = """2 reviewed patches found on bug 50000.
+Processing 2 patches from 1 bug.
+Updating working directory
+Processing patch 10000 from bug 50000.
+Building WebKit
+Running Python unit tests
+Running Perl unit tests
+Running JavaScriptCore tests
+Running WebKit unit tests
+Running run-webkit-tests
+Committed r49824: <http://trac.webkit.org/changeset/49824>
+Not closing bug 50000 as attachment 10000 has review=+.  Assuming there are more patches to land from this bug.
+Updating working directory
+Processing patch 10001 from bug 50000.
+Building WebKit
+Running Python unit tests
+Running Perl unit tests
+Running JavaScriptCore tests
+Running WebKit unit tests
+Running run-webkit-tests
+Committed r49824: <http://trac.webkit.org/changeset/49824>
+Not closing bug 50000 as attachment 10000 has review=+.  Assuming there are more patches to land from this bug.
+"""
+        self.assert_execute_outputs(LandFromBug(), [50000], options=self._default_options(), expected_stderr=expected_stderr)
+
+    def test_land_from_url(self):
+        # FIXME: This expected result is imperfect; note that the same patch still appears to be present even though the command should already have cleared its flags.
+        expected_stderr = """2 patches found on bug 50000.
+Processing 2 patches from 1 bug.
+Updating working directory
+Processing patch 10000 from bug 50000.
+Building WebKit
+Running Python unit tests
+Running Perl unit tests
+Running JavaScriptCore tests
+Running WebKit unit tests
+Running run-webkit-tests
+Committed r49824: <http://trac.webkit.org/changeset/49824>
+Not closing bug 50000 as attachment 10000 has review=+.  Assuming there are more patches to land from this bug.
+Updating working directory
+Processing patch 10001 from bug 50000.
+Building WebKit
+Running Python unit tests
+Running Perl unit tests
+Running JavaScriptCore tests
+Running WebKit unit tests
+Running run-webkit-tests
+Committed r49824: <http://trac.webkit.org/changeset/49824>
+Not closing bug 50000 as attachment 10000 has review=+.  Assuming there are more patches to land from this bug.
+"""
+        self.assert_execute_outputs(LandFromURL(), ["https://bugs.webkit.org/show_bug.cgi?id=50000"], options=self._default_options(), expected_stderr=expected_stderr)
+
+    def test_prepare_rollout(self):
+        expected_stderr = "Preparing rollout for bug 50000.\nUpdating working directory\n"
+        self.assert_execute_outputs(PrepareRollout(), [852, "Reason"], options=self._default_options(), expected_stderr=expected_stderr)
+
+    def test_create_rollout(self):
+        expected_stderr = """Preparing rollout for bug 50000.
+Updating working directory
+MOCK create_bug
+bug_title: REGRESSION(r852): Reason
+bug_description: http://trac.webkit.org/changeset/852 broke the build:
+Reason
+component: MOCK component
+cc: MOCK cc
+blocked: 50000
+MOCK add_patch_to_bug: bug_id=60001, description=ROLLOUT of r852, mark_for_review=False, mark_for_commit_queue=True, mark_for_landing=False
+-- Begin comment --
+Any committer can land this patch automatically by marking it commit-queue+.  The commit-queue will build and test the patch before landing to ensure that the rollout will be successful.  This process takes approximately 15 minutes.
+
+If you would like to land the rollout faster, you can use the following command:
+
+  webkit-patch land-attachment ATTACHMENT_ID
+
+where ATTACHMENT_ID is the ID of this attachment.
+-- End comment --
+"""
+        self.assert_execute_outputs(CreateRollout(), [852, "Reason"], options=self._default_options(), expected_stderr=expected_stderr)
+        self.assert_execute_outputs(CreateRollout(), ["855 852 854", "Reason"], options=self._default_options(), expected_stderr=expected_stderr)
+
+    def test_create_rollout_resolved(self):
+        expected_stderr = """Preparing rollout for bug 50004.
+Updating working directory
+MOCK create_bug
+bug_title: REGRESSION(r3001): Reason
+bug_description: http://trac.webkit.org/changeset/3001 broke the build:
+Reason
+component: MOCK component
+cc: MOCK cc
+blocked: 50004
+MOCK reopen_bug 50004 with comment 'Re-opened since this is blocked by bug 60001'
+MOCK add_patch_to_bug: bug_id=60001, description=ROLLOUT of r3001, mark_for_review=False, mark_for_commit_queue=True, mark_for_landing=False
+-- Begin comment --
+Any committer can land this patch automatically by marking it commit-queue+.  The commit-queue will build and test the patch before landing to ensure that the rollout will be successful.  This process takes approximately 15 minutes.
+
+If you would like to land the rollout faster, you can use the following command:
+
+  webkit-patch land-attachment ATTACHMENT_ID
+
+where ATTACHMENT_ID is the ID of this attachment.
+-- End comment --
+"""
+        self.assert_execute_outputs(CreateRollout(), [3001, "Reason"], options=self._default_options(), expected_stderr=expected_stderr)
+
+    def test_rollout(self):
+        expected_stderr = """Preparing rollout for bug 50000.
+Updating working directory
+MOCK: user.open_url: file://...
+Was that diff correct?
+Building WebKit
+Committed r49824: <http://trac.webkit.org/changeset/49824>
+MOCK reopen_bug 50000 with comment 'Reverted r852 for reason:
+
+Reason
+
+Committed r49824: <http://trac.webkit.org/changeset/49824>'
+"""
+        self.assert_execute_outputs(Rollout(), [852, "Reason"], options=self._default_options(), expected_stderr=expected_stderr)
+
diff --git a/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py b/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py
new file mode 100644
index 0000000..b7d5df3
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py
@@ -0,0 +1,212 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from optparse import make_option
+
+from webkitpy.common.config.committers import CommitterList
+from webkitpy.common.config.ports import DeprecatedPort
+from webkitpy.common.system.deprecated_logging import error, log
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.tool.bot.earlywarningsystemtask import EarlyWarningSystemTask, EarlyWarningSystemTaskDelegate
+from webkitpy.tool.bot.expectedfailures import ExpectedFailures
+from webkitpy.tool.bot.layouttestresultsreader import LayoutTestResultsReader
+from webkitpy.tool.bot.patchanalysistask import UnableToApplyPatch
+from webkitpy.tool.bot.queueengine import QueueEngine
+from webkitpy.tool.commands.queues import AbstractReviewQueue
+
+
+class AbstractEarlyWarningSystem(AbstractReviewQueue, EarlyWarningSystemTaskDelegate):
+    _build_style = "release"
+    # FIXME: Switch _default_run_tests from opt-in to opt-out once more bots are ready to run tests.
+    _default_run_tests = False
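+    # Concrete EWS subclasses (see below) define name and port_name and may
+    # override watchers, _build_style and _default_run_tests.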
+
+    def __init__(self):
+        options = [make_option("--run-tests", action="store_true", dest="run_tests", default=self._default_run_tests, help="Run the Layout tests for each patch")]
+        AbstractReviewQueue.__init__(self, options=options)
+        self.port = DeprecatedPort.port(self.port_name)
+
+    def begin_work_queue(self):
+        # FIXME: This violates abstraction
+        self._tool._deprecated_port = self.port
+        AbstractReviewQueue.begin_work_queue(self)
+        self._expected_failures = ExpectedFailures()
+        self._layout_test_results_reader = LayoutTestResultsReader(self._tool, self._log_directory())
+
+    def _failing_tests_message(self, task, patch):
+        results = task.results_from_patch_test_run(patch)
+        unexpected_failures = self._expected_failures.unexpected_failures_observed(results)
+        if not unexpected_failures:
+            return None
+        return "New failing tests:\n%s" % "\n".join(unexpected_failures)
+
+    def _post_reject_message_on_bug(self, tool, patch, status_id, extra_message_text=None):
+        results_link = tool.status_server.results_url_for_status(status_id)
+        message = "Attachment %s did not pass %s (%s):\nOutput: %s" % (patch.id(), self.name, self.port_name, results_link)
+        # FIXME: We might want to add some text about rejecting from the commit-queue in
+        # the case where patch.commit_queue() isn't already set to '-'.
+        if self.watchers:
+            tool.bugs.add_cc_to_bug(patch.bug_id(), self.watchers)
+        tool.bugs.set_flag_on_attachment(patch.id(), "commit-queue", "-", message, extra_message_text)
+
+    def review_patch(self, patch):
+        task = EarlyWarningSystemTask(self, patch, self._options.run_tests)
+        if not task.validate():
+            self._did_error(patch, "%s did not process patch." % self.name)
+            return False
+        try:
+            return task.run()
+        except UnableToApplyPatch, e:
+            self._did_error(patch, "%s unable to apply patch." % self.name)
+            return False
+        except ScriptError, e:
+            self._post_reject_message_on_bug(self._tool, patch, task.failure_status_id, self._failing_tests_message(task, patch))
+            results_archive = task.results_archive_from_patch_test_run(patch)
+            if results_archive:
+                self._upload_results_archive_for_patch(patch, results_archive)
+            self._did_fail(patch)
+            # FIXME: We're supposed to be able to raise e again here and have
+            # one of our base classes mark the patch as failed, but there seems
+            # to be an issue with the exit_code.
+            return False
+
+    # EarlyWarningSystemDelegate methods
+
+    def parent_command(self):
+        return self.name
+
+    def run_command(self, command):
+        self.run_webkit_patch(command + [self.port.flag()])
+
+    def command_passed(self, message, patch):
+        pass
+
+    def command_failed(self, message, script_error, patch):
+        failure_log = self._log_from_script_error_for_upload(script_error)
+        return self._update_status(message, patch=patch, results_file=failure_log)
+
+    def expected_failures(self):
+        return self._expected_failures
+
+    def test_results(self):
+        return self._layout_test_results_reader.results()
+
+    def archive_last_test_results(self, patch):
+        return self._layout_test_results_reader.archive(patch)
+
+    def build_style(self):
+        return self._build_style
+
+    def refetch_patch(self, patch):
+        return self._tool.bugs.fetch_attachment(patch.id())
+
+    def report_flaky_tests(self, patch, flaky_test_results, results_archive):
+        pass
+
+    # StepSequenceErrorHandler methods
+
+    @classmethod
+    def handle_script_error(cls, tool, state, script_error):
+        # FIXME: Why does this not exit(1) like the superclass does?
+        log(script_error.message_with_output())
+
+
+class GtkEWS(AbstractEarlyWarningSystem):
+    name = "gtk-ews"
+    port_name = "gtk"
+    watchers = AbstractEarlyWarningSystem.watchers + [
+        "gns@gnome.org",
+        "xan.lopez@gmail.com",
+    ]
+
+
+class EflEWS(AbstractEarlyWarningSystem):
+    name = "efl-ews"
+    port_name = "efl"
+    watchers = AbstractEarlyWarningSystem.watchers + [
+        "leandro@profusion.mobi",
+        "antognolli@profusion.mobi",
+        "lucas.demarchi@profusion.mobi",
+        "gyuyoung.kim@samsung.com",
+    ]
+
+
+class QtEWS(AbstractEarlyWarningSystem):
+    name = "qt-ews"
+    port_name = "qt"
+    watchers = AbstractEarlyWarningSystem.watchers + [
+        "webkit-ews@sed.inf.u-szeged.hu",
+    ]
+
+
+class QtWK2EWS(AbstractEarlyWarningSystem):
+    name = "qt-wk2-ews"
+    port_name = "qt"
+    watchers = AbstractEarlyWarningSystem.watchers + [
+        "webkit-ews@sed.inf.u-szeged.hu",
+    ]
+
+
+class WinEWS(AbstractEarlyWarningSystem):
+    name = "win-ews"
+    port_name = "win"
+    # Use debug because the Apple Win port fails to link Release on 32-bit Windows.
+    # https://bugs.webkit.org/show_bug.cgi?id=39197
+    _build_style = "debug"
+
+
+class AbstractChromiumEWS(AbstractEarlyWarningSystem):
+    port_name = "chromium"
+    watchers = AbstractEarlyWarningSystem.watchers + [
+        "dglazkov@chromium.org",
+    ]
+
+
+class ChromiumLinuxEWS(AbstractChromiumEWS):
+    # FIXME: We should rename this command to cr-linux-ews, but that requires
+    #        a database migration. :(
+    name = "chromium-ews"
+    port_name = "chromium-xvfb"
+    _default_run_tests = True
+
+
+class ChromiumWindowsEWS(AbstractChromiumEWS):
+    name = "cr-win-ews"
+
+
+class ChromiumAndroidEWS(AbstractChromiumEWS):
+    name = "cr-android-ews"
+    port_name = "chromium-android"
+    watchers = AbstractChromiumEWS.watchers + [
+        "peter+ews@chromium.org",
+    ]
+
+
+class MacEWS(AbstractEarlyWarningSystem):
+    name = "mac-ews"
+    port_name = "mac"
+    _default_run_tests = True
diff --git a/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem_unittest.py b/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem_unittest.py
new file mode 100644
index 0000000..7feff0d
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem_unittest.py
@@ -0,0 +1,92 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.thirdparty.mock import Mock
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.tool.bot.queueengine import QueueEngine
+from webkitpy.tool.commands.earlywarningsystem import *
+from webkitpy.tool.commands.queuestest import QueuesTest
+from webkitpy.tool.mocktool import MockTool, MockOptions
+
+
+class AbstractEarlyWarningSystemTest(QueuesTest):
+    def test_failing_tests_message(self):
+        # Needed to define port_name, used in AbstractEarlyWarningSystem.__init__
+        class TestEWS(AbstractEarlyWarningSystem):
+            port_name = "win"  # Needs to be a port which port/factory understands.
+
+        ews = TestEWS()
+        ews.bind_to_tool(MockTool())
+        ews._options = MockOptions(port=None, confirm=False)
+        OutputCapture().assert_outputs(self, ews.begin_work_queue, expected_stderr=self._default_begin_work_queue_stderr(ews.name))
+        ews._expected_failures.unexpected_failures_observed = lambda results: set(["foo.html", "bar.html"])
+        task = Mock()
+        patch = ews._tool.bugs.fetch_attachment(10000)
+        self.assertEqual(ews._failing_tests_message(task, patch), "New failing tests:\nbar.html\nfoo.html")
+
+
+class EarlyWarningSystemTest(QueuesTest):
+    def _default_expected_stderr(self, ews):
+        string_replacements = {
+            "name": ews.name,
+            "port": ews.port_name,
+        }
+        expected_stderr = {
+            "begin_work_queue": self._default_begin_work_queue_stderr(ews.name),
+            "handle_unexpected_error": "Mock error message\n",
+            "next_work_item": "",
+            "process_work_item": "MOCK: update_status: %(name)s Pass\nMOCK: release_work_item: %(name)s 10000\n" % string_replacements,
+            "handle_script_error": "ScriptError error message\n\nMOCK output\n",
+        }
+        return expected_stderr
+
+    def _test_builder_ews(self, ews):
+        ews.bind_to_tool(MockTool())
+        options = Mock()
+        options.port = None
+        options.run_tests = ews._default_run_tests
+        self.assert_queue_outputs(ews, expected_stderr=self._default_expected_stderr(ews), options=options)
+
+    def _test_testing_ews(self, ews):
+        ews.test_results = lambda: None
+        ews.bind_to_tool(MockTool())
+        expected_stderr = self._default_expected_stderr(ews)
+        expected_stderr["handle_script_error"] = "ScriptError error message\n\nMOCK output\n"
+        self.assert_queue_outputs(ews, expected_stderr=expected_stderr)
+
+    def test_builder_ewses(self):
+        self._test_builder_ews(MacEWS())
+        self._test_builder_ews(ChromiumWindowsEWS())
+        self._test_builder_ews(ChromiumAndroidEWS())
+        self._test_builder_ews(QtEWS())
+        self._test_builder_ews(QtWK2EWS())
+        self._test_builder_ews(GtkEWS())
+        self._test_builder_ews(EflEWS())
+
+    def test_testing_ewses(self):
+        self._test_testing_ews(ChromiumLinuxEWS())
diff --git a/Tools/Scripts/webkitpy/tool/commands/expectations.py b/Tools/Scripts/webkitpy/tool/commands/expectations.py
new file mode 100644
index 0000000..0e1050b
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/expectations.py
@@ -0,0 +1,44 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.layout_tests.models.test_configuration import TestConfigurationConverter
+from webkitpy.layout_tests.models.test_expectations import TestExpectationParser
+from webkitpy.layout_tests.models.test_expectations import TestExpectations
+from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+
+
+class OptimizeExpectations(AbstractDeclarativeCommand):
+    name = "optimize-expectations"
+    help_text = "Fixes simple style issues in test_expectations file.  (Currently works only for chromium port.)"
+
+    def execute(self, options, args, tool):
+        port = tool.port_factory.get("chromium-win-win7")  # FIXME: This should be selectable.
+        parser = TestExpectationParser(port, [], allow_rebaseline_modifier=False)
+        expectation_lines = parser.parse(port.test_expectations())
+        converter = TestConfigurationConverter(port.all_test_configurations(), port.configuration_specifier_macros())
+        tool.filesystem.write_text_file(port.path_to_test_expectations_file(), TestExpectations.list_to_string(expectation_lines, converter))
diff --git a/Tools/Scripts/webkitpy/tool/commands/findusers.py b/Tools/Scripts/webkitpy/tool/commands/findusers.py
new file mode 100644
index 0000000..4363c8c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/findusers.py
@@ -0,0 +1,44 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+
+
+class FindUsers(AbstractDeclarativeCommand):
+    name = "find-users"
+    help_text = "Find users matching substring"
+
+    def execute(self, options, args, tool):
+        search_string = args[0]
+        login_userid_pairs = tool.bugs.queries.fetch_login_userid_pairs_matching_substring(search_string)
+        if not login_userid_pairs:
+            print "No users found matching '%s'" % search_string
+        for (login, user_id) in login_userid_pairs:
+            user = tool.bugs.fetch_user(user_id)
+            groups_string = ", ".join(user['groups']) if user['groups'] else "none"
+            print "%s <%s> (%s) (%s)" % (user['name'], user['login'], user_id, groups_string)
diff --git a/Tools/Scripts/webkitpy/tool/commands/gardenomatic.py b/Tools/Scripts/webkitpy/tool/commands/gardenomatic.py
new file mode 100644
index 0000000..6cb1519
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/gardenomatic.py
@@ -0,0 +1,59 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.layout_tests.port import builders
+from webkitpy.tool.commands.rebaseline import AbstractRebaseliningCommand
+from webkitpy.tool.servers.gardeningserver import GardeningHTTPServer
+
+
+class GardenOMatic(AbstractRebaseliningCommand):
+    name = "garden-o-matic"
+    help_text = "Command for gardening the WebKit tree."
+
+    def __init__(self):
+        super(AbstractRebaseliningCommand, self).__init__(options=(self.platform_options + [
+            self.move_overwritten_baselines_option,
+            self.results_directory_option,
+            self.no_optimize_option,
+            ]))
+
+    def execute(self, options, args, tool):
+        print "This command runs a local HTTP server that changes your working copy"
+        print "based on the actions you take in the web-based UI."
+
+        args = {}
+        if options.platform:
+            # FIXME: This assumes that the port implementation (chromium-, gtk-, etc.) is the first part of options.platform.
+            args['platform'] = options.platform.split('-')[0]
+            builder = builders.builder_name_for_port_name(options.platform)
+            if builder:
+                args['builder'] = builder
+        if options.results_directory:
+            args['useLocalResults'] = "true"
+
+        httpd = GardeningHTTPServer(httpd_port=8127, config={'tool': tool, 'options': options})
+        self._tool.user.open_url(httpd.url(args))
+
+        print "Local HTTP server started."
+        httpd.serve_forever()
diff --git a/Tools/Scripts/webkitpy/tool/commands/openbugs.py b/Tools/Scripts/webkitpy/tool/commands/openbugs.py
new file mode 100644
index 0000000..1b51c9f
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/openbugs.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import re
+import sys
+
+from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+from webkitpy.common.system.deprecated_logging import log
+
+
+class OpenBugs(AbstractDeclarativeCommand):
+    name = "open-bugs"
+    help_text = "Finds all bug numbers passed in arguments (or stdin if no args provided) and opens them in a web browser"
+
+    bug_number_regexp = re.compile(r"\b\d{4,6}\b")
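+    # Matches standalone 4- to 6-digit numbers; anything shorter or longer is
+    # not treated as a bug id (see openbugs_unittest.py).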
+
+    def _open_bugs(self, bug_ids):
+        for bug_id in bug_ids:
+            bug_url = self._tool.bugs.bug_url_for_bug_id(bug_id)
+            self._tool.user.open_url(bug_url)
+
+    # _find_bugs_in_string mostly exists for easy unit testing.
+    def _find_bugs_in_string(self, string):
+        return self.bug_number_regexp.findall(string)
+
+    def _find_bugs_in_iterable(self, iterable):
+        return sum([self._find_bugs_in_string(string) for string in iterable], [])
+
+    def execute(self, options, args, tool):
+        if args:
+            bug_ids = self._find_bugs_in_iterable(args)
+        else:
+            # This won't open bugs until stdin is closed, but it could easily be changed to do so.  That would just make unit testing slightly harder.
+            bug_ids = self._find_bugs_in_iterable(sys.stdin)
+
+        log("%s bugs found in input." % len(bug_ids))
+
+        self._open_bugs(bug_ids)
diff --git a/Tools/Scripts/webkitpy/tool/commands/openbugs_unittest.py b/Tools/Scripts/webkitpy/tool/commands/openbugs_unittest.py
new file mode 100644
index 0000000..40a6e1b
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/openbugs_unittest.py
@@ -0,0 +1,50 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.commands.commandtest import CommandsTest
+from webkitpy.tool.commands.openbugs import OpenBugs
+
+class OpenBugsTest(CommandsTest):
+
+    find_bugs_in_string_expectations = [
+        ["123", []],
+        ["1234", ["1234"]],
+        ["12345", ["12345"]],
+        ["123456", ["123456"]],
+        ["1234567", []],
+        [" 123456 234567", ["123456", "234567"]],
+    ]
+
+    def test_find_bugs_in_string(self):
+        openbugs = OpenBugs()
+        for expectation in self.find_bugs_in_string_expectations:
+            self.assertEquals(openbugs._find_bugs_in_string(expectation[0]), expectation[1])
+
+    def test_args_parsing(self):
+        expected_stderr = "2 bugs found in input.\nMOCK: user.open_url: http://example.com/12345\nMOCK: user.open_url: http://example.com/23456\n"
+        self.assert_execute_outputs(OpenBugs(), ["12345\n23456"], expected_stderr=expected_stderr)
diff --git a/Tools/Scripts/webkitpy/tool/commands/perfalizer.py b/Tools/Scripts/webkitpy/tool/commands/perfalizer.py
new file mode 100644
index 0000000..ae9f63a
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/perfalizer.py
@@ -0,0 +1,215 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.system.deprecated_logging import log
+from webkitpy.tool.bot.expectedfailures import ExpectedFailures
+from webkitpy.tool.bot.irc_command import IRCCommand
+from webkitpy.tool.bot.irc_command import Help
+from webkitpy.tool.bot.irc_command import Hi
+from webkitpy.tool.bot.irc_command import Restart
+from webkitpy.tool.bot.ircbot import IRCBot
+from webkitpy.tool.bot.patchanalysistask import PatchAnalysisTask, PatchAnalysisTaskDelegate, UnableToApplyPatch
+from webkitpy.tool.bot.sheriff import Sheriff
+from webkitpy.tool.commands.queues import AbstractQueue
+from webkitpy.tool.commands.stepsequence import StepSequenceErrorHandler
+
+
+class PerfalizerTask(PatchAnalysisTask):
+    def __init__(self, tool, patch, logger):
+        PatchAnalysisTask.__init__(self, self, patch)
+        self._port = tool.port_factory.get()
+        self._tool = tool
+        self._logger = logger
+
+    def _copy_build_product_without_patch(self):
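+        # Copies the freshly built (unpatched) build product aside to
+        # "<build dir>WithoutPatch" so run() can compare perf results with
+        # and without the patch.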
+        filesystem = self._tool.filesystem
+        configuration = filesystem.basename(self._port._build_path())
+        self._build_directory = filesystem.dirname(self._port._build_path())
+        self._build_directory_without_patch = self._build_directory + 'WithoutPatch'
+
+        try:
+            filesystem.rmtree(self._build_directory_without_patch)
+            filesystem.copytree(filesystem.join(self._build_directory, configuration),
+                filesystem.join(self._build_directory_without_patch, configuration))
+            return True
+        except Exception:
+            # Copying the build product is best-effort; any failure simply
+            # aborts this run.
+            return False
+
+    def run(self):
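+        # Overall flow: verify the patch is authorized, clean and update the
+        # checkout, build without the patch, snapshot that build product,
+        # apply the patch and rebuild, run the perf tests against both
+        # builds, then upload the generated results page to the bug.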
+        if not self._patch.committer() and not self._patch.attacher().can_commit:
+            self._logger('The patch %d is not authorized by a committer' % self._patch.id())
+            return False
+
+        self._logger('Preparing to run performance tests for the attachment %d...' % self._patch.id())
+        if not self._clean() or not self._update():
+            return False
+
+        head_revision = self._tool.scm().head_svn_revision()
+
+        self._logger('Building WebKit at r%s without the patch' % head_revision)
+        if not self._build_without_patch():
+            return False
+
+        if not self._port.check_build(needs_http=False):
+            self._logger('Failed to build DumpRenderTree.')
+            return False
+
+        if not self._copy_build_product_without_patch():
+            self._logger('Failed to copy the build product from %s to %s' % (self._build_directory, self._build_directory_without_patch))
+            return False
+
+        self._logger('Building WebKit at r%s with the patch' % head_revision)
+        if not self._apply() or not self._build():
+            return False
+
+        if not self._port.check_build(needs_http=False):
+            self._logger('Failed to build DumpRenderTree.')
+            return False
+
+        filesystem = self._tool.filesystem
+        if filesystem.exists(self._json_path()):
+            filesystem.remove(self._json_path())
+
+        self._logger("Running performance tests...")
+        if self._run_perf_test(self._build_directory_without_patch, 'without %d' % self._patch.id()) < 0:
+            self._logger('Failed to run performance tests without the patch.')
+            return False
+
+        if self._run_perf_test(self._build_directory, 'with %d' % self._patch.id()) < 0:
+            self._logger('Failed to run performance tests with the patch.')
+            return False
+
+        if not filesystem.exists(self._results_page_path()):
+            self._logger('Failed to generate the results page.')
+            return False
+
+        results_page = filesystem.read_text_file(self._results_page_path())
+        self._tool.bugs.add_attachment_to_bug(self._patch.bug_id(), results_page,
+            description="Performance tests results for %d" % self._patch.id(), mimetype='text/html')
+
+        self._logger("Uploaded the results to bug %d" % self._patch.bug_id())
+        return True
+
+    def parent_command(self):
+        return "perfalizer"
+
+    def run_webkit_patch(self, args):
+        webkit_patch_args = [self._tool.path()]
+        webkit_patch_args.extend(args)
+        return self._tool.executive.run_and_throw_if_fail(webkit_patch_args, cwd=self._tool.scm().checkout_root)
+
+    def _json_path(self):
+        return self._tool.filesystem.join(self._build_directory, 'PerformanceTestResults.json')
+
+    def _results_page_path(self):
+        return self._tool.filesystem.join(self._build_directory, 'PerformanceTestResults.html')
+
+    def _run_perf_test(self, build_path, description):
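+        # Invoke the run-perf-tests script that lives alongside this tool's
+        # entry point against an already-built product (--no-build), writing
+        # results to the shared JSON file used for the results page.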
+        filesystem = self._tool.filesystem
+        script_path = filesystem.join(filesystem.dirname(self._tool.path()), 'run-perf-tests')
+        perf_test_runner_args = [script_path, '--no-build', '--no-show-results', '--build-directory', build_path,
+            '--output-json-path', self._json_path(), '--description', description]
+        return self._tool.executive.run_and_throw_if_fail(perf_test_runner_args, cwd=self._tool.scm().checkout_root)
+
+    def run_command(self, command):
+        self.run_webkit_patch(command)
+
+    def command_passed(self, message, patch):
+        pass
+
+    def command_failed(self, message, script_error, patch):
+        self._logger(message)
+
+    def refetch_patch(self, patch):
+        return self._tool.bugs.fetch_attachment(patch.id())
+
+    def expected_failures(self):
+        return ExpectedFailures()
+
+    def build_style(self):
+        return "release"
+
+
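+# The IRC "test <attachment-id>" command: fetches the given attachment and
+# runs a PerfalizerTask for it, echoing progress back to the requesting nick.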
+class PerfTest(IRCCommand):
+    def execute(self, nick, args, tool, sheriff):
+        if not args:
+            tool.irc().post(nick + ": Please specify an attachment/patch id")
+            return
+
+        patch_id = args[0]
+        patch = tool.bugs.fetch_attachment(patch_id)
+        if not patch:
+            tool.irc().post(nick + ": Could not fetch the patch")
+            return
+
+        task = PerfalizerTask(tool, patch, lambda message: tool.irc().post('%s: %s' % (nick, message)))
+        task.run()
+
+
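+# The perfalizer is an IRC-driven queue: its work loop only pumps pending IRC
+# messages, and the actual perf runs are triggered through the "test" command.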
+class Perfalizer(AbstractQueue, StepSequenceErrorHandler):
+    name = "perfalizer"
+    watchers = AbstractQueue.watchers + ["rniwa@webkit.org"]
+
+    _commands = {
+        "help": Help,
+        "hi": Hi,
+        "restart": Restart,
+        "test": PerfTest,
+    }
+
+    # AbstractQueue methods
+
+    def begin_work_queue(self):
+        AbstractQueue.begin_work_queue(self)
+        self._sheriff = Sheriff(self._tool, self)
+        self._irc_bot = IRCBot("perfalizer", self._tool, self._sheriff, self._commands)
+        self._tool.ensure_irc_connected(self._irc_bot.irc_delegate())
+
+    def work_item_log_path(self, failure_map):
+        return None
+
+    def _is_old_failure(self, revision):
+        return self._tool.status_server.svn_revision(revision)
+
+    def next_work_item(self):
+        self._irc_bot.process_pending_messages()
+        return
+
+    def process_work_item(self, failure_map):
+        return True
+
+    def handle_unexpected_error(self, failure_map, message):
+        log(message)
+
+    # StepSequenceErrorHandler methods
+
+    @classmethod
+    def handle_script_error(cls, tool, state, script_error):
+        # Ideally we would post some information to IRC about what went wrong
+        # here, but we don't have the IRC password in the child process.
+        pass
diff --git a/Tools/Scripts/webkitpy/tool/commands/perfalizer_unittest.py b/Tools/Scripts/webkitpy/tool/commands/perfalizer_unittest.py
new file mode 100644
index 0000000..feb7b05
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/perfalizer_unittest.py
@@ -0,0 +1,112 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.net.buildbot import Builder
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.layout_tests.port.test import TestPort
+from webkitpy.tool.commands.perfalizer import PerfalizerTask
+from webkitpy.tool.mocktool import MockTool
+
+
+class PerfalizerTaskTest(unittest.TestCase):
+    def _create_and_run_perfalizer(self, commands_to_fail=[]):
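+        # Test helper: builds a PerfalizerTask against MockTool, stubs out
+        # run_webkit_patch and _run_perf_test so the named commands fail,
+        # runs the task, and returns the messages it logged.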
+        tool = MockTool()
+        patch = tool.bugs.fetch_attachment(10000)
+
+        logs = []
+
+        def logger(message):
+            logs.append(message)
+
+        def run_webkit_patch(args):
+            if args[0] in commands_to_fail:
+                raise ScriptError
+
+        def run_perf_test(build_path, description):
+            self.assertTrue(description == 'without 10000' or description == 'with 10000')
+            if 'run-perf-tests' in commands_to_fail:
+                return -1
+            if 'results-page' not in commands_to_fail:
+                tool.filesystem.write_text_file(tool.filesystem.join(build_path, 'PerformanceTestResults.html'), 'results page')
+            return 0
+
+        perfalizer = PerfalizerTask(tool, patch, logger)
+        perfalizer._port = TestPort(tool)
+        perfalizer.run_webkit_patch = run_webkit_patch
+        perfalizer._run_perf_test = run_perf_test
+
+        capture = OutputCapture()
+        capture.capture_output()
+
+        if commands_to_fail:
+            self.assertFalse(perfalizer.run())
+        else:
+            self.assertTrue(perfalizer.run())
+
+        capture.restore_output()
+
+        return logs
+
+    def test_run(self):
+        self.assertEqual(self._create_and_run_perfalizer(), [
+            'Preparing to run performance tests for the attachment 10000...',
+            'Building WebKit at r1234 without the patch',
+            'Building WebKit at r1234 with the patch',
+            'Running performance tests...',
+            'Uploaded the results to bug 50000'])
+
+    def test_run_with_clean_fails(self):
+        self.assertEqual(self._create_and_run_perfalizer(['clean']), [
+            'Preparing to run performance tests for the attachment 10000...',
+            'Unable to clean working directory'])
+
+    def test_run_with_update_fails(self):
+        logs = self._create_and_run_perfalizer(['update'])
+        self.assertEqual(len(logs), 2)
+        self.assertEqual(logs[-1], 'Unable to update working directory')
+
+    def test_run_with_build_fails(self):
+        logs = self._create_and_run_perfalizer(['build'])
+        self.assertEqual(len(logs), 3)
+
+    def test_run_with_apply_fails(self):
+        logs = self._create_and_run_perfalizer(['apply-attachment'])
+        self.assertEqual(len(logs), 4)
+
+    def test_run_with_perf_test_fails(self):
+        logs = self._create_and_run_perfalizer(['run-perf-tests'])
+        self.assertEqual(len(logs), 5)
+        self.assertEqual(logs[-1], 'Failed to run performance tests without the patch.')
+
+    def test_run_without_results_page(self):
+        logs = self._create_and_run_perfalizer(['results-page'])
+        self.assertEqual(len(logs), 5)
+        self.assertEqual(logs[-1], 'Failed to generate the results page.')
diff --git a/Tools/Scripts/webkitpy/tool/commands/prettydiff.py b/Tools/Scripts/webkitpy/tool/commands/prettydiff.py
new file mode 100644
index 0000000..66a06a6
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/prettydiff.py
@@ -0,0 +1,39 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.commands.abstractsequencedcommand import AbstractSequencedCommand
+from webkitpy.tool import steps
+
+
+class PrettyDiff(AbstractSequencedCommand):
+    name = "pretty-diff"
+    help_text = "Shows the pretty diff in the default browser"
+    show_in_main_help = True
+    steps = [
+        steps.ConfirmDiff,
+    ]
diff --git a/Tools/Scripts/webkitpy/tool/commands/queries.py b/Tools/Scripts/webkitpy/tool/commands/queries.py
new file mode 100644
index 0000000..b7e4a85
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/queries.py
@@ -0,0 +1,568 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+# Copyright (c) 2012 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import fnmatch
+import re
+
+from datetime import datetime
+from optparse import make_option
+
+from webkitpy.tool import steps
+
+from webkitpy.common.checkout.commitinfo import CommitInfo
+from webkitpy.common.config.committers import CommitterList
+import webkitpy.common.config.urls as config_urls
+from webkitpy.common.net.buildbot import BuildBot
+from webkitpy.common.net.regressionwindow import RegressionWindow
+from webkitpy.common.system.crashlogs import CrashLogs
+from webkitpy.common.system.user import User
+from webkitpy.tool.grammar import pluralize
+from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+from webkitpy.common.system.deprecated_logging import log
+from webkitpy.layout_tests.models.test_expectations import TestExpectations
+from webkitpy.layout_tests.port import platform_options, configuration_options
+
+
+class SuggestReviewers(AbstractDeclarativeCommand):
+    name = "suggest-reviewers"
+    help_text = "Suggest reviewers for a patch based on recent changes to the modified files."
+
+    def __init__(self):
+        options = [
+            steps.Options.git_commit,
+        ]
+        AbstractDeclarativeCommand.__init__(self, options=options)
+
+    def execute(self, options, args, tool):
+        reviewers = tool.checkout().suggested_reviewers(options.git_commit)
+        print "\n".join([reviewer.full_name for reviewer in reviewers])
+
+
+class BugsToCommit(AbstractDeclarativeCommand):
+    name = "bugs-to-commit"
+    help_text = "List bugs in the commit-queue"
+
+    def execute(self, options, args, tool):
+        # FIXME: This command is poorly named.  It's fetching the commit-queue list here.  The name implies it's fetching pending-commit (all r+'d patches).
+        bug_ids = tool.bugs.queries.fetch_bug_ids_from_commit_queue()
+        for bug_id in bug_ids:
+            print "%s" % bug_id
+
+
+class PatchesInCommitQueue(AbstractDeclarativeCommand):
+    name = "patches-in-commit-queue"
+    help_text = "List patches in the commit-queue"
+
+    def execute(self, options, args, tool):
+        patches = tool.bugs.queries.fetch_patches_from_commit_queue()
+        log("Patches in commit queue:")
+        for patch in patches:
+            print patch.url()
+
+
+class PatchesToCommitQueue(AbstractDeclarativeCommand):
+    name = "patches-to-commit-queue"
+    help_text = "Patches which should be added to the commit queue"
+    def __init__(self):
+        options = [
+            make_option("--bugs", action="store_true", dest="bugs", help="Output bug links instead of patch links"),
+        ]
+        AbstractDeclarativeCommand.__init__(self, options=options)
+
+    @staticmethod
+    def _needs_commit_queue(patch):
+        if patch.commit_queue() == "+": # If it's already cq+, ignore the patch.
+            log("%s already has cq=%s" % (patch.id(), patch.commit_queue()))
+            return False
+
+        # We only need to worry about patches from contributors who are not yet committers.
+        committer_record = CommitterList().committer_by_email(patch.attacher_email())
+        if committer_record:
+            log("%s committer = %s" % (patch.id(), committer_record))
+        return not committer_record
+
+    def execute(self, options, args, tool):
+        patches = tool.bugs.queries.fetch_patches_from_pending_commit_list()
+        patches_needing_cq = filter(self._needs_commit_queue, patches)
+        if options.bugs:
+            bugs_needing_cq = map(lambda patch: patch.bug_id(), patches_needing_cq)
+            bugs_needing_cq = sorted(set(bugs_needing_cq))
+            for bug_id in bugs_needing_cq:
+                print "%s" % tool.bugs.bug_url_for_bug_id(bug_id)
+        else:
+            for patch in patches_needing_cq:
+                print "%s" % tool.bugs.attachment_url_for_id(patch.id(), action="edit")
+
+
+class PatchesToReview(AbstractDeclarativeCommand):
+    name = "patches-to-review"
+    help_text = "List bugs which have attachments pending review"
+
+    def __init__(self):
+        options = [
+            make_option("--all", action="store_true",
+                        help="Show all bugs regardless of who is on CC (it might take a while)"),
+            make_option("--include-cq-denied", action="store_true",
+                        help="By default, r? patches with cq- are omitted unless this option is set"),
+            make_option("--cc-email",
+                        help="Specifies the email on the CC field (defaults to your bugzilla login email)"),
+        ]
+        AbstractDeclarativeCommand.__init__(self, options=options)
+
+    def _print_report(self, report, cc_email, print_all):
+        if print_all:
+            print "Bugs with attachments pending review:"
+        else:
+            print "Bugs with attachments pending review that have %s in the CC list:" % cc_email
+
+        print "http://webkit.org/b/bugid   Description (age in days)"
+        for row in report:
+            print "%s (%d)" % (row[1], row[0])
+
+        print "Total: %d" % len(report)
+
+    def _generate_report(self, bugs, include_cq_denied):
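+        # Build (age_in_days, "url  title") rows for the last unreviewed
+        # patch on each bug, optionally skipping patches marked cq-.
+        # Sorting the tuples orders the report by patch age.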
+        report = []
+
+        for bug in bugs:
+            patch = bug.unreviewed_patches()[-1]
+
+            if not include_cq_denied and patch.commit_queue() == "-":
+                continue
+
+            age_in_days = (datetime.today() - patch.attach_date()).days
+            report.append((age_in_days, "http://webkit.org/b/%-7s %s" % (bug.id(), bug.title())))
+
+        report.sort()
+        return report
+
+    def execute(self, options, args, tool):
+        tool.bugs.authenticate()
+
+        cc_email = options.cc_email
+        if not cc_email and not options.all:
+            cc_email = tool.bugs.username
+
+        bugs = tool.bugs.queries.fetch_bugs_from_review_queue(cc_email=cc_email)
+        report = self._generate_report(bugs, options.include_cq_denied)
+        self._print_report(report, cc_email, options.all)
+
+class WhatBroke(AbstractDeclarativeCommand):
+    name = "what-broke"
+    help_text = "Print failing buildbots (%s) and what revisions broke them" % config_urls.buildbot_url
+
+    def _print_builder_line(self, builder_name, max_name_width, status_message):
+        print "%s : %s" % (builder_name.ljust(max_name_width), status_message)
+
+    def _print_blame_information_for_builder(self, builder_status, name_width, avoid_flakey_tests=True):
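+        # For a red builder, fetch its reported red build, compute the
+        # regression window, and print the blame list (ChangeLog information
+        # for each suspect revision).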
+        builder = self._tool.buildbot.builder_with_name(builder_status["name"])
+        red_build = builder.build(builder_status["build_number"])
+        regression_window = builder.find_regression_window(red_build)
+        if not regression_window.failing_build():
+            self._print_builder_line(builder.name(), name_width, "FAIL (error loading build information)")
+            return
+        if not regression_window.build_before_failure():
+            self._print_builder_line(builder.name(), name_width, "FAIL (blame-list: sometime before %s?)" % regression_window.failing_build().revision())
+            return
+
+        revisions = regression_window.revisions()
+        first_failure_message = ""
+        if (regression_window.failing_build() == builder.build(builder_status["build_number"])):
+            first_failure_message = " FIRST FAILURE, possibly a flaky test"
+        self._print_builder_line(builder.name(), name_width, "FAIL (blame-list: %s%s)" % (revisions, first_failure_message))
+        for revision in revisions:
+            commit_info = self._tool.checkout().commit_info_for_revision(revision)
+            if commit_info:
+                print commit_info.blame_string(self._tool.bugs)
+            else:
+                print "FAILED to fetch CommitInfo for r%s, likely missing ChangeLog" % revision
+
+    def execute(self, options, args, tool):
+        builder_statuses = tool.buildbot.builder_statuses()
+        longest_builder_name = max(map(len, map(lambda builder: builder["name"], builder_statuses)))
+        failing_builders = 0
+        for builder_status in builder_statuses:
+            # Skip builders that are currently green.
+            if builder_status["is_green"]:
+                continue
+            self._print_blame_information_for_builder(builder_status, name_width=longest_builder_name)
+            failing_builders += 1
+        if failing_builders:
+            print "%s of %s are failing" % (failing_builders, pluralize("builder", len(builder_statuses)))
+        else:
+            print "All builders are passing!"
+
+
+class ResultsFor(AbstractDeclarativeCommand):
+    name = "results-for"
+    help_text = "Print a list of failures for the passed revision from bots on %s" % config_urls.buildbot_url
+    argument_names = "REVISION"
+
+    def _print_layout_test_results(self, results):
+        if not results:
+            print " No results."
+            return
+        for title, files in results.parsed_results().items():
+            print " %s" % title
+            for filename in files:
+                print "  %s" % filename
+
+    def execute(self, options, args, tool):
+        builders = self._tool.buildbot.builders()
+        for builder in builders:
+            print "%s:" % builder.name()
+            build = builder.build_for_revision(args[0], allow_failed_lookups=True)
+            self._print_layout_test_results(build.layout_test_results())
+
+
+class FailureReason(AbstractDeclarativeCommand):
+    name = "failure-reason"
+    help_text = "Lists revisions where individual test failures started at %s" % config_urls.buildbot_url
+
+    def _blame_line_for_revision(self, revision):
+        try:
+            commit_info = self._tool.checkout().commit_info_for_revision(revision)
+        except Exception, e:
+            return "FAILED to fetch CommitInfo for r%s, exception: %s" % (revision, e)
+        if not commit_info:
+            return "FAILED to fetch CommitInfo for r%s, likely missing ChangeLog" % revision
+        return commit_info.blame_string(self._tool.bugs)
+
+    def _print_blame_information_for_transition(self, regression_window, failing_tests):
+        red_build = regression_window.failing_build()
+        print "SUCCESS: Build %s (r%s) was the first to show failures: %s" % (red_build._number, red_build.revision(), failing_tests)
+        print "Suspect revisions:"
+        for revision in regression_window.revisions():
+            print self._blame_line_for_revision(revision)
+
+    def _explain_failures_for_builder(self, builder, start_revision):
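+        # Walk backwards one revision at a time from start_revision.  When a
+        # test that is failing at the start stops failing in an older build,
+        # the window between that build and the last build with usable
+        # results is reported as the suspect regression range for it.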
+        print "Examining failures for \"%s\", starting at r%s" % (builder.name(), start_revision)
+        revision_to_test = start_revision
+        build = builder.build_for_revision(revision_to_test, allow_failed_lookups=True)
+        layout_test_results = build.layout_test_results()
+        if not layout_test_results:
+            # FIXME: This could be made more user friendly.
+            print "Failed to load layout test results from %s; can't continue. (start revision = r%s)" % (build.results_url(), start_revision)
+            return 1
+
+        results_to_explain = set(layout_test_results.failing_tests())
+        last_build_with_results = build
+        print "Starting at %s" % revision_to_test
+        while results_to_explain:
+            revision_to_test -= 1
+            new_build = builder.build_for_revision(revision_to_test, allow_failed_lookups=True)
+            if not new_build:
+                print "No build for %s" % revision_to_test
+                continue
+            build = new_build
+            latest_results = build.layout_test_results()
+            if not latest_results:
+                print "No results for build %s (r%s)" % (build._number, build.revision())
+                continue
+            failures = set(latest_results.failing_tests())
+            if len(failures) >= 20:
+                # FIXME: We may need to move this logic into the LayoutTestResults class.
+                # The buildbot stops runs after 20 failures so we don't have full results to work with here.
+                print "Too many failures in build %s (r%s), ignoring." % (build._number, build.revision())
+                continue
+            fixed_results = results_to_explain - failures
+            if not fixed_results:
+                print "No change in build %s (r%s), %s unexplained failures (%s in this build)" % (build._number, build.revision(), len(results_to_explain), len(failures))
+                last_build_with_results = build
+                continue
+            regression_window = RegressionWindow(build, last_build_with_results)
+            self._print_blame_information_for_transition(regression_window, fixed_results)
+            last_build_with_results = build
+            results_to_explain -= fixed_results
+        if results_to_explain:
+            print "Failed to explain failures: %s" % results_to_explain
+            return 1
+        print "Explained all results for %s" % builder.name()
+        return 0
+
+    def _builder_to_explain(self):
+        builder_statuses = self._tool.buildbot.builder_statuses()
+        red_statuses = [status for status in builder_statuses if not status["is_green"]]
+        print "%s failing" % (pluralize("builder", len(red_statuses)))
+        builder_choices = [status["name"] for status in red_statuses]
+        # We could offer an "All" choice here.
+        chosen_name = self._tool.user.prompt_with_list("Which builder to diagnose:", builder_choices)
+        # FIXME: prompt_with_list should really take a set of objects and a set of names and then return the object.
+        for status in red_statuses:
+            if status["name"] == chosen_name:
+                return (self._tool.buildbot.builder_with_name(chosen_name), status["built_revision"])
+
+    def execute(self, options, args, tool):
+        (builder, latest_revision) = self._builder_to_explain()
+        start_revision = self._tool.user.prompt("Revision to walk backwards from? [%s] " % latest_revision) or latest_revision
+        if not start_revision:
+            print "Revision required."
+            return 1
+        return self._explain_failures_for_builder(builder, start_revision=int(start_revision))
+
+
+class FindFlakyTests(AbstractDeclarativeCommand):
+    name = "find-flaky-tests"
+    help_text = "Lists tests that often fail for a single build at %s" % config_urls.buildbot_url
+
+    def _find_failures(self, builder, revision):
+        build = builder.build_for_revision(revision, allow_failed_lookups=True)
+        if not build:
+            print "No build for %s" % revision
+            return (None, None)
+        results = build.layout_test_results()
+        if not results:
+            print "No results for build %s (r%s)" % (build._number, build.revision())
+            return (None, None)
+        failures = set(results.failing_tests())
+        if len(failures) >= 20:
+            # FIXME: We may need to move this logic into the LayoutTestResults class.
+            # The buildbot stops runs after 20 failures so we don't have full results to work with here.
+            print "Too many failures in build %s (r%s), ignoring." % (build._number, build.revision())
+            return (None, None)
+        return (build, failures)
+
+    def _increment_statistics(self, flaky_tests, flaky_test_statistics):
+        for test in flaky_tests:
+            count = flaky_test_statistics.get(test, 0)
+            flaky_test_statistics[test] = count + 1
+
+    def _print_statistics(self, statistics):
+        print "=== Results ==="
+        print "Occurrences Test name"
+        for value, key in sorted([(value, key) for key, value in statistics.items()]):
+            print "%11d %s" % (value, key)
+
+    def _walk_backwards_from(self, builder, start_revision, limit):
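+        # A test is counted as flaky when it failed in exactly one build but
+        # passed in both the adjacent newer and adjacent older builds seen
+        # while walking backwards from start_revision.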
+        flaky_test_statistics = {}
+        all_previous_failures = set([])
+        one_time_previous_failures = set([])
+        previous_build = None
+        for i in range(limit):
+            revision = start_revision - i
+            print "Analyzing %s ... " % revision,
+            (build, failures) = self._find_failures(builder, revision)
+            if failures is None:
+                # Notice that we don't loop on the empty set!
+                continue
+            print "has %s failures" % len(failures)
+            flaky_tests = one_time_previous_failures - failures
+            if flaky_tests:
+                print "Flaky tests: %s %s" % (sorted(flaky_tests),
+                                              previous_build.results_url())
+            self._increment_statistics(flaky_tests, flaky_test_statistics)
+            one_time_previous_failures = failures - all_previous_failures
+            all_previous_failures = failures
+            previous_build = build
+        self._print_statistics(flaky_test_statistics)
+
+    def _builder_to_analyze(self):
+        statuses = self._tool.buildbot.builder_statuses()
+        choices = [status["name"] for status in statuses]
+        chosen_name = self._tool.user.prompt_with_list("Which builder to analyze:", choices)
+        for status in statuses:
+            if status["name"] == chosen_name:
+                return (self._tool.buildbot.builder_with_name(chosen_name), status["built_revision"])
+
+    def execute(self, options, args, tool):
+        (builder, latest_revision) = self._builder_to_analyze()
+        limit = self._tool.user.prompt("How many revisions to look through? [10000] ") or 10000
+        return self._walk_backwards_from(builder, latest_revision, limit=int(limit))
+
+
+class TreeStatus(AbstractDeclarativeCommand):
+    name = "tree-status"
+    help_text = "Print the status of the %s buildbots" % config_urls.buildbot_url
+    long_help = """Fetches build status from http://build.webkit.org/one_box_per_builder
+and displays the status of each builder."""
+
+    def execute(self, options, args, tool):
+        for builder in tool.buildbot.builder_statuses():
+            status_string = "ok" if builder["is_green"] else "FAIL"
+            print "%s : %s" % (status_string.ljust(4), builder["name"])
+
+
+class CrashLog(AbstractDeclarativeCommand):
+    name = "crash-log"
+    help_text = "Print the newest crash log for the given process"
+    long_help = """Finds the newest crash log matching the given process name
+and PID and prints it to stdout."""
+    argument_names = "PROCESS_NAME [PID]"
+
+    def execute(self, options, args, tool):
+        crash_logs = CrashLogs(tool)
+        pid = None
+        if len(args) > 1:
+            pid = int(args[1])
+        print crash_logs.find_newest_log(args[0], pid)
+
+
+class PrintExpectations(AbstractDeclarativeCommand):
+    name = 'print-expectations'
+    help_text = 'Print the expected result for the given test(s) on the given port(s)'
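+    # Example usage (assuming the usual webkit-patch driver; the platform
+    # glob and test path are illustrative):
+    #   webkit-patch print-expectations --platform 'chromium*' fast/forms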
+
+    def __init__(self):
+        options = [
+            make_option('--all', action='store_true', default=False,
+                        help='display the expectations for *all* tests'),
+            make_option('-x', '--exclude-keyword', action='append', default=[],
+                        help='limit to tests not matching the given keyword (for example, "skip", "slow", or "crash"). May specify multiple times'),
+            make_option('-i', '--include-keyword', action='append', default=[],
+                        help='limit to tests with the given keyword (for example, "skip", "slow", or "crash"). May specify multiple times'),
+            make_option('--csv', action='store_true', default=False,
+                        help='Print a CSV-style report that includes the port name, modifiers, tests, and expectations'),
+            make_option('-f', '--full', action='store_true', default=False,
+                        help='Print a full TestExpectations-style line for every match'),
+        ] + platform_options(use_globs=True)
+
+        AbstractDeclarativeCommand.__init__(self, options=options)
+        self._expectation_models = {}
+
+    def execute(self, options, args, tool):
+        if not args and not options.all:
+            print "You must either specify one or more test paths or --all."
+            return
+
+        if options.platform:
+            port_names = fnmatch.filter(tool.port_factory.all_port_names(), options.platform)
+            if not port_names:
+                default_port = tool.port_factory.get(options.platform)
+                if default_port:
+                    port_names = [default_port.name()]
+                else:
+                    print "No port names match '%s'" % options.platform
+                    return
+            else:
+                default_port = tool.port_factory.get(port_names[0])
+        else:
+            default_port = tool.port_factory.get(options=options)
+            port_names = [default_port.name()]
+
+        tests = default_port.tests(args)
+        for port_name in port_names:
+            model = self._model(options, port_name, tests)
+            tests_to_print = self._filter_tests(options, model, tests)
+            lines = [model.get_expectation_line(test) for test in sorted(tests_to_print)]
+            if port_name != port_names[0]:
+                print
+            print '\n'.join(self._format_lines(options, port_name, lines))
+
+    def _filter_tests(self, options, model, tests):
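+        # Start from the union of tests matching any --include-keyword (or
+        # everything when none is given), then drop tests matching any
+        # --exclude-keyword.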
+        filtered_tests = set()
+        if options.include_keyword:
+            for keyword in options.include_keyword:
+                filtered_tests.update(model.get_test_set_for_keyword(keyword))
+        else:
+            filtered_tests = tests
+
+        for keyword in options.exclude_keyword:
+            filtered_tests.difference_update(model.get_test_set_for_keyword(keyword))
+        return filtered_tests
+
+    def _format_lines(self, options, port_name, lines):
+        output = []
+        if options.csv:
+            for line in lines:
+                output.append("%s,%s" % (port_name, line.to_csv()))
+        elif lines:
+            include_modifiers = options.full
+            include_expectations = options.full or len(options.include_keyword) != 1 or len(options.exclude_keyword)
+            output.append("// For %s" % port_name)
+            for line in lines:
+                output.append("%s" % line.to_string(None, include_modifiers, include_expectations, include_comment=False))
+        return output
+
+    def _model(self, options, port_name, tests):
+        port = self._tool.port_factory.get(port_name, options)
+        expectations_path = port.path_to_test_expectations_file()
+        if expectations_path not in self._expectation_models:
+            self._expectation_models[expectations_path] = TestExpectations(port, tests).model()
+        return self._expectation_models[expectations_path]
+
+
+class PrintBaselines(AbstractDeclarativeCommand):
+    name = 'print-baselines'
+    help_text = 'Prints the baseline locations for the given test(s) on the given port(s)'
+
+    def __init__(self):
+        options = [
+            make_option('--all', action='store_true', default=False,
+                        help='display the baselines for *all* tests'),
+            make_option('--csv', action='store_true', default=False,
+                        help='Print a CSV-style report that includes the port name, test_name, test platform, baseline type, baseline location, and baseline platform'),
+            make_option('--include-virtual-tests', action='store_true',
+                        help='Include virtual tests'),
+        ] + platform_options(use_globs=True)
+        AbstractDeclarativeCommand.__init__(self, options=options)
+        self._platform_regexp = re.compile('platform/([^\/]+)/(.+)')
+
+    def execute(self, options, args, tool):
+        if not args and not options.all:
+            print "You must either specify one or more test paths or --all."
+            return
+
+        default_port = tool.port_factory.get()
+        if options.platform:
+            port_names = fnmatch.filter(tool.port_factory.all_port_names(), options.platform)
+            if not port_names:
+                print "No port names match '%s'" % options.platform
+                return
+        else:
+            port_names = [default_port.name()]
+
+        if options.include_virtual_tests:
+            tests = sorted(default_port.tests(args))
+        else:
+            # FIXME: make real_tests() a public method.
+            tests = sorted(default_port._real_tests(args))
+
+        for port_name in port_names:
+            if port_name != port_names[0]:
+                print
+            if not options.csv:
+                print "// For %s" % port_name
+            port = tool.port_factory.get(port_name)
+            for test_name in tests:
+                self._print_baselines(options, port_name, test_name, port.expected_baselines_by_extension(test_name))
+
+    def _print_baselines(self, options, port_name, test_name, baselines):
+        for extension in sorted(baselines.keys()):
+            baseline_location = baselines[extension]
+            if baseline_location:
+                if options.csv:
+                    print "%s,%s,%s,%s,%s,%s" % (port_name, test_name, self._platform_for_path(test_name),
+                                                 extension[1:], baseline_location, self._platform_for_path(baseline_location))
+                else:
+                    print baseline_location
+
+    def _platform_for_path(self, relpath):
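+        # Pull the platform directory out of a baseline path such as
+        # "platform/test-win-xp/passes/text-expected.txt"; returns None for
+        # generic (non-platform) baselines.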
+        platform_matchobj = self._platform_regexp.match(relpath)
+        if platform_matchobj:
+            return platform_matchobj.group(1)
+        return None
diff --git a/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py b/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py
new file mode 100644
index 0000000..79bf1ca
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py
@@ -0,0 +1,284 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+# Copyright (C) 2012 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.net.bugzilla import Bugzilla
+from webkitpy.thirdparty.mock import Mock
+from webkitpy.layout_tests.port.test import TestPort
+from webkitpy.tool.commands.commandtest import CommandsTest
+from webkitpy.tool.commands.queries import *
+from webkitpy.tool.mocktool import MockTool, MockOptions
+
+
+class MockTestPort1(object):
+    def skips_layout_test(self, test_name):
+        return test_name in ["media/foo/bar.html", "foo"]
+
+
+class MockTestPort2(object):
+    def skips_layout_test(self, test_name):
+        return test_name == "media/foo/bar.html"
+
+
+class MockPortFactory(object):
+    def __init__(self):
+        self._all_ports = {
+            "test_port1": MockTestPort1(),
+            "test_port2": MockTestPort2(),
+        }
+
+    def all_port_names(self, options=None):
+        return self._all_ports.keys()
+
+    def get(self, port_name):
+        return self._all_ports.get(port_name)
+
+
+class QueryCommandsTest(CommandsTest):
+    def test_bugs_to_commit(self):
+        expected_stderr = "Warning, attachment 10001 on bug 50000 has invalid committer (non-committer@example.com)\n"
+        self.assert_execute_outputs(BugsToCommit(), None, "50000\n50003\n", expected_stderr)
+
+    def test_patches_in_commit_queue(self):
+        expected_stdout = "http://example.com/10000\nhttp://example.com/10002\n"
+        expected_stderr = "Warning, attachment 10001 on bug 50000 has invalid committer (non-committer@example.com)\nPatches in commit queue:\n"
+        self.assert_execute_outputs(PatchesInCommitQueue(), None, expected_stdout, expected_stderr)
+
+    def test_patches_to_commit_queue(self):
+        expected_stdout = "http://example.com/10003&action=edit\n"
+        expected_stderr = "10000 already has cq=+\n10001 already has cq=+\n10004 committer = \"Eric Seidel\" <eric@webkit.org>\n"
+        options = Mock()
+        options.bugs = False
+        self.assert_execute_outputs(PatchesToCommitQueue(), None, expected_stdout, expected_stderr, options=options)
+
+        expected_stdout = "http://example.com/50003\n"
+        options.bugs = True
+        self.assert_execute_outputs(PatchesToCommitQueue(), None, expected_stdout, expected_stderr, options=options)
+
+    def test_patches_to_review(self):
+        options = Mock()
+
+        # When no cc_email is provided, we use the Bugzilla username by default.
+        # MockBugzilla fakes authentication as username@webkit.org, which
+        # should match the username shown in the report header.
+        options.cc_email = None
+        options.include_cq_denied = False
+        options.all = False
+        expected_stdout = \
+            "Bugs with attachments pending review that have username@webkit.org in the CC list:\n" \
+            "http://webkit.org/b/bugid   Description (age in days)\n" \
+            "Total: 0\n"
+        expected_stderr = ""
+        self.assert_execute_outputs(PatchesToReview(), None, expected_stdout, expected_stderr, options=options)
+
+        options.cc_email = "abarth@webkit.org"
+        options.include_cq_denied = True
+        options.all = False
+        expected_stdout = \
+            "Bugs with attachments pending review that have abarth@webkit.org in the CC list:\n" \
+            "http://webkit.org/b/bugid   Description (age in days)\n" \
+            "http://webkit.org/b/50001   Bug with a patch needing review. (0)\n" \
+            "Total: 1\n"
+        expected_stderr = ""
+        self.assert_execute_outputs(PatchesToReview(), None, expected_stdout, expected_stderr, options=options)
+
+        options.cc_email = None
+        options.include_cq_denied = True
+        options.all = True
+        expected_stdout = \
+            "Bugs with attachments pending review:\n" \
+            "http://webkit.org/b/bugid   Description (age in days)\n" \
+            "http://webkit.org/b/50001   Bug with a patch needing review. (0)\n" \
+            "Total: 1\n"
+        self.assert_execute_outputs(PatchesToReview(), None, expected_stdout, expected_stderr, options=options)
+
+        options.cc_email = None
+        options.include_cq_denied = False
+        options.all = True
+        expected_stdout = \
+            "Bugs with attachments pending review:\n" \
+            "http://webkit.org/b/bugid   Description (age in days)\n" \
+            "Total: 0\n"
+        self.assert_execute_outputs(PatchesToReview(), None, expected_stdout, expected_stderr, options=options)
+
+        options.cc_email = "invalid_email@example.com"
+        options.all = False
+        options.include_cq_denied = True
+        expected_stdout = \
+            "Bugs with attachments pending review that have invalid_email@example.com in the CC list:\n" \
+            "http://webkit.org/b/bugid   Description (age in days)\n" \
+            "Total: 0\n"
+        self.assert_execute_outputs(PatchesToReview(), None, expected_stdout, expected_stderr, options=options)
+
+    def test_tree_status(self):
+        expected_stdout = "ok   : Builder1\nok   : Builder2\n"
+        self.assert_execute_outputs(TreeStatus(), None, expected_stdout)
+
+
+class FailureReasonTest(unittest.TestCase):
+    def test_blame_line_for_revision(self):
+        tool = MockTool()
+        command = FailureReason()
+        command.bind_to_tool(tool)
+        # This is an artificial example, mostly to test the CommitInfo lookup failure case.
+        self.assertEquals(command._blame_line_for_revision(0), "FAILED to fetch CommitInfo for r0, likely missing ChangeLog")
+
+        def raising_mock(self):
+            raise Exception("MESSAGE")
+        tool.checkout().commit_info_for_revision = raising_mock
+        self.assertEquals(command._blame_line_for_revision(0), "FAILED to fetch CommitInfo for r0, exception: MESSAGE")
+
+
+class PrintExpectationsTest(unittest.TestCase):
+    def run_test(self, tests, expected_stdout, platform='test-win-xp', **args):
+        options = MockOptions(all=False, csv=False, full=False, platform=platform,
+                              include_keyword=[], exclude_keyword=[]).update(**args)
+        tool = MockTool()
+        tool.port_factory.all_port_names = lambda: TestPort.ALL_BASELINE_VARIANTS
+        command = PrintExpectations()
+        command.bind_to_tool(tool)
+
+        oc = OutputCapture()
+        try:
+            oc.capture_output()
+            command.execute(options, tests, tool)
+        finally:
+            stdout, _, _ = oc.restore_output()
+        self.assertEquals(stdout, expected_stdout)
+
+    def test_basic(self):
+        self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
+                      ('// For test-win-xp\n'
+                       'failures/expected/image.html [ ImageOnlyFailure ]\n'
+                       'failures/expected/text.html [ Failure ]\n'))
+
+    def test_multiple(self):
+        self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
+                      ('// For test-win-vista\n'
+                       'failures/expected/image.html [ ImageOnlyFailure ]\n'
+                       'failures/expected/text.html [ Failure ]\n'
+                       '\n'
+                       '// For test-win-win7\n'
+                       'failures/expected/image.html [ ImageOnlyFailure ]\n'
+                       'failures/expected/text.html [ Failure ]\n'
+                       '\n'
+                       '// For test-win-xp\n'
+                       'failures/expected/image.html [ ImageOnlyFailure ]\n'
+                       'failures/expected/text.html [ Failure ]\n'),
+                       platform='test-win-*')
+
+    def test_full(self):
+        self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
+                      ('// For test-win-xp\n'
+                       'Bug(test) failures/expected/image.html [ ImageOnlyFailure ]\n'
+                       'Bug(test) failures/expected/text.html [ Failure ]\n'),
+                      full=True)
+
+    def test_exclude(self):
+        self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
+                      ('// For test-win-xp\n'
+                       'failures/expected/text.html [ Failure ]\n'),
+                      exclude_keyword=['image'])
+
+    def test_include(self):
+        self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
+                      ('// For test-win-xp\n'
+                       'failures/expected/image.html\n'),
+                      include_keyword=['image'])
+
+    def test_csv(self):
+        self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
+                      ('test-win-xp,failures/expected/image.html,BUGTEST,IMAGE\n'
+                       'test-win-xp,failures/expected/text.html,BUGTEST,FAIL\n'),
+                      csv=True)
+
+
+class PrintBaselinesTest(unittest.TestCase):
+    def setUp(self):
+        self.oc = None
+        self.tool = MockTool()
+        self.test_port = self.tool.port_factory.get('test-win-xp')
+        self.tool.port_factory.get = lambda port_name=None: self.test_port
+        self.tool.port_factory.all_port_names = lambda: TestPort.ALL_BASELINE_VARIANTS
+
+    def tearDown(self):
+        if self.oc:
+            self.restore_output()
+
+    def capture_output(self):
+        self.oc = OutputCapture()
+        self.oc.capture_output()
+
+    def restore_output(self):
+        stdout, stderr, logs = self.oc.restore_output()
+        self.oc = None
+        return (stdout, stderr, logs)
+
+    def test_basic(self):
+        command = PrintBaselines()
+        command.bind_to_tool(self.tool)
+        self.capture_output()
+        command.execute(MockOptions(all=False, include_virtual_tests=False, csv=False, platform=None), ['passes/text.html'], self.tool)
+        stdout, _, _ = self.restore_output()
+        self.assertEquals(stdout,
+                          ('// For test-win-xp\n'
+                           'passes/text-expected.png\n'
+                           'passes/text-expected.txt\n'))
+
+    def test_multiple(self):
+        command = PrintBaselines()
+        command.bind_to_tool(self.tool)
+        self.capture_output()
+        command.execute(MockOptions(all=False, include_virtual_tests=False, csv=False, platform='test-win-*'), ['passes/text.html'], self.tool)
+        stdout, _, _ = self.restore_output()
+        self.assertEquals(stdout,
+                          ('// For test-win-vista\n'
+                           'passes/text-expected.png\n'
+                           'passes/text-expected.txt\n'
+                           '\n'
+                           '// For test-win-win7\n'
+                           'passes/text-expected.png\n'
+                           'passes/text-expected.txt\n'
+                           '\n'
+                           '// For test-win-xp\n'
+                           'passes/text-expected.png\n'
+                           'passes/text-expected.txt\n'))
+
+    def test_csv(self):
+        command = PrintBaselines()
+        command.bind_to_tool(self.tool)
+        self.capture_output()
+        command.execute(MockOptions(all=False, platform='*xp', csv=True, include_virtual_tests=False), ['passes/text.html'], self.tool)
+        stdout, _, _ = self.restore_output()
+        self.assertEquals(stdout,
+                          ('test-win-xp,passes/text.html,None,png,passes/text-expected.png,None\n'
+                           'test-win-xp,passes/text.html,None,txt,passes/text-expected.txt,None\n'))
diff --git a/Tools/Scripts/webkitpy/tool/commands/queues.py b/Tools/Scripts/webkitpy/tool/commands/queues.py
new file mode 100644
index 0000000..6993c2d
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/queues.py
@@ -0,0 +1,455 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import codecs
+import os
+import sys
+import time
+import traceback
+
+from datetime import datetime
+from optparse import make_option
+from StringIO import StringIO
+
+from webkitpy.common.config.committervalidator import CommitterValidator
+from webkitpy.common.config.ports import DeprecatedPort
+from webkitpy.common.net.bugzilla import Attachment
+from webkitpy.common.net.statusserver import StatusServer
+from webkitpy.common.system.deprecated_logging import error, log
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.tool.bot.botinfo import BotInfo
+from webkitpy.tool.bot.commitqueuetask import CommitQueueTask, CommitQueueTaskDelegate
+from webkitpy.tool.bot.expectedfailures import ExpectedFailures
+from webkitpy.tool.bot.feeders import CommitQueueFeeder, EWSFeeder
+from webkitpy.tool.bot.flakytestreporter import FlakyTestReporter
+from webkitpy.tool.bot.layouttestresultsreader import LayoutTestResultsReader
+from webkitpy.tool.bot.patchanalysistask import UnableToApplyPatch
+from webkitpy.tool.bot.queueengine import QueueEngine, QueueEngineDelegate
+from webkitpy.tool.bot.stylequeuetask import StyleQueueTask, StyleQueueTaskDelegate
+from webkitpy.tool.commands.stepsequence import StepSequenceErrorHandler
+from webkitpy.tool.multicommandtool import Command, TryAgain
+
+
+class AbstractQueue(Command, QueueEngineDelegate):
+    watchers = [
+    ]
+
+    _pass_status = "Pass"
+    _fail_status = "Fail"
+    _retry_status = "Retry"
+    _error_status = "Error"
+
+    def __init__(self, options=None): # Default values should never be collections (like []) as default values are shared between invocations
+        options_list = (options or []) + [
+            make_option("--no-confirm", action="store_false", dest="confirm", default=True, help="Do not ask the user for confirmation before running the queue.  Dangerous!"),
+            make_option("--exit-after-iteration", action="store", type="int", dest="iterations", default=None, help="Stop running the queue after iterating this number of times."),
+        ]
+        Command.__init__(self, "Run the %s" % self.name, options=options_list)
+        self._iteration_count = 0
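+        # A minimal illustration of the shared-mutable-default pitfall mentioned
+        # in the comment above (plain Python behaviour, shown here for clarity):
+        #     def add_option(option, options=[]):   # one list object shared by every call
+        #         options.append(option)
+        #         return options
+        #     add_option("--foo")   # -> ["--foo"]
+        #     add_option("--bar")   # -> ["--foo", "--bar"]  (state leaks between calls)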
+
+    def _cc_watchers(self, bug_id):
+        try:
+            self._tool.bugs.add_cc_to_bug(bug_id, self.watchers)
+        except Exception, e:
+            traceback.print_exc()
+            log("Failed to CC watchers.")
+
+    def run_webkit_patch(self, args):
+        webkit_patch_args = [self._tool.path()]
+        # FIXME: This is a hack, we should have a more general way to pass global options.
+        # FIXME: We must always pass global options and their value in one argument
+        # because our global option code looks for the first argument which does
+        # not begin with "-" and assumes that is the command name.
+        webkit_patch_args += ["--status-host=%s" % self._tool.status_server.host]
+        if self._tool.status_server.bot_id:
+            webkit_patch_args += ["--bot-id=%s" % self._tool.status_server.bot_id]
+        if self._options.port:
+            webkit_patch_args += ["--port=%s" % self._options.port]
+        webkit_patch_args.extend(args)
+        # FIXME: There is probably no reason to use run_and_throw_if_fail anymore.
+        # run_and_throw_if_fail was invented to support tee'd output
+        # (where we write both to a log file and to the console at once),
+        # but the queues don't need live-progress, a dump-of-output at the
+        # end should be sufficient.
+        return self._tool.executive.run_and_throw_if_fail(webkit_patch_args, cwd=self._tool.scm().checkout_root)
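+    # Illustration only (mirrors AbstractQueueTest._assert_run_webkit_patch in
+    # queues_unittest.py): with a status-server host of "example.com", a bot id
+    # of "gort" and no port option set, a call such as
+    #     self.run_webkit_patch(["clean"])
+    # ends up invoking roughly
+    #     [<path to webkit-patch>, "--status-host=example.com", "--bot-id=gort", "clean"]
+    # through run_and_throw_if_fail, with cwd set to the checkout root.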
+
+    def _log_directory(self):
+        return os.path.join("..", "%s-logs" % self.name)
+
+    # QueueEngineDelegate methods
+
+    def queue_log_path(self):
+        return os.path.join(self._log_directory(), "%s.log" % self.name)
+
+    def work_item_log_path(self, work_item):
+        raise NotImplementedError, "subclasses must implement"
+
+    def begin_work_queue(self):
+        log("CAUTION: %s will discard all local changes in \"%s\"" % (self.name, self._tool.scm().checkout_root))
+        if self._options.confirm:
+            response = self._tool.user.prompt("Are you sure?  Type \"yes\" to continue: ")
+            if (response != "yes"):
+                error("User declined.")
+        log("Running WebKit %s." % self.name)
+        self._tool.status_server.update_status(self.name, "Starting Queue")
+
+    def stop_work_queue(self, reason):
+        self._tool.status_server.update_status(self.name, "Stopping Queue, reason: %s" % reason)
+
+    def should_continue_work_queue(self):
+        self._iteration_count += 1
+        return not self._options.iterations or self._iteration_count <= self._options.iterations
+
+    def next_work_item(self):
+        raise NotImplementedError, "subclasses must implement"
+
+    def process_work_item(self, work_item):
+        raise NotImplementedError, "subclasses must implement"
+
+    def handle_unexpected_error(self, work_item, message):
+        raise NotImplementedError, "subclasses must implement"
+
+    # Command methods
+
+    def execute(self, options, args, tool, engine=QueueEngine):
+        self._options = options # FIXME: This code is wrong.  Command.options is a list, this assumes an Options element!
+        self._tool = tool  # FIXME: This code is wrong too!  Command.bind_to_tool handles this!
+        return engine(self.name, self, self._tool.wakeup_event).run()
+
+    @classmethod
+    def _log_from_script_error_for_upload(cls, script_error, output_limit=None):
+        # We have seen request timeouts with app engine due to large
+        # log uploads.  Trying only the last 512k.
+        if not output_limit:
+            output_limit = 512 * 1024  # 512k
+        output = script_error.message_with_output(output_limit=output_limit)
+        # We pre-encode the string to a byte array before passing it
+        # to status_server, because ClientForm (part of mechanize)
+        # wants a file-like object with pre-encoded data.
+        return StringIO(output.encode("utf-8"))
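+    # Rough sketch of what gets uploaded (cf. test_log_from_script_error_for_upload
+    # in queues_unittest.py):
+    #     log = cls._log_from_script_error_for_upload(ScriptError("err"))
+    #     log.read()  # -> "err" as UTF-8 bytes; if the error carries output, only
+    #                 #    the last output_limit (default 512k) characters are kept.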
+
+    @classmethod
+    def _update_status_for_script_error(cls, tool, state, script_error, is_error=False):
+        message = str(script_error)
+        if is_error:
+            message = "Error: %s" % message
+        failure_log = cls._log_from_script_error_for_upload(script_error)
+        return tool.status_server.update_status(cls.name, message, state["patch"], failure_log)
+
+
+class FeederQueue(AbstractQueue):
+    name = "feeder-queue"
+
+    _sleep_duration = 30  # seconds
+
+    # AbstractQueue methods
+
+    def begin_work_queue(self):
+        AbstractQueue.begin_work_queue(self)
+        self.feeders = [
+            CommitQueueFeeder(self._tool),
+            EWSFeeder(self._tool),
+        ]
+
+    def next_work_item(self):
+        # This really should inherit from some more basic class that doesn't
+        # understand work items, but the base class in the hierarchy currently
+        # understands work items.
+        return "synthetic-work-item"
+
+    def process_work_item(self, work_item):
+        for feeder in self.feeders:
+            feeder.feed()
+        time.sleep(self._sleep_duration)
+        return True
+
+    def work_item_log_path(self, work_item):
+        return None
+
+    def handle_unexpected_error(self, work_item, message):
+        log(message)
+
+
+class AbstractPatchQueue(AbstractQueue):
+    def _update_status(self, message, patch=None, results_file=None):
+        return self._tool.status_server.update_status(self.name, message, patch, results_file)
+
+    def _next_patch(self):
+        # FIXME: Bugzilla accessibility should be checked here; if it's inaccessible,
+        # this method should return None.
+        patch = None
+        while not patch:
+            patch_id = self._tool.status_server.next_work_item(self.name)
+            if not patch_id:
+                return None
+            patch = self._tool.bugs.fetch_attachment(patch_id)
+            if not patch:
+                # FIXME: Using a fake patch because release_work_item has the wrong API.
+                # We also don't really need to release the lock (although that's fine);
+                # mostly we just need to remove this bogus patch from our queue.
+                # If for some reason bugzilla is just down, the patch will be re-fed later.
+                fake_patch = Attachment({'id': patch_id}, None)
+                self._release_work_item(fake_patch)
+        return patch
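+    # Sketch of the loop above against the MockStatusServer used in the unit tests
+    # (see AbstractPatchQueueTest.test_next_patch in queues_unittest.py): with work
+    # items [2, 10000, 10001], where attachment 2 does not exist,
+    #     queue._next_patch()  # releases a fake patch for id 2, returns attachment 10000
+    # and later calls return attachment 10001 and then None once the queue is empty.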
+
+    def _release_work_item(self, patch):
+        self._tool.status_server.release_work_item(self.name, patch)
+
+    def _did_pass(self, patch):
+        self._update_status(self._pass_status, patch)
+        self._release_work_item(patch)
+
+    def _did_fail(self, patch):
+        self._update_status(self._fail_status, patch)
+        self._release_work_item(patch)
+
+    def _did_retry(self, patch):
+        self._update_status(self._retry_status, patch)
+        self._release_work_item(patch)
+
+    def _did_error(self, patch, reason):
+        message = "%s: %s" % (self._error_status, reason)
+        self._update_status(message, patch)
+        self._release_work_item(patch)
+
+    # FIXME: This probably belongs at a layer below AbstractPatchQueue, but shared by CommitQueue and the EarlyWarningSystem.
+    def _upload_results_archive_for_patch(self, patch, results_archive_zip):
+        bot_id = self._tool.status_server.bot_id or "bot"
+        description = "Archive of layout-test-results from %s" % bot_id
+        # results_archive is a ZipFile object, grab the File object (.fp) to pass to Mechanize for uploading.
+        results_archive_file = results_archive_zip.fp
+        # Rewind the file object to start (since Mechanize won't do that automatically)
+        # See https://bugs.webkit.org/show_bug.cgi?id=54593
+        results_archive_file.seek(0)
+        # FIXME: This is a small lie to always say run-webkit-tests since Chromium uses new-run-webkit-tests.
+        # We could make this code look up the test script name off the port.
+        comment_text = "The attached test failures were seen while running run-webkit-tests on the %s.\n" % (self.name)
+        # FIXME: We could easily list the test failures from the archive here,
+        # currently callers do that separately.
+        comment_text += BotInfo(self._tool).summary_text()
+        self._tool.bugs.add_attachment_to_bug(patch.bug_id(), results_archive_file, description, filename="layout-test-results.zip", comment_text=comment_text)
+
+    def work_item_log_path(self, patch):
+        return os.path.join(self._log_directory(), "%s.log" % patch.bug_id())
+
+
+class CommitQueue(AbstractPatchQueue, StepSequenceErrorHandler, CommitQueueTaskDelegate):
+    name = "commit-queue"
+    port_name = "chromium-xvfb"
+
+    def __init__(self):
+        AbstractPatchQueue.__init__(self)
+        self.port = DeprecatedPort.port(self.port_name)
+
+    # AbstractPatchQueue methods
+
+    def begin_work_queue(self):
+        # FIXME: This violates abstraction
+        self._tool._deprecated_port = self.port
+        AbstractPatchQueue.begin_work_queue(self)
+        self.committer_validator = CommitterValidator(self._tool)
+        self._expected_failures = ExpectedFailures()
+        self._layout_test_results_reader = LayoutTestResultsReader(self._tool, self._log_directory())
+
+    def next_work_item(self):
+        return self._next_patch()
+
+    def process_work_item(self, patch):
+        self._cc_watchers(patch.bug_id())
+        task = CommitQueueTask(self, patch)
+        try:
+            if task.run():
+                self._did_pass(patch)
+                return True
+            self._did_retry(patch)
+        except ScriptError, e:
+            validator = CommitterValidator(self._tool)
+            validator.reject_patch_from_commit_queue(patch.id(), self._error_message_for_bug(task, patch, e))
+            results_archive = task.results_archive_from_patch_test_run(patch)
+            if results_archive:
+                self._upload_results_archive_for_patch(patch, results_archive)
+            self._did_fail(patch)
+
+    def _failing_tests_message(self, task, patch):
+        results = task.results_from_patch_test_run(patch)
+        unexpected_failures = self._expected_failures.unexpected_failures_observed(results)
+        if not unexpected_failures:
+            return None
+        return "New failing tests:\n%s" % "\n".join(unexpected_failures)
+
+    def _error_message_for_bug(self, task, patch, script_error):
+        message = self._failing_tests_message(task, patch)
+        if not message:
+            message = script_error.message_with_output()
+        results_link = self._tool.status_server.results_url_for_status(task.failure_status_id)
+        return "%s\nFull output: %s" % (message, results_link)
+
+    def handle_unexpected_error(self, patch, message):
+        self.committer_validator.reject_patch_from_commit_queue(patch.id(), message)
+
+    # CommitQueueTaskDelegate methods
+
+    def run_command(self, command):
+        self.run_webkit_patch(command + [self.port.flag()])
+
+    def command_passed(self, message, patch):
+        self._update_status(message, patch=patch)
+
+    def command_failed(self, message, script_error, patch):
+        failure_log = self._log_from_script_error_for_upload(script_error)
+        return self._update_status(message, patch=patch, results_file=failure_log)
+
+    def expected_failures(self):
+        return self._expected_failures
+
+    def test_results(self):
+        return self._layout_test_results_reader.results()
+
+    def archive_last_test_results(self, patch):
+        return self._layout_test_results_reader.archive(patch)
+
+    def build_style(self):
+        return "release"
+
+    def refetch_patch(self, patch):
+        return self._tool.bugs.fetch_attachment(patch.id())
+
+    def report_flaky_tests(self, patch, flaky_test_results, results_archive=None):
+        reporter = FlakyTestReporter(self._tool, self.name)
+        reporter.report_flaky_tests(patch, flaky_test_results, results_archive)
+
+    def did_pass_testing_ews(self, patch):
+        # Currently, chromium-ews is the only testing EWS. Once there are more,
+        # we should make sure they all pass.
+        status = self._tool.status_server.patch_status("chromium-ews", patch.id())
+        return status == self._pass_status
+
+    # StepSequenceErrorHandler methods
+
+    @classmethod
+    def handle_script_error(cls, tool, state, script_error):
+        # Hitting this error handler should be pretty rare.  It does occur,
+        # however, when a patch no longer applies to top-of-tree in the final
+        # land step.
+        log(script_error.message_with_output())
+
+    @classmethod
+    def handle_checkout_needs_update(cls, tool, state, options, error):
+        message = "Tests passed, but commit failed (checkout out of date).  Updating, then landing without building or re-running tests."
+        tool.status_server.update_status(cls.name, message, state["patch"])
+        # The only time we find out that our checkout needs an update is
+        # when we are ready to actually pull the trigger and land the patch.
+        # Rather than spinning in the master process, we retry without
+        # building or testing, which is much faster.
+        options.build = False
+        options.test = False
+        options.update = True
+        raise TryAgain()
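+    # For illustration, test_auto_retry in queues_unittest.py drives this handler
+    # and then checks that
+    #     options.update is True and options.build == options.test == False
+    # while the raised TryAgain is left to propagate to whatever is running the
+    # step sequence.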
+
+
+class AbstractReviewQueue(AbstractPatchQueue, StepSequenceErrorHandler):
+    """This is the base-class for the EWS queues and the style-queue."""
+    def __init__(self, options=None):
+        AbstractPatchQueue.__init__(self, options)
+
+    def review_patch(self, patch):
+        raise NotImplementedError("subclasses must implement")
+
+    # AbstractPatchQueue methods
+
+    def begin_work_queue(self):
+        AbstractPatchQueue.begin_work_queue(self)
+
+    def next_work_item(self):
+        return self._next_patch()
+
+    def process_work_item(self, patch):
+        try:
+            if not self.review_patch(patch):
+                return False
+            self._did_pass(patch)
+            return True
+        except ScriptError, e:
+            if e.exit_code != QueueEngine.handled_error_code:
+                self._did_fail(patch)
+            else:
+                # The subprocess handled the error, but won't have released the patch, so we do.
+                # FIXME: We need to simplify the rules by which _release_work_item is called.
+                self._release_work_item(patch)
+            raise e
+
+    def handle_unexpected_error(self, patch, message):
+        log(message)
+
+    # StepSequenceErrorHandler methods
+
+    @classmethod
+    def handle_script_error(cls, tool, state, script_error):
+        log(script_error.output)
+
+
+class StyleQueue(AbstractReviewQueue, StyleQueueTaskDelegate):
+    name = "style-queue"
+
+    def __init__(self):
+        AbstractReviewQueue.__init__(self)
+
+    def review_patch(self, patch):
+        task = StyleQueueTask(self, patch)
+        if not task.validate():
+            self._did_error(patch, "%s did not process patch." % self.name)
+            return False
+        try:
+            return task.run()
+        except UnableToApplyPatch, e:
+            self._did_error(patch, "%s unable to apply patch." % self.name)
+            return False
+        except ScriptError, e:
+            message = "Attachment %s did not pass %s:\n\n%s\n\nIf any of these errors are false positives, please file a bug against check-webkit-style." % (patch.id(), self.name, e.output)
+            self._tool.bugs.post_comment_to_bug(patch.bug_id(), message, cc=self.watchers)
+            self._did_fail(patch)
+            return False
+        return True
+
+    # StyleQueueTaskDelegate methods
+
+    def run_command(self, command):
+        self.run_webkit_patch(command)
+
+    def command_passed(self, message, patch):
+        self._update_status(message, patch=patch)
+
+    def command_failed(self, message, script_error, patch):
+        failure_log = self._log_from_script_error_for_upload(script_error)
+        return self._update_status(message, patch=patch, results_file=failure_log)
+
+    def expected_failures(self):
+        return None
+
+    def refetch_patch(self, patch):
+        return self._tool.bugs.fetch_attachment(patch.id())
diff --git a/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py b/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py
new file mode 100644
index 0000000..6301fea
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py
@@ -0,0 +1,496 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import StringIO
+
+from webkitpy.common.checkout.scm import CheckoutNeedsUpdate
+from webkitpy.common.checkout.scm.scm_mock import MockSCM
+from webkitpy.common.net.bugzilla import Attachment
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.layout_tests.models import test_results
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.thirdparty.mock import Mock
+from webkitpy.tool.commands.commandtest import CommandsTest
+from webkitpy.tool.commands.queues import *
+from webkitpy.tool.commands.queuestest import QueuesTest
+from webkitpy.tool.commands.stepsequence import StepSequence
+from webkitpy.common.net.statusserver_mock import MockStatusServer
+from webkitpy.tool.mocktool import MockTool, MockOptions
+
+
+class TestCommitQueue(CommitQueue):
+    def __init__(self, tool=None):
+        CommitQueue.__init__(self)
+        if tool:
+            self.bind_to_tool(tool)
+        self._options = MockOptions(confirm=False, parent_command="commit-queue", port=None)
+
+    def begin_work_queue(self):
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+        CommitQueue.begin_work_queue(self)
+        output_capture.restore_output()
+
+
+class TestQueue(AbstractPatchQueue):
+    name = "test-queue"
+
+
+class TestReviewQueue(AbstractReviewQueue):
+    name = "test-review-queue"
+
+
+class TestFeederQueue(FeederQueue):
+    _sleep_duration = 0
+
+
+class AbstractQueueTest(CommandsTest):
+    def test_log_directory(self):
+        self.assertEquals(TestQueue()._log_directory(), os.path.join("..", "test-queue-logs"))
+
+    def _assert_run_webkit_patch(self, run_args, port=None):
+        queue = TestQueue()
+        tool = MockTool()
+        tool.status_server.bot_id = "gort"
+        tool.executive = Mock()
+        queue.bind_to_tool(tool)
+        queue._options = Mock()
+        queue._options.port = port
+
+        queue.run_webkit_patch(run_args)
+        expected_run_args = ["echo", "--status-host=example.com", "--bot-id=gort"]
+        if port:
+            expected_run_args.append("--port=%s" % port)
+        expected_run_args.extend(run_args)
+        tool.executive.run_and_throw_if_fail.assert_called_with(expected_run_args, cwd='/mock-checkout')
+
+    def test_run_webkit_patch(self):
+        self._assert_run_webkit_patch([1])
+        self._assert_run_webkit_patch(["one", 2])
+        self._assert_run_webkit_patch([1], port="mockport")
+
+    def test_iteration_count(self):
+        queue = TestQueue()
+        queue._options = Mock()
+        queue._options.iterations = 3
+        self.assertTrue(queue.should_continue_work_queue())
+        self.assertTrue(queue.should_continue_work_queue())
+        self.assertTrue(queue.should_continue_work_queue())
+        self.assertFalse(queue.should_continue_work_queue())
+
+    def test_no_iteration_count(self):
+        queue = TestQueue()
+        queue._options = Mock()
+        self.assertTrue(queue.should_continue_work_queue())
+        self.assertTrue(queue.should_continue_work_queue())
+        self.assertTrue(queue.should_continue_work_queue())
+        self.assertTrue(queue.should_continue_work_queue())
+
+    def _assert_log_message(self, script_error, log_message):
+        failure_log = AbstractQueue._log_from_script_error_for_upload(script_error, output_limit=10)
+        self.assertTrue(failure_log.read(), log_message)
+
+    def test_log_from_script_error_for_upload(self):
+        self._assert_log_message(ScriptError("test"), "test")
+        unicode_tor = u"WebKit \u2661 Tor Arne Vestb\u00F8!"
+        utf8_tor = unicode_tor.encode("utf-8")
+        self._assert_log_message(ScriptError(unicode_tor), utf8_tor)
+        script_error = ScriptError(unicode_tor, output=unicode_tor)
+        expected_output = "%s\nLast %s characters of output:\n%s" % (utf8_tor, 10, utf8_tor[-10:])
+        self._assert_log_message(script_error, expected_output)
+
+
+class FeederQueueTest(QueuesTest):
+    def test_feeder_queue(self):
+        queue = TestFeederQueue()
+        tool = MockTool(log_executive=True)
+        expected_stderr = {
+            "begin_work_queue": self._default_begin_work_queue_stderr("feeder-queue"),
+            "next_work_item": "",
+            "process_work_item": """Warning, attachment 10001 on bug 50000 has invalid committer (non-committer@example.com)
+Warning, attachment 10001 on bug 50000 has invalid committer (non-committer@example.com)
+MOCK setting flag 'commit-queue' to '-' on attachment '10001' with comment 'Rejecting attachment 10001 from commit-queue.' and additional comment 'non-committer@example.com does not have committer permissions according to http://trac.webkit.org/browser/trunk/Tools/Scripts/webkitpy/common/config/committers.py.
+
+- If you do not have committer rights please read http://webkit.org/coding/contributing.html for instructions on how to use bugzilla flags.
+
+- If you have committer rights please correct the error in Tools/Scripts/webkitpy/common/config/committers.py by adding yourself to the file (no review needed).  The commit-queue restarts itself every 2 hours.  After restart the commit-queue will correctly respect your committer rights.'
+MOCK: update_work_items: commit-queue [10005, 10000]
+Feeding commit-queue items [10005, 10000]
+Feeding EWS (1 r? patch, 1 new)
+MOCK: submit_to_ews: 10002
+""",
+            "handle_unexpected_error": "Mock error message\n",
+        }
+        self.assert_queue_outputs(queue, tool=tool, expected_stderr=expected_stderr)
+
+
+class AbstractPatchQueueTest(CommandsTest):
+    def test_next_patch(self):
+        queue = AbstractPatchQueue()
+        tool = MockTool()
+        queue.bind_to_tool(tool)
+        queue._options = Mock()
+        queue._options.port = None
+        self.assertEquals(queue._next_patch(), None)
+        tool.status_server = MockStatusServer(work_items=[2, 10000, 10001])
+        expected_stdout = "MOCK: fetch_attachment: 2 is not a known attachment id\n"  # A mock-only message to prevent us from making mistakes.
+        expected_stderr = "MOCK: release_work_item: None 2\n"
+        patch = OutputCapture().assert_outputs(self, queue._next_patch, expected_stdout=expected_stdout, expected_stderr=expected_stderr)
+        # The patch with id 2 is ignored because the attachment doesn't exist.
+        self.assertEquals(patch.id(), 10000)
+        self.assertEquals(queue._next_patch().id(), 10001)
+        self.assertEquals(queue._next_patch(), None)    # When the queue is empty
+
+    def test_upload_results_archive_for_patch(self):
+        queue = AbstractPatchQueue()
+        queue.name = "mock-queue"
+        tool = MockTool()
+        queue.bind_to_tool(tool)
+        queue._options = Mock()
+        queue._options.port = None
+        patch = queue._tool.bugs.fetch_attachment(10001)
+        expected_stderr = """MOCK add_attachment_to_bug: bug_id=50000, description=Archive of layout-test-results from bot filename=layout-test-results.zip mimetype=None
+-- Begin comment --
+The attached test failures were seen while running run-webkit-tests on the mock-queue.
+Port: MockPort  Platform: MockPlatform 1.0
+-- End comment --
+"""
+        OutputCapture().assert_outputs(self, queue._upload_results_archive_for_patch, [patch, Mock()], expected_stderr=expected_stderr)
+
+
+class NeedsUpdateSequence(StepSequence):
+    def _run(self, tool, options, state):
+        raise CheckoutNeedsUpdate([], 1, "", None)
+
+
+class AlwaysCommitQueueTool(object):
+    def __init__(self):
+        self.status_server = MockStatusServer()
+
+    def command_by_name(self, name):
+        return CommitQueue
+
+
+class SecondThoughtsCommitQueue(TestCommitQueue):
+    def __init__(self, tool=None):
+        self._reject_patch = False
+        TestCommitQueue.__init__(self, tool)
+
+    def run_command(self, command):
+        # We want to reject the patch after the first validation,
+        # so wait to reject it until after some other command has run.
+        self._reject_patch = True
+        return CommitQueue.run_command(self, command)
+
+    def refetch_patch(self, patch):
+        if not self._reject_patch:
+            return self._tool.bugs.fetch_attachment(patch.id())
+
+        attachment_dictionary = {
+            "id": patch.id(),
+            "bug_id": patch.bug_id(),
+            "name": "Rejected",
+            "is_obsolete": True,
+            "is_patch": False,
+            "review": "-",
+            "reviewer_email": "foo@bar.com",
+            "commit-queue": "-",
+            "committer_email": "foo@bar.com",
+            "attacher_email": "Contributer1",
+        }
+        return Attachment(attachment_dictionary, None)
+
+
+class CommitQueueTest(QueuesTest):
+    def _mock_test_result(self, testname):
+        return test_results.TestResult(testname, [test_failures.FailureTextMismatch()])
+
+    def test_commit_queue(self):
+        tool = MockTool()
+        tool.filesystem.write_text_file('/tmp/layout-test-results/full_results.json', '')  # Otherwise the commit-queue will hit a KeyError trying to read the results from the MockFileSystem.
+        tool.filesystem.write_text_file('/tmp/layout-test-results/webkit_unit_tests_output.xml', '')
+        expected_stderr = {
+            "begin_work_queue": self._default_begin_work_queue_stderr("commit-queue"),
+            "next_work_item": "",
+            "process_work_item": """MOCK: update_status: commit-queue Cleaned working directory
+MOCK: update_status: commit-queue Updated working directory
+MOCK: update_status: commit-queue Applied patch
+MOCK: update_status: commit-queue ChangeLog validated
+MOCK: update_status: commit-queue Built patch
+MOCK: update_status: commit-queue Passed tests
+MOCK: update_status: commit-queue Landed patch
+MOCK: update_status: commit-queue Pass
+MOCK: release_work_item: commit-queue 10000
+""",
+            "handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.' and additional comment 'Mock error message'\n",
+            "handle_script_error": "ScriptError error message\n\nMOCK output\n",
+        }
+        self.assert_queue_outputs(CommitQueue(), tool=tool, expected_stderr=expected_stderr)
+
+    def test_commit_queue_failure(self):
+        expected_stderr = {
+            "begin_work_queue": self._default_begin_work_queue_stderr("commit-queue"),
+            "next_work_item": "",
+            "process_work_item": """MOCK: update_status: commit-queue Cleaned working directory
+MOCK: update_status: commit-queue Updated working directory
+MOCK: update_status: commit-queue Patch does not apply
+MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.' and additional comment 'MOCK script error
+Full output: http://dummy_url'
+MOCK: update_status: commit-queue Fail
+MOCK: release_work_item: commit-queue 10000
+""",
+            "handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.' and additional comment 'Mock error message'\n",
+            "handle_script_error": "ScriptError error message\n\nMOCK output\n",
+        }
+        queue = CommitQueue()
+
+        def mock_run_webkit_patch(command):
+            if command[0] == 'clean' or command[0] == 'update':
+                # We want cleaning to succeed so we can error out on a step
+                # that causes the commit-queue to reject the patch.
+                return
+            raise ScriptError('MOCK script error')
+
+        queue.run_webkit_patch = mock_run_webkit_patch
+        self.assert_queue_outputs(queue, expected_stderr=expected_stderr)
+
+    def test_commit_queue_failure_with_failing_tests(self):
+        expected_stderr = {
+            "begin_work_queue": self._default_begin_work_queue_stderr("commit-queue"),
+            "next_work_item": "",
+            "process_work_item": """MOCK: update_status: commit-queue Cleaned working directory
+MOCK: update_status: commit-queue Updated working directory
+MOCK: update_status: commit-queue Patch does not apply
+MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.' and additional comment 'New failing tests:
+mock_test_name.html
+another_test_name.html
+Full output: http://dummy_url'
+MOCK: update_status: commit-queue Fail
+MOCK: release_work_item: commit-queue 10000
+""",
+            "handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.' and additional comment 'Mock error message'\n",
+            "handle_script_error": "ScriptError error message\n\nMOCK output\n",
+        }
+        queue = CommitQueue()
+
+        def mock_run_webkit_patch(command):
+            if command[0] == 'clean' or command[0] == 'update':
+                # We want cleaning to succeed so we can error out on a step
+                # that causes the commit-queue to reject the patch.
+                return
+            queue._expected_failures.unexpected_failures_observed = lambda results: ["mock_test_name.html", "another_test_name.html"]
+            raise ScriptError('MOCK script error')
+
+        queue.run_webkit_patch = mock_run_webkit_patch
+        self.assert_queue_outputs(queue, expected_stderr=expected_stderr)
+
+    def test_rollout(self):
+        tool = MockTool(log_executive=True)
+        tool.filesystem.write_text_file('/tmp/layout-test-results/full_results.json', '')  # Otherwise the commit-queue will hit a KeyError trying to read the results from the MockFileSystem.
+        tool.filesystem.write_text_file('/tmp/layout-test-results/webkit_unit_tests_output.xml', '')
+        tool.buildbot.light_tree_on_fire()
+        expected_stderr = {
+            "begin_work_queue": self._default_begin_work_queue_stderr("commit-queue"),
+            "next_work_item": "",
+            "process_work_item": """MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'clean', '--port=%(port_name)s'], cwd=/mock-checkout
+MOCK: update_status: commit-queue Cleaned working directory
+MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'update', '--port=%(port_name)s'], cwd=/mock-checkout
+MOCK: update_status: commit-queue Updated working directory
+MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'apply-attachment', '--no-update', '--non-interactive', 10000, '--port=%(port_name)s'], cwd=/mock-checkout
+MOCK: update_status: commit-queue Applied patch
+MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'validate-changelog', '--non-interactive', 10000, '--port=%(port_name)s'], cwd=/mock-checkout
+MOCK: update_status: commit-queue ChangeLog validated
+MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'build', '--no-clean', '--no-update', '--build-style=release', '--port=%(port_name)s'], cwd=/mock-checkout
+MOCK: update_status: commit-queue Built patch
+MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive', '--port=%(port_name)s'], cwd=/mock-checkout
+MOCK: update_status: commit-queue Passed tests
+MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000, '--port=%(port_name)s'], cwd=/mock-checkout
+MOCK: update_status: commit-queue Landed patch
+MOCK: update_status: commit-queue Pass
+MOCK: release_work_item: commit-queue 10000
+""" % {"port_name": CommitQueue.port_name},
+            "handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.' and additional comment 'Mock error message'\n",
+            "handle_script_error": "ScriptError error message\n\nMOCK output\n",
+        }
+        self.assert_queue_outputs(CommitQueue(), tool=tool, expected_stderr=expected_stderr)
+
+    def test_rollout_lands(self):
+        tool = MockTool(log_executive=True)
+        tool.buildbot.light_tree_on_fire()
+        rollout_patch = tool.bugs.fetch_attachment(10005)  # _patch6, a rollout patch.
+        assert(rollout_patch.is_rollout())
+        expected_stderr = {
+            "begin_work_queue": self._default_begin_work_queue_stderr("commit-queue"),
+            "next_work_item": "",
+            "process_work_item": """MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'clean', '--port=%(port_name)s'], cwd=/mock-checkout
+MOCK: update_status: commit-queue Cleaned working directory
+MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'update', '--port=%(port_name)s'], cwd=/mock-checkout
+MOCK: update_status: commit-queue Updated working directory
+MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'apply-attachment', '--no-update', '--non-interactive', 10005, '--port=%(port_name)s'], cwd=/mock-checkout
+MOCK: update_status: commit-queue Applied patch
+MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'validate-changelog', '--non-interactive', 10005, '--port=%(port_name)s'], cwd=/mock-checkout
+MOCK: update_status: commit-queue ChangeLog validated
+MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10005, '--port=%(port_name)s'], cwd=/mock-checkout
+MOCK: update_status: commit-queue Landed patch
+MOCK: update_status: commit-queue Pass
+MOCK: release_work_item: commit-queue 10005
+""" % {"port_name": CommitQueue.port_name},
+            "handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '10005' with comment 'Rejecting attachment 10005 from commit-queue.' and additional comment 'Mock error message'\n",
+            "handle_script_error": "ScriptError error message\n\nMOCK output\n",
+        }
+        self.assert_queue_outputs(CommitQueue(), tool=tool, work_item=rollout_patch, expected_stderr=expected_stderr)
+
+    def test_auto_retry(self):
+        queue = CommitQueue()
+        options = Mock()
+        options.parent_command = "commit-queue"
+        tool = AlwaysCommitQueueTool()
+        sequence = NeedsUpdateSequence(None)
+
+        expected_stderr = "Commit failed because the checkout is out of date.  Please update and try again.\nMOCK: update_status: commit-queue Tests passed, but commit failed (checkout out of date).  Updating, then landing without building or re-running tests.\n"
+        state = {'patch': None}
+        OutputCapture().assert_outputs(self, sequence.run_and_handle_errors, [tool, options, state], expected_exception=TryAgain, expected_stderr=expected_stderr)
+
+        self.assertEquals(options.update, True)
+        self.assertEquals(options.build, False)
+        self.assertEquals(options.test, False)
+
+    def test_manual_reject_during_processing(self):
+        queue = SecondThoughtsCommitQueue(MockTool())
+        queue.begin_work_queue()
+        queue._tool.filesystem.write_text_file('/tmp/layout-test-results/full_results.json', '')  # Otherwise the commit-queue will hit a KeyError trying to read the results from the MockFileSystem.
+        queue._tool.filesystem.write_text_file('/tmp/layout-test-results/webkit_unit_tests_output.xml', '')
+        queue._options = Mock()
+        queue._options.port = None
+        expected_stderr = """MOCK: update_status: commit-queue Cleaned working directory
+MOCK: update_status: commit-queue Updated working directory
+MOCK: update_status: commit-queue Applied patch
+MOCK: update_status: commit-queue ChangeLog validated
+MOCK: update_status: commit-queue Built patch
+MOCK: update_status: commit-queue Passed tests
+MOCK: update_status: commit-queue Retry
+MOCK: release_work_item: commit-queue 10000
+"""
+        OutputCapture().assert_outputs(self, queue.process_work_item, [QueuesTest.mock_work_item], expected_stderr=expected_stderr)
+
+    def test_report_flaky_tests(self):
+        queue = TestCommitQueue(MockTool())
+        expected_stderr = """MOCK bug comment: bug_id=50002, cc=None
+--- Begin comment ---
+The commit-queue just saw foo/bar.html flake (text diff) while processing attachment 10000 on bug 50000.
+Port: MockPort  Platform: MockPlatform 1.0
+--- End comment ---
+
+MOCK add_attachment_to_bug: bug_id=50002, description=Failure diff from bot filename=failure.diff mimetype=None
+MOCK bug comment: bug_id=50002, cc=None
+--- Begin comment ---
+The commit-queue just saw bar/baz.html flake (text diff) while processing attachment 10000 on bug 50000.
+Port: MockPort  Platform: MockPlatform 1.0
+--- End comment ---
+
+MOCK add_attachment_to_bug: bug_id=50002, description=Archive of layout-test-results from bot filename=layout-test-results.zip mimetype=None
+MOCK bug comment: bug_id=50000, cc=None
+--- Begin comment ---
+The commit-queue encountered the following flaky tests while processing attachment 10000:
+
+foo/bar.html bug 50002 (author: abarth@webkit.org)
+bar/baz.html bug 50002 (author: abarth@webkit.org)
+The commit-queue is continuing to process your patch.
+--- End comment ---
+
+"""
+        test_names = ["foo/bar.html", "bar/baz.html"]
+        test_results = [self._mock_test_result(name) for name in test_names]
+
+        class MockZipFile(object):
+            def __init__(self):
+                self.fp = StringIO()
+
+            def read(self, path):
+                return ""
+
+            def namelist(self):
+                # This is intentionally missing one diffs.txt to exercise the "upload the whole zip" codepath.
+                return ['foo/bar-diffs.txt']
+
+        OutputCapture().assert_outputs(self, queue.report_flaky_tests, [QueuesTest.mock_work_item, test_results, MockZipFile()], expected_stderr=expected_stderr)
+
+    def test_did_pass_testing_ews(self):
+        tool = MockTool()
+        patch = tool.bugs.fetch_attachment(10000)
+        queue = TestCommitQueue(tool)
+        self.assertFalse(queue.did_pass_testing_ews(patch))
+
+
+class StyleQueueTest(QueuesTest):
+    def test_style_queue_with_style_exception(self):
+        expected_stderr = {
+            "begin_work_queue": self._default_begin_work_queue_stderr("style-queue"),
+            "next_work_item": "",
+            "process_work_item": """MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'clean'], cwd=/mock-checkout
+MOCK: update_status: style-queue Cleaned working directory
+MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'update'], cwd=/mock-checkout
+MOCK: update_status: style-queue Updated working directory
+MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'apply-attachment', '--no-update', '--non-interactive', 10000], cwd=/mock-checkout
+MOCK: update_status: style-queue Applied patch
+MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'apply-watchlist-local', 50000], cwd=/mock-checkout
+MOCK: update_status: style-queue Watchlist applied
+MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'check-style-local', '--non-interactive', '--quiet'], cwd=/mock-checkout
+MOCK: update_status: style-queue Style checked
+MOCK: update_status: style-queue Pass
+MOCK: release_work_item: style-queue 10000
+""",
+            "handle_unexpected_error": "Mock error message\n",
+            "handle_script_error": "MOCK output\n",
+        }
+        tool = MockTool(log_executive=True, executive_throws_when_run=set(['check-style']))
+        self.assert_queue_outputs(StyleQueue(), expected_stderr=expected_stderr, tool=tool)
+
+    def test_style_queue_with_watch_list_exception(self):
+        expected_stderr = {
+            "begin_work_queue": self._default_begin_work_queue_stderr("style-queue"),
+            "next_work_item": "",
+            "process_work_item": """MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'clean'], cwd=/mock-checkout
+MOCK: update_status: style-queue Cleaned working directory
+MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'update'], cwd=/mock-checkout
+MOCK: update_status: style-queue Updated working directory
+MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'apply-attachment', '--no-update', '--non-interactive', 10000], cwd=/mock-checkout
+MOCK: update_status: style-queue Applied patch
+MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'apply-watchlist-local', 50000], cwd=/mock-checkout
+MOCK: update_status: style-queue Unabled to apply watchlist
+MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'check-style-local', '--non-interactive', '--quiet'], cwd=/mock-checkout
+MOCK: update_status: style-queue Style checked
+MOCK: update_status: style-queue Pass
+MOCK: release_work_item: style-queue 10000
+""",
+            "handle_unexpected_error": "Mock error message\n",
+            "handle_script_error": "MOCK output\n",
+        }
+        tool = MockTool(log_executive=True, executive_throws_when_run=set(['apply-watchlist-local']))
+        self.assert_queue_outputs(StyleQueue(), expected_stderr=expected_stderr, tool=tool)
diff --git a/Tools/Scripts/webkitpy/tool/commands/queuestest.py b/Tools/Scripts/webkitpy/tool/commands/queuestest.py
new file mode 100644
index 0000000..b99302c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/queuestest.py
@@ -0,0 +1,98 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.net.bugzilla import Attachment
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.thirdparty.mock import Mock
+from webkitpy.tool.commands.stepsequence import StepSequenceErrorHandler
+from webkitpy.tool.mocktool import MockTool
+
+
+class MockQueueEngine(object):
+    def __init__(self, name, queue, wakeup_event):
+        pass
+
+    def run(self):
+        pass
+
+
+class QueuesTest(unittest.TestCase):
+    # This is _patch1 in mocktool.py
+    mock_work_item = MockTool().bugs.fetch_attachment(10000)
+
+    def assert_outputs(self, func, func_name, args, expected_stdout, expected_stderr, expected_exceptions):
+        exception = None
+        if expected_exceptions and func_name in expected_exceptions:
+            exception = expected_exceptions[func_name]
+
+        OutputCapture().assert_outputs(self,
+                func,
+                args=args,
+                expected_stdout=expected_stdout.get(func_name, ""),
+                expected_stderr=expected_stderr.get(func_name, ""),
+                expected_exception=exception)
+
+    def _default_begin_work_queue_stderr(self, name):
+        checkout_dir = '/mock-checkout'
+        string_replacements = {"name": name, 'checkout_dir': checkout_dir}
+        return "CAUTION: %(name)s will discard all local changes in \"%(checkout_dir)s\"\nRunning WebKit %(name)s.\nMOCK: update_status: %(name)s Starting Queue\n" % string_replacements
+
+    def assert_queue_outputs(self, queue, args=None, work_item=None, expected_stdout=None, expected_stderr=None, expected_exceptions=None, options=None, tool=None):
+        if not tool:
+            tool = MockTool()
+            # This is a hack so that callers don't have to set up a custom MockFileSystem just to test the commit-queue;
+            # the cq tries to read the layout test results and will hit a KeyError in MockFileSystem if we don't do this.
+            tool.filesystem.write_text_file('/mock-results/results.html', "")
+        if not expected_stdout:
+            expected_stdout = {}
+        if not expected_stderr:
+            expected_stderr = {}
+        if not args:
+            args = []
+        if not options:
+            options = Mock()
+            options.port = None
+        if not work_item:
+            work_item = self.mock_work_item
+        tool.user.prompt = lambda message: "yes"
+
+        queue.execute(options, args, tool, engine=MockQueueEngine)
+
+        self.assert_outputs(queue.queue_log_path, "queue_log_path", [], expected_stdout, expected_stderr, expected_exceptions)
+        self.assert_outputs(queue.work_item_log_path, "work_item_log_path", [work_item], expected_stdout, expected_stderr, expected_exceptions)
+        self.assert_outputs(queue.begin_work_queue, "begin_work_queue", [], expected_stdout, expected_stderr, expected_exceptions)
+        self.assert_outputs(queue.should_continue_work_queue, "should_continue_work_queue", [], expected_stdout, expected_stderr, expected_exceptions)
+        self.assert_outputs(queue.next_work_item, "next_work_item", [], expected_stdout, expected_stderr, expected_exceptions)
+        self.assert_outputs(queue.process_work_item, "process_work_item", [work_item], expected_stdout, expected_stderr, expected_exceptions)
+        self.assert_outputs(queue.handle_unexpected_error, "handle_unexpected_error", [work_item, "Mock error message"], expected_stdout, expected_stderr, expected_exceptions)
+        # Should we have a different function for testing StepSequenceErrorHandlers?
+        if isinstance(queue, StepSequenceErrorHandler):
+            self.assert_outputs(queue.handle_script_error, "handle_script_error", [tool, {"patch": self.mock_work_item}, ScriptError(message="ScriptError error message", script_args="MockErrorCommand", output="MOCK output")], expected_stdout, expected_stderr, expected_exceptions)
diff --git a/Tools/Scripts/webkitpy/tool/commands/rebaseline.py b/Tools/Scripts/webkitpy/tool/commands/rebaseline.py
new file mode 100644
index 0000000..d9209b1
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/rebaseline.py
@@ -0,0 +1,500 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import json
+import logging
+import optparse
+import sys
+
+from webkitpy.common.checkout.baselineoptimizer import BaselineOptimizer
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.layout_tests.models.test_expectations import TestExpectations, BASELINE_SUFFIX_LIST
+from webkitpy.layout_tests.port import builders
+from webkitpy.layout_tests.port import factory
+from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+
+
+_log = logging.getLogger(__name__)
+
+
+# FIXME: Should TestResultWriter know how to compute this string?
+def _baseline_name(fs, test_name, suffix):
+    return fs.splitext(test_name)[0] + TestResultWriter.FILENAME_SUFFIX_EXPECTED + "." + suffix
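+# Illustrative example (this assumes TestResultWriter.FILENAME_SUFFIX_EXPECTED is
+# "-expected"; the test path below is hypothetical):
+#     _baseline_name(fs, "fast/dom/foo.html", "txt")  # -> "fast/dom/foo-expected.txt"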
+
+
+class AbstractRebaseliningCommand(AbstractDeclarativeCommand):
+    # not overriding execute() - pylint: disable-msg=W0223
+
+    move_overwritten_baselines_option = optparse.make_option("--move-overwritten-baselines", action="store_true", default=False,
+        help="Move overwritten baselines elsewhere in the baseline path. This is for bringing up new ports.")
+
+    no_optimize_option = optparse.make_option('--no-optimize', dest='optimize', action='store_false', default=True,
+        help=('Do not optimize/de-dup the expectations after rebaselining (default is to de-dup automatically). '
+              'You can use "webkit-patch optimize-baselines" to optimize separately.'))
+
+    platform_options = factory.platform_options(use_globs=True)
+
+    results_directory_option = optparse.make_option("--results-directory", help="Local results directory to use")
+
+    suffixes_option = optparse.make_option("--suffixes", default=','.join(BASELINE_SUFFIX_LIST), action="store",
+        help="Comma-separated-list of file types to rebaseline")
+
+    def __init__(self, options=None):
+        super(AbstractRebaseliningCommand, self).__init__(options=options)
+        self._baseline_suffix_list = BASELINE_SUFFIX_LIST
+
+
+class RebaselineTest(AbstractRebaseliningCommand):
+    name = "rebaseline-test-internal"
+    help_text = "Rebaseline a single test from a buildbot. Only intended for use by other webkit-patch commands."
+
+    def __init__(self):
+        super(RebaselineTest, self).__init__(options=[
+            self.no_optimize_option,
+            self.results_directory_option,
+            self.suffixes_option,
+            optparse.make_option("--builder", help="Builder to pull new baselines from"),
+            optparse.make_option("--move-overwritten-baselines-to", action="append", default=[],
+                help="Platform to move existing baselines to before rebaselining. This is for bringing up new ports."),
+            optparse.make_option("--test", help="Test to rebaseline"),
+            ])
+        self._scm_changes = {'add': []}
+
+    def _results_url(self, builder_name):
+        return self._tool.buildbot_for_builder_name(builder_name).builder_with_name(builder_name).latest_layout_test_results_url()
+
+    def _baseline_directory(self, builder_name):
+        port = self._tool.port_factory.get_from_builder_name(builder_name)
+        override_dir = builders.rebaseline_override_dir(builder_name)
+        if override_dir:
+            return self._tool.filesystem.join(port.layout_tests_dir(), 'platform', override_dir)
+        return port.baseline_version_dir()
+
+    def _copy_existing_baseline(self, move_overwritten_baselines_to, test_name, suffix):
+        old_baselines = []
+        new_baselines = []
+
+        # Need to gather all the baseline paths before modifying the filesystem since
+        # the modifications can affect the results of port.expected_filename.
+        for platform in move_overwritten_baselines_to:
+            port = self._tool.port_factory.get(platform)
+            old_baseline = port.expected_filename(test_name, "." + suffix)
+            if not self._tool.filesystem.exists(old_baseline):
+                _log.debug("No existing baseline for %s." % test_name)
+                continue
+
+            new_baseline = self._tool.filesystem.join(port.baseline_path(), self._file_name_for_expected_result(test_name, suffix))
+            if self._tool.filesystem.exists(new_baseline):
+                _log.debug("Existing baseline at %s, not copying over it." % new_baseline)
+                continue
+
+            old_baselines.append(old_baseline)
+            new_baselines.append(new_baseline)
+
+        for old_baseline, new_baseline in zip(old_baselines, new_baselines):
+            _log.debug("Copying baseline from %s to %s." % (old_baseline, new_baseline))
+            self._tool.filesystem.maybe_make_directory(self._tool.filesystem.dirname(new_baseline))
+            self._tool.filesystem.copyfile(old_baseline, new_baseline)
+            if not self._tool.scm().exists(new_baseline):
+                self._add_to_scm(new_baseline)
+
+    def _save_baseline(self, data, target_baseline):
+        if not data:
+            return
+        filesystem = self._tool.filesystem
+        filesystem.maybe_make_directory(filesystem.dirname(target_baseline))
+        filesystem.write_binary_file(target_baseline, data)
+        if not self._tool.scm().exists(target_baseline):
+            self._add_to_scm(target_baseline)
+
+    def _add_to_scm(self, path):
+        self._scm_changes['add'].append(path)
+
+    def _update_expectations_file(self, builder_name, test_name):
+        port = self._tool.port_factory.get_from_builder_name(builder_name)
+
+        # Since rebaseline-test-internal can be called multiple times in parallel,
+        # we need to ensure that we're not trying to update the expectations file
+        # concurrently as well.
+        # FIXME: We should rework the code to not need this; maybe just download
+        # the files in parallel and rebaseline local files serially?
+        # Acquire the lock before entering the try block so the finally clause only
+        # releases a lock we actually hold.
+        path = port.path_to_test_expectations_file()
+        lock = self._tool.make_file_lock(path + '.lock')
+        lock.acquire_lock()
+        try:
+            expectations = TestExpectations(port, include_overrides=False)
+            for test_configuration in port.all_test_configurations():
+                if test_configuration.version == port.test_configuration().version:
+                    expectations_string = expectations.remove_configuration_from_test(test_name, test_configuration)
+
+            self._tool.filesystem.write_text_file(path, expectations_string)
+        finally:
+            lock.release_lock()
+
+    def _test_root(self, test_name):
+        return self._tool.filesystem.splitext(test_name)[0]
+
+    def _file_name_for_actual_result(self, test_name, suffix):
+        return "%s-actual.%s" % (self._test_root(test_name), suffix)
+
+    def _file_name_for_expected_result(self, test_name, suffix):
+        return "%s-expected.%s" % (self._test_root(test_name), suffix)
+
+    def _rebaseline_test(self, builder_name, test_name, move_overwritten_baselines_to, suffix, results_url):
+        baseline_directory = self._baseline_directory(builder_name)
+
+        source_baseline = "%s/%s" % (results_url, self._file_name_for_actual_result(test_name, suffix))
+        target_baseline = self._tool.filesystem.join(baseline_directory, self._file_name_for_expected_result(test_name, suffix))
+
+        if move_overwritten_baselines_to:
+            self._copy_existing_baseline(move_overwritten_baselines_to, test_name, suffix)
+
+        _log.debug("Retrieving %s." % source_baseline)
+        self._save_baseline(self._tool.web.get_binary(source_baseline, convert_404_to_None=True), target_baseline)
+
+    def _rebaseline_test_and_update_expectations(self, options):
+        if options.results_directory:
+            results_url = 'file://' + options.results_directory
+        else:
+            results_url = self._results_url(options.builder)
+        self._baseline_suffix_list = options.suffixes.split(',')
+        for suffix in self._baseline_suffix_list:
+            self._rebaseline_test(options.builder, options.test, options.move_overwritten_baselines_to, suffix, results_url)
+        self._update_expectations_file(options.builder, options.test)
+
+    def execute(self, options, args, tool):
+        self._rebaseline_test_and_update_expectations(options)
+        print json.dumps(self._scm_changes)
+
+
+class OptimizeBaselines(AbstractRebaseliningCommand):
+    name = "optimize-baselines"
+    help_text = "Reshuffles the baselines for the given tests to use as litte space on disk as possible."
+    argument_names = "TEST_NAMES"
+
+    def __init__(self):
+        super(OptimizeBaselines, self).__init__(options=[self.suffixes_option] + self.platform_options)
+
+    def _optimize_baseline(self, optimizer, test_name):
+        for suffix in self._baseline_suffix_list:
+            baseline_name = _baseline_name(self._tool.filesystem, test_name, suffix)
+            if not optimizer.optimize(baseline_name):
+                print "Heuristics failed to optimize %s" % baseline_name
+
+    def execute(self, options, args, tool):
+        self._baseline_suffix_list = options.suffixes.split(',')
+        port_names = tool.port_factory.all_port_names(options.platform)
+        if not port_names:
+            print "No port names match '%s'" % options.platform
+            return
+
+        optimizer = BaselineOptimizer(tool, port_names)
+        port = tool.port_factory.get(port_names[0])
+        for test_name in port.tests(args):
+            _log.info("Optimizing %s" % test_name)
+            self._optimize_baseline(optimizer, test_name)
+
+
+class AnalyzeBaselines(AbstractRebaseliningCommand):
+    name = "analyze-baselines"
+    help_text = "Analyzes the baselines for the given tests and prints results that are identical."
+    argument_names = "TEST_NAMES"
+
+    def __init__(self):
+        super(AnalyzeBaselines, self).__init__(options=[
+            self.suffixes_option,
+            optparse.make_option('--missing', action='store_true', default=False, help='show missing baselines as well'),
+            ] + self.platform_options)
+        self._optimizer_class = BaselineOptimizer  # overridable for testing
+        self._baseline_optimizer = None
+        self._port = None
+
+    def _write(self, msg):
+        print msg
+
+    def _analyze_baseline(self, options, test_name):
+        for suffix in self._baseline_suffix_list:
+            baseline_name = _baseline_name(self._tool.filesystem, test_name, suffix)
+            results_by_directory = self._baseline_optimizer.read_results_by_directory(baseline_name)
+            if results_by_directory:
+                self._write("%s:" % baseline_name)
+                self._baseline_optimizer.write_by_directory(results_by_directory, self._write, "  ")
+            elif options.missing:
+                self._write("%s: (no baselines found)" % baseline_name)
+
+    def execute(self, options, args, tool):
+        self._baseline_suffix_list = options.suffixes.split(',')
+        port_names = tool.port_factory.all_port_names(options.platform)
+        if not port_names:
+            print "No port names match '%s'" % options.platform
+            return
+
+        self._baseline_optimizer = self._optimizer_class(tool, port_names)
+        self._port = tool.port_factory.get(port_names[0])
+        for test_name in self._port.tests(args):
+            self._analyze_baseline(options, test_name)
+
+
+class AbstractParallelRebaselineCommand(AbstractRebaseliningCommand):
+    # not overriding execute() - pylint: disable-msg=W0223
+
+    def _run_webkit_patch(self, args, verbose):
+        try:
+            verbose_args = ['--verbose'] if verbose else []
+            stderr = self._tool.executive.run_command([self._tool.path()] + verbose_args + args, cwd=self._tool.scm().checkout_root, return_stderr=True)
+            for line in stderr.splitlines():
+                print >> sys.stderr, line
+        except ScriptError, e:
+            _log.error(e)
+
+    def _builders_to_fetch_from(self, builders_to_check):
+        # This routine returns the subset of builders that will cover all of the baseline search paths
+        # used in the input list. In particular, if the input list contains both Release and Debug
+        # versions of a configuration, we *only* return the Release version (since we don't save
+        # debug versions of baselines).
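+        # For example (illustrative builder names), given ["MOCK Leopard", "MOCK Leopard (Debug)"]
+        # where both builders resolve to the same baseline search path, only the Release
+        # builder "MOCK Leopard" would be returned.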
+        release_builders = set()
+        debug_builders = set()
+        builders_to_fallback_paths = {}
+        for builder in builders_to_check:
+            port = self._tool.port_factory.get_from_builder_name(builder)
+            if port.test_configuration().build_type == 'Release':
+                release_builders.add(builder)
+            else:
+                debug_builders.add(builder)
+        for builder in list(release_builders) + list(debug_builders):
+            port = self._tool.port_factory.get_from_builder_name(builder)
+            fallback_path = port.baseline_search_path()
+            if fallback_path not in builders_to_fallback_paths.values():
+                builders_to_fallback_paths[builder] = fallback_path
+        return builders_to_fallback_paths.keys()
+
+    def _rebaseline_commands(self, test_list, options):
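+        # test_list maps a test name to a dict of builder name -> list of suffixes, e.g.
+        # (illustrative): {"fast/dom/some-test.html": {"WebKit Linux": ["txt", "png"]}}.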
+        path_to_webkit_patch = self._tool.path()
+        cwd = self._tool.scm().checkout_root
+        commands = []
+        for test in test_list:
+            for builder in self._builders_to_fetch_from(test_list[test]):
+                suffixes = ','.join(test_list[test][builder])
+                cmd_line = [path_to_webkit_patch, 'rebaseline-test-internal', '--suffixes', suffixes, '--builder', builder, '--test', test]
+                if options.move_overwritten_baselines:
+                    move_overwritten_baselines_to = builders.move_overwritten_baselines_to(builder)
+                    for platform in move_overwritten_baselines_to:
+                        cmd_line.extend(['--move-overwritten-baselines-to', platform])
+                if options.results_directory:
+                    cmd_line.extend(['--results-directory', options.results_directory])
+                if options.verbose:
+                    cmd_line.append('--verbose')
+                commands.append((cmd_line, cwd))
+        return commands
+
+    def _files_to_add(self, command_results):
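+        # Each rebaseline-test-internal invocation prints a JSON object on stdout, e.g.
+        # (illustrative path) {"add": ["LayoutTests/platform/chromium-linux/foo-expected.txt"]};
+        # gather the "add" entries from every command's output.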
+        files_to_add = set()
+        for output in [result[1].split('\n') for result in command_results]:
+            file_added = False
+            for line in output:
+                try:
+                    if line:
+                        files_to_add.update(json.loads(line)['add'])
+                        file_added = True
+                except ValueError:
+                    _log.debug('"%s" is not a JSON object, ignoring' % line)
+
+            if not file_added:
+                _log.debug('Could not add file based on output "%s"' % output)
+
+        return list(files_to_add)
+
+    def _optimize_baselines(self, test_list, verbose=False):
+        # We don't run this in parallel because modifying the SCM in parallel is unreliable.
+        for test in test_list:
+            all_suffixes = set()
+            for builder in self._builders_to_fetch_from(test_list[test]):
+                all_suffixes.update(test_list[test][builder])
+            # FIXME: We should propagate the platform options as well.
+            self._run_webkit_patch(['optimize-baselines', '--suffixes', ','.join(all_suffixes), test], verbose)
+
+    def _rebaseline(self, options, test_list):
+        for test, builders_to_check in sorted(test_list.items()):
+            _log.info("Rebaselining %s" % test)
+            for builder, suffixes in sorted(builders_to_check.items()):
+                _log.debug("  %s: %s" % (builder, ",".join(suffixes)))
+
+        commands = self._rebaseline_commands(test_list, options)
+        command_results = self._tool.executive.run_in_parallel(commands)
+
+        log_output = '\n'.join(result[2] for result in command_results).replace('\n\n', '\n')
+        for line in log_output.split('\n'):
+            if line:
+                print >> sys.stderr, line  # FIXME: Figure out how to log properly.
+
+        files_to_add = self._files_to_add(command_results)
+        if files_to_add:
+            self._tool.scm().add_list(files_to_add)
+
+        if options.optimize:
+            self._optimize_baselines(test_list, options.verbose)
+
+
+class RebaselineJson(AbstractParallelRebaselineCommand):
+    name = "rebaseline-json"
+    help_text = "Rebaseline based off JSON passed to stdin. Intended to only be called from other scripts."
+
+    def __init__(self):
+        super(RebaselineJson, self).__init__(options=[
+            self.move_overwritten_baselines_option,
+            self.no_optimize_option,
+            self.results_directory_option,
+            ])
+
+    def execute(self, options, args, tool):
+        self._rebaseline(options, json.loads(sys.stdin.read()))
+
+
+class RebaselineExpectations(AbstractParallelRebaselineCommand):
+    name = "rebaseline-expectations"
+    help_text = "Rebaselines the tests indicated in TestExpectations."
+
+    def __init__(self):
+        super(RebaselineExpectations, self).__init__(options=[
+            self.move_overwritten_baselines_option,
+            self.no_optimize_option,
+            ] + self.platform_options)
+        self._test_list = None
+
+    def _update_expectations_files(self, port_name):
+        port = self._tool.port_factory.get(port_name)
+
+        expectations = TestExpectations(port)
+        for path in port.expectations_dict():
+            if self._tool.filesystem.exists(path):
+                self._tool.filesystem.write_text_file(path, expectations.remove_rebaselined_tests(expectations.get_rebaselining_failures(), path))
+
+    def _tests_to_rebaseline(self, port):
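+        # Returns a dict mapping each test marked Rebaseline in TestExpectations to the
+        # set of baseline suffixes implied by its expectations, e.g. (illustrative):
+        # {"userscripts/another-test.html": set(["txt"])}.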
+        tests_to_rebaseline = {}
+        expectations = TestExpectations(port, include_overrides=True)
+        for test in expectations.get_rebaselining_failures():
+            tests_to_rebaseline[test] = TestExpectations.suffixes_for_expectations(expectations.get_expectations(test))
+        return tests_to_rebaseline
+
+    def _add_tests_to_rebaseline_for_port(self, port_name):
+        builder_name = builders.builder_name_for_port_name(port_name)
+        if not builder_name:
+            return
+        tests = self._tests_to_rebaseline(self._tool.port_factory.get(port_name)).items()
+
+        if tests:
+            _log.info("Retrieving results for %s from %s." % (port_name, builder_name))
+
+        for test_name, suffixes in tests:
+            _log.info("    %s (%s)" % (test_name, ','.join(suffixes)))
+            if test_name not in self._test_list:
+                self._test_list[test_name] = {}
+            self._test_list[test_name][builder_name] = suffixes
+
+    def execute(self, options, args, tool):
+        options.results_directory = None
+        self._test_list = {}
+        port_names = tool.port_factory.all_port_names(options.platform)
+        for port_name in port_names:
+            self._add_tests_to_rebaseline_for_port(port_name)
+        if not self._test_list:
+            _log.warning("Did not find any tests marked Rebaseline.")
+            return
+
+        self._rebaseline(options, self._test_list)
+
+        for port_name in port_names:
+            self._update_expectations_files(port_name)
+
+
+class Rebaseline(AbstractParallelRebaselineCommand):
+    name = "rebaseline"
+    help_text = "Rebaseline tests with results from the build bots. Shows the list of failing tests on the builders if no test names are provided."
+    argument_names = "[TEST_NAMES]"
+
+    def __init__(self):
+        super(Rebaseline, self).__init__(options=[
+            self.move_overwritten_baselines_option,
+            self.no_optimize_option,
+            # FIXME: should we support the platform options in addition to (or instead of) --builders?
+            self.suffixes_option,
+            optparse.make_option("--builders", default=None, action="append", help="Comma-separated-list of builders to pull new baselines from (can also be provided multiple times)"),
+            ])
+
+    def _builders_to_pull_from(self):
+        chromium_buildbot_builder_names = []
+        webkit_buildbot_builder_names = []
+        for name in builders.all_builder_names():
+            if self._tool.port_factory.get_from_builder_name(name).is_chromium():
+                chromium_buildbot_builder_names.append(name)
+            else:
+                webkit_buildbot_builder_names.append(name)
+
+        titles = ["build.webkit.org bots", "build.chromium.org bots"]
+        lists = [webkit_buildbot_builder_names, chromium_buildbot_builder_names]
+
+        chosen_names = self._tool.user.prompt_with_multiple_lists("Which builder to pull results from:", titles, lists, can_choose_multiple=True)
+        return [self._builder_with_name(name) for name in chosen_names]
+
+    def _builder_with_name(self, name):
+        return self._tool.buildbot_for_builder_name(name).builder_with_name(name)
+
+    def _tests_to_update(self, builder):
+        failing_tests = builder.latest_layout_test_results().tests_matching_failure_types([test_failures.FailureTextMismatch])
+        return self._tool.user.prompt_with_list("Which test(s) to rebaseline for %s:" % builder.name(), failing_tests, can_choose_multiple=True)
+
+    def execute(self, options, args, tool):
+        options.results_directory = None
+        if options.builders:
+            builders_to_check = []
+            for builder_names in options.builders:
+                builders_to_check += [self._builder_with_name(name) for name in builder_names.split(",")]
+        else:
+            builders_to_check = self._builders_to_pull_from()
+
+        test_list = {}
+        suffixes_to_update = options.suffixes.split(",")
+
+        for builder in builders_to_check:
+            tests = args or self._tests_to_update(builder)
+            for test in tests:
+                if test not in test_list:
+                    test_list[test] = {}
+                test_list[test][builder.name()] = suffixes_to_update
+
+        if options.verbose:
+            _log.debug("rebaseline-json: " + str(test_list))
+
+        self._rebaseline(options, test_list)
diff --git a/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py b/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py
new file mode 100644
index 0000000..d7dafb9
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py
@@ -0,0 +1,401 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.checkout.baselineoptimizer import BaselineOptimizer
+from webkitpy.common.net.buildbot.buildbot_mock import MockBuilder
+from webkitpy.common.system.executive_mock import MockExecutive2
+from webkitpy.thirdparty.mock import Mock
+from webkitpy.tool.commands.rebaseline import *
+from webkitpy.tool.mocktool import MockTool, MockOptions
+
+
+class _BaseTestCase(unittest.TestCase):
+    MOCK_WEB_RESULT = 'MOCK Web result, convert 404 to None=True'
+    WEB_PREFIX = 'http://example.com/f/builders/WebKit Mac10.7/results/layout-test-results'
+
+    command_constructor = None
+
+    def setUp(self):
+        self.tool = MockTool()
+        self.command = self.command_constructor()  # lint warns that command_constructor might not be set, but this is intentional; pylint: disable-msg=E1102
+        self.command.bind_to_tool(self.tool)
+        self.lion_port = self.tool.port_factory.get_from_builder_name("WebKit Mac10.7")
+        self.lion_expectations_path = self.lion_port.path_to_test_expectations_file()
+
+        # FIXME: we should override builders._exact_matches here to point to a set
+        # of test ports and restore the value in tearDown(), and that way the
+        # individual tests wouldn't have to worry about it.
+
+    def _expand(self, path):
+        if self.tool.filesystem.isabs(path):
+            return path
+        return self.tool.filesystem.join(self.lion_port.layout_tests_dir(), path)
+
+    def _read(self, path):
+        return self.tool.filesystem.read_text_file(self._expand(path))
+
+    def _write(self, path, contents):
+        self.tool.filesystem.write_text_file(self._expand(path), contents)
+
+    def _zero_out_test_expectations(self):
+        for port_name in self.tool.port_factory.all_port_names():
+            port = self.tool.port_factory.get(port_name)
+            for path in port.expectations_files():
+                self._write(path, '')
+        self.tool.filesystem.written_files = {}
+
+
+class TestRebaselineTest(_BaseTestCase):
+    command_constructor = RebaselineTest  # AKA webkit-patch rebaseline-test-internal
+
+    def setUp(self):
+        super(TestRebaselineTest, self).setUp()
+        self.options = MockOptions(builder="WebKit Mac10.7", test="userscripts/another-test.html", suffixes="txt",
+                                   move_overwritten_baselines_to=None, results_directory=None)
+
+    def test_baseline_directory(self):
+        command = self.command
+        self.assertEqual(command._baseline_directory("Apple Win XP Debug (Tests)"), "/mock-checkout/LayoutTests/platform/win-xp")
+        self.assertEqual(command._baseline_directory("Apple Win 7 Release (Tests)"), "/mock-checkout/LayoutTests/platform/win")
+        self.assertEqual(command._baseline_directory("Apple Lion Release WK1 (Tests)"), "/mock-checkout/LayoutTests/platform/mac-lion")
+        self.assertEqual(command._baseline_directory("Apple Lion Release WK2 (Tests)"), "/mock-checkout/LayoutTests/platform/mac-wk2")
+        self.assertEqual(command._baseline_directory("GTK Linux 32-bit Release"), "/mock-checkout/LayoutTests/platform/gtk")
+        self.assertEqual(command._baseline_directory("EFL Linux 64-bit Debug"), "/mock-checkout/LayoutTests/platform/efl-wk1")
+        self.assertEqual(command._baseline_directory("Qt Linux Release"), "/mock-checkout/LayoutTests/platform/qt")
+        self.assertEqual(command._baseline_directory("WebKit Mac10.7"), "/mock-checkout/LayoutTests/platform/chromium-mac-lion")
+        self.assertEqual(command._baseline_directory("WebKit Mac10.6"), "/mock-checkout/LayoutTests/platform/chromium-mac-snowleopard")
+
+    def test_rebaseline_updates_expectations_file_noop(self):
+        self._zero_out_test_expectations()
+        self._write(self.lion_expectations_path, """Bug(B) [ Mac Linux XP Debug ] fast/dom/Window/window-postmessage-clone-really-deep-array.html [ Pass ]
+Bug(A) [ Debug ] : fast/css/large-list-of-rules-crash.html [ Failure ]
+""")
+        self._write("fast/dom/Window/window-postmessage-clone-really-deep-array.html", "Dummy test contents")
+        self._write("fast/css/large-list-of-rules-crash.html", "Dummy test contents")
+        self._write("userscripts/another-test.html", "Dummy test contents")
+
+        self.options.suffixes = "png,wav,txt"
+        self.command._rebaseline_test_and_update_expectations(self.options)
+
+        self.assertEquals(self.tool.web.urls_fetched,
+            [self.WEB_PREFIX + '/userscripts/another-test-actual.png',
+             self.WEB_PREFIX + '/userscripts/another-test-actual.wav',
+             self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
+        new_expectations = self._read(self.lion_expectations_path)
+        self.assertEqual(new_expectations, """Bug(B) [ Mac Linux XP Debug ] fast/dom/Window/window-postmessage-clone-really-deep-array.html [ Pass ]
+Bug(A) [ Debug ] : fast/css/large-list-of-rules-crash.html [ Failure ]
+""")
+
+    def test_rebaseline_updates_expectations_file(self):
+        self._write(self.lion_expectations_path, "Bug(x) [ Mac ] userscripts/another-test.html [ ImageOnlyFailure ]\nbug(z) [ Linux ] userscripts/another-test.html [ ImageOnlyFailure ]\n")
+        self._write("userscripts/another-test.html", "Dummy test contents")
+
+        self.options.suffixes = 'png,wav,txt'
+        self.command._rebaseline_test_and_update_expectations(self.options)
+
+        self.assertEquals(self.tool.web.urls_fetched,
+            [self.WEB_PREFIX + '/userscripts/another-test-actual.png',
+             self.WEB_PREFIX + '/userscripts/another-test-actual.wav',
+             self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
+        new_expectations = self._read(self.lion_expectations_path)
+        self.assertEqual(new_expectations, "Bug(x) [ MountainLion SnowLeopard ] userscripts/another-test.html [ ImageOnlyFailure ]\nbug(z) [ Linux ] userscripts/another-test.html [ ImageOnlyFailure ]\n")
+
+    def test_rebaseline_does_not_include_overrides(self):
+        self._write(self.lion_expectations_path, "Bug(x) [ Mac ] userscripts/another-test.html [ ImageOnlyFailure ]\nBug(z) [ Linux ] userscripts/another-test.html [ ImageOnlyFailure ]\n")
+        self._write(self.lion_port.path_from_chromium_base('skia', 'skia_test_expectations.txt'), "Bug(y) [ Mac ] other-test.html [ Failure ]\n")
+        self._write("userscripts/another-test.html", "Dummy test contents")
+
+        self.options.suffixes = 'png,wav,txt'
+        self.command._rebaseline_test_and_update_expectations(self.options)
+
+        self.assertEquals(self.tool.web.urls_fetched,
+            [self.WEB_PREFIX + '/userscripts/another-test-actual.png',
+             self.WEB_PREFIX + '/userscripts/another-test-actual.wav',
+             self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
+
+        new_expectations = self._read(self.lion_expectations_path)
+        self.assertEqual(new_expectations, "Bug(x) [ MountainLion SnowLeopard ] userscripts/another-test.html [ ImageOnlyFailure ]\nBug(z) [ Linux ] userscripts/another-test.html [ ImageOnlyFailure ]\n")
+
+    def test_rebaseline_test(self):
+        self.command._rebaseline_test("WebKit Linux", "userscripts/another-test.html", None, "txt", self.WEB_PREFIX)
+        self.assertEquals(self.tool.web.urls_fetched, [self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
+
+    def test_rebaseline_test_with_results_directory(self):
+        self._write(self.lion_expectations_path, "Bug(x) [ Mac ] userscripts/another-test.html [ ImageOnlyFailure ]\nbug(z) [ Linux ] userscripts/another-test.html [ ImageOnlyFailure ]\n")
+        self.options.results_directory = '/tmp'
+        self.command._rebaseline_test_and_update_expectations(self.options)
+        self.assertEquals(self.tool.web.urls_fetched, ['file:///tmp/userscripts/another-test-actual.txt'])
+
+    def test_rebaseline_test_and_print_scm_changes(self):
+        self.command._print_scm_changes = True
+        self.command._scm_changes = {'add': [], 'delete': []}
+        self.tool._scm.exists = lambda x: False
+
+        self.command._rebaseline_test("WebKit Linux", "userscripts/another-test.html", None, "txt", None)
+
+        self.assertEquals(self.command._scm_changes, {'add': ['/mock-checkout/LayoutTests/platform/chromium-linux/userscripts/another-test-expected.txt'], 'delete': []})
+
+    def test_rebaseline_and_copy_test(self):
+        self._write("userscripts/another-test-expected.txt", "generic result")
+
+        self.command._rebaseline_test("WebKit Mac10.7", "userscripts/another-test.html", ["chromium-mac-snowleopard"], "txt", None)
+
+        self.assertEquals(self._read('platform/chromium-mac-lion/userscripts/another-test-expected.txt'), self.MOCK_WEB_RESULT)
+        self.assertEquals(self._read('platform/chromium-mac-snowleopard/userscripts/another-test-expected.txt'), 'generic result')
+
+    def test_rebaseline_and_copy_test_no_existing_result(self):
+        self.command._rebaseline_test("WebKit Mac10.7", "userscripts/another-test.html", ["chromium-mac-snowleopard"], "txt", None)
+
+        self.assertEquals(self._read('platform/chromium-mac-lion/userscripts/another-test-expected.txt'), self.MOCK_WEB_RESULT)
+        self.assertFalse(self.tool.filesystem.exists(self._expand('platform/chromium-mac-snowleopard/userscripts/another-test-expected.txt')))
+
+    def test_rebaseline_and_copy_test_with_lion_result(self):
+        self._write("platform/chromium-mac-lion/userscripts/another-test-expected.txt", "original lion result")
+
+        self.command._rebaseline_test("WebKit Mac10.7", "userscripts/another-test.html", ["chromium-mac-snowleopard"], "txt", self.WEB_PREFIX)
+
+        self.assertEquals(self.tool.web.urls_fetched, [self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
+        self.assertEquals(self._read("platform/chromium-mac-snowleopard/userscripts/another-test-expected.txt"), "original lion result")
+        self.assertEquals(self._read("platform/chromium-mac-lion/userscripts/another-test-expected.txt"), self.MOCK_WEB_RESULT)
+
+    def test_rebaseline_and_copy_no_overwrite_test(self):
+        self._write("platform/chromium-mac-lion/userscripts/another-test-expected.txt", "original lion result")
+        self._write("platform/chromium-mac-snowleopard/userscripts/another-test-expected.txt", "original snowleopard result")
+
+        self.command._rebaseline_test("WebKit Mac10.7", "userscripts/another-test.html", ["chromium-mac-snowleopard"], "txt", None)
+
+        self.assertEquals(self._read("platform/chromium-mac-snowleopard/userscripts/another-test-expected.txt"), "original snowleopard result")
+        self.assertEquals(self._read("platform/chromium-mac-lion/userscripts/another-test-expected.txt"), self.MOCK_WEB_RESULT)
+
+    def test_rebaseline_test_internal_with_move_overwritten_baselines_to(self):
+        self.tool.executive = MockExecutive2()
+
+        # FIXME: it's confusing that this is the test- port, and not the regular lion port. Really all of the tests should be using the test ports.
+        port = self.tool.port_factory.get('test-mac-snowleopard')
+        self._write(port._filesystem.join(port.layout_tests_dir(), 'platform/test-mac-snowleopard/failures/expected/image-expected.txt'), 'original snowleopard result')
+
+        old_exact_matches = builders._exact_matches
+        oc = OutputCapture()
+        try:
+            builders._exact_matches = {
+                "MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
+                "MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
+            }
+
+            options = MockOptions(optimize=True, builder="MOCK SnowLeopard", suffixes="txt",
+                move_overwritten_baselines_to=["test-mac-leopard"], verbose=True, test="failures/expected/image.html",
+                results_directory=None)
+
+            oc.capture_output()
+            self.command.execute(options, [], self.tool)
+        finally:
+            out, _, _ = oc.restore_output()
+            builders._exact_matches = old_exact_matches
+
+        self.assertEquals(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-mac-leopard/failures/expected/image-expected.txt')), 'original snowleopard result')
+        self.assertEquals(out, '{"add": []}\n')
+
+
+class TestRebaselineJson(_BaseTestCase):
+    command_constructor = RebaselineJson
+
+    def setUp(self):
+        super(TestRebaselineJson, self).setUp()
+        self.tool.executive = MockExecutive2()
+        self.old_exact_matches = builders._exact_matches
+        builders._exact_matches = {
+            "MOCK builder": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"]),
+                             "move_overwritten_baselines_to": ["test-mac-leopard"]},
+            "MOCK builder (Debug)": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier", "debug"])},
+        }
+
+    def tearDown(self):
+        builders._exact_matches = self.old_exact_matches
+        super(TestRebaselineJson, self).tearDown()
+
+    def test_rebaseline_all(self):
+        options = MockOptions(optimize=True, verbose=True, move_overwritten_baselines=False, results_directory=None)
+        self.command._rebaseline(options,  {"user-scripts/another-test.html": {"MOCK builder": ["txt", "png"]}})
+
+        # Note that we have one run_in_parallel() call followed by a run_command()
+        self.assertEquals(self.tool.executive.calls,
+            [[['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'user-scripts/another-test.html', '--verbose']],
+             ['echo', '--verbose', 'optimize-baselines', '--suffixes', 'txt,png', 'user-scripts/another-test.html']])
+
+    def test_rebaseline_debug(self):
+        options = MockOptions(optimize=True, verbose=True, move_overwritten_baselines=False, results_directory=None)
+        self.command._rebaseline(options,  {"user-scripts/another-test.html": {"MOCK builder (Debug)": ["txt", "png"]}})
+
+        # Note that we have one run_in_parallel() call followed by a run_command()
+        self.assertEquals(self.tool.executive.calls,
+            [[['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder (Debug)', '--test', 'user-scripts/another-test.html', '--verbose']],
+             ['echo', '--verbose', 'optimize-baselines', '--suffixes', 'txt,png', 'user-scripts/another-test.html']])
+
+    def test_move_overwritten(self):
+        options = MockOptions(optimize=True, verbose=True, move_overwritten_baselines=True, results_directory=None)
+        self.command._rebaseline(options,  {"user-scripts/another-test.html": {"MOCK builder": ["txt", "png"]}})
+
+        # Note that we have one run_in_parallel() call followed by a run_command()
+        self.assertEquals(self.tool.executive.calls,
+            [[['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'user-scripts/another-test.html', '--move-overwritten-baselines-to', 'test-mac-leopard', '--verbose']],
+             ['echo', '--verbose', 'optimize-baselines', '--suffixes', 'txt,png', 'user-scripts/another-test.html']])
+
+    def test_no_optimize(self):
+        options = MockOptions(optimize=False, verbose=True, move_overwritten_baselines=False, results_directory=None)
+        self.command._rebaseline(options,  {"user-scripts/another-test.html": {"MOCK builder (Debug)": ["txt", "png"]}})
+
+        # Note that we have only one run_in_parallel() call
+        self.assertEquals(self.tool.executive.calls,
+            [[['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder (Debug)', '--test', 'user-scripts/another-test.html', '--verbose']]])
+
+    def test_results_directory(self):
+        options = MockOptions(optimize=False, verbose=True, move_overwritten_baselines=False, results_directory='/tmp')
+        self.command._rebaseline(options,  {"user-scripts/another-test.html": {"MOCK builder": ["txt", "png"]}})
+
+        # Note that we have only one run_in_parallel() call
+        self.assertEquals(self.tool.executive.calls,
+            [[['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'user-scripts/another-test.html', '--results-directory', '/tmp', '--verbose']]])
+
+
+class TestRebaseline(_BaseTestCase):
+    # This command shares most of its logic with RebaselineJson, so these tests just test what is different.
+
+    command_constructor = Rebaseline  # AKA webkit-patch rebaseline
+
+    def test_tests_to_update(self):
+        build = Mock()
+        OutputCapture().assert_outputs(self, self.command._tests_to_update, [build])
+
+    def test_rebaseline(self):
+        self.command._builders_to_pull_from = lambda: [MockBuilder('MOCK builder')]
+        self.command._tests_to_update = lambda builder: ['mock/path/to/test.html']
+
+        self._zero_out_test_expectations()
+
+        old_exact_matches = builders._exact_matches
+        oc = OutputCapture()
+        try:
+            builders._exact_matches = {
+                "MOCK builder": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
+            }
+            oc.capture_output()
+            self.command.execute(MockOptions(optimize=False, builders=None, suffixes="txt,png", verbose=True, move_overwritten_baselines=False), [], self.tool)
+        finally:
+            oc.restore_output()
+            builders._exact_matches = old_exact_matches
+
+        calls = filter(lambda x: x != ['qmake', '-v'] and x[0] != 'perl', self.tool.executive.calls)
+        self.assertEquals(calls,
+            [[['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'mock/path/to/test.html', '--verbose']]])
+
+
+class TestRebaselineExpectations(_BaseTestCase):
+    command_constructor = RebaselineExpectations
+
+    def setUp(self):
+        super(TestRebaselineExpectations, self).setUp()
+        self.options = MockOptions(optimize=False, builders=None, suffixes=['txt'], verbose=False, platform=None,
+                                   move_overwritten_baselines=False, results_directory=None)
+
+    def test_rebaseline_expectations(self):
+        self._zero_out_test_expectations()
+
+        self.tool.executive = MockExecutive2()
+
+        self.command._tests_to_rebaseline = lambda port: {'userscripts/another-test.html': set(['txt']), 'userscripts/images.svg': set(['png'])}
+        self.command.execute(self.options, [], self.tool)
+
+        # FIXME: change this to use the test- ports.
+        calls = filter(lambda x: x != ['qmake', '-v'], self.tool.executive.calls)
+        self.assertEquals(len(calls), 1)
+        self.assertEquals(len(calls[0]), 26)
+
+    def test_rebaseline_expectations_noop(self):
+        self._zero_out_test_expectations()
+
+        oc = OutputCapture()
+        try:
+            oc.capture_output()
+            self.command.execute(self.options, [], self.tool)
+        finally:
+            _, _, logs = oc.restore_output()
+            self.assertEquals(self.tool.filesystem.written_files, {})
+            self.assertEquals(logs, 'Did not find any tests marked Rebaseline.\n')
+
+    def disabled_test_overrides_are_included_correctly(self):
+        # This tests that any tests marked as REBASELINE in the overrides are found, but
+        # that the overrides do not get written into the main file.
+        self._zero_out_test_expectations()
+
+        self._write(self.lion_expectations_path, '')
+        self.lion_port.expectations_dict = lambda: {
+            self.lion_expectations_path: '',
+            'overrides': ('Bug(x) userscripts/another-test.html [ Failure Rebaseline ]\n'
+                          'Bug(y) userscripts/test.html [ Crash ]\n')}
+        self._write('/userscripts/another-test.html', '')
+
+        self.assertEquals(self.command._tests_to_rebaseline(self.lion_port), {'userscripts/another-test.html': set(['png', 'txt', 'wav'])})
+        self.assertEquals(self._read(self.lion_expectations_path), '')
+
+
+class _FakeOptimizer(BaselineOptimizer):
+    def read_results_by_directory(self, baseline_name):
+        if baseline_name.endswith('txt'):
+            return {'LayoutTests/passes/text.html': '123456',
+                    'LayoutTests/platform/test-mac-leopard/passes/text.html': 'abcdef'}
+        return {}
+
+
+class TestAnalyzeBaselines(_BaseTestCase):
+    command_constructor = AnalyzeBaselines
+
+    def setUp(self):
+        super(TestAnalyzeBaselines, self).setUp()
+        self.port = self.tool.port_factory.get('test')
+        self.tool.port_factory.get = (lambda port_name=None, options=None: self.port)
+        self.lines = []
+        self.command._optimizer_class = _FakeOptimizer
+        self.command._write = (lambda msg: self.lines.append(msg))  # pylint bug warning about unnecessary lambda? pylint: disable-msg=W0108
+
+    def test_default(self):
+        self.command.execute(MockOptions(suffixes='txt', missing=False, platform=None), ['passes/text.html'], self.tool)
+        self.assertEquals(self.lines,
+            ['passes/text-expected.txt:',
+             '  (generic): 123456',
+             '  test-mac-leopard: abcdef'])
+
+    def test_missing_baselines(self):
+        self.command.execute(MockOptions(suffixes='png,txt', missing=True, platform=None), ['passes/text.html'], self.tool)
+        self.assertEquals(self.lines,
+            ['passes/text-expected.png: (no baselines found)',
+             'passes/text-expected.txt:',
+             '  (generic): 123456',
+             '  test-mac-leopard: abcdef'])
diff --git a/Tools/Scripts/webkitpy/tool/commands/rebaselineserver.py b/Tools/Scripts/webkitpy/tool/commands/rebaselineserver.py
new file mode 100644
index 0000000..09c6d0b
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/rebaselineserver.py
@@ -0,0 +1,98 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Starts a local HTTP server which displays layout test failures (given a test
+results directory), provides comparisons of expected and actual results (both
+images and text) and allows one-click rebaselining of tests."""
+
+from webkitpy.common import system
+from webkitpy.common.net.resultsjsonparser import for_each_test, JSONTestResult
+from webkitpy.layout_tests.layout_package import json_results_generator
+from webkitpy.tool.commands.abstractlocalservercommand import AbstractLocalServerCommand
+from webkitpy.tool.servers.rebaselineserver import get_test_baselines, RebaselineHTTPServer, STATE_NEEDS_REBASELINE
+
+
+class TestConfig(object):
+    def __init__(self, test_port, layout_tests_directory, results_directory, platforms, filesystem, scm):
+        self.test_port = test_port
+        self.layout_tests_directory = layout_tests_directory
+        self.results_directory = results_directory
+        self.platforms = platforms
+        self.filesystem = filesystem
+        self.scm = scm
+
+
+class RebaselineServer(AbstractLocalServerCommand):
+    name = "rebaseline-server"
+    help_text = __doc__
+    argument_names = "/path/to/results/directory"
+
+    server = RebaselineHTTPServer
+
+    def _gather_baselines(self, results_json):
+        # The rebaseline server and its associated JavaScript expect the tests subtree to
+        # be flat key-value pairs instead of a hierarchical tree.
+        # FIXME: make the rebaseline server use the hierarchical tree.
+        new_tests_subtree = {}
+
+        def gather_baselines_for_test(test_name, result_dict):
+            result = JSONTestResult(test_name, result_dict)
+            if result.did_pass_or_run_as_expected():
+                return
+            result_dict['state'] = STATE_NEEDS_REBASELINE
+            result_dict['baselines'] = get_test_baselines(test_name, self._test_config)
+            new_tests_subtree[test_name] = result_dict
+
+        for_each_test(results_json['tests'], gather_baselines_for_test)
+        results_json['tests'] = new_tests_subtree
+
+    def _prepare_config(self, options, args, tool):
+        results_directory = args[0]
+        filesystem = system.filesystem.FileSystem()
+        scm = self._tool.scm()
+
+        print 'Parsing full_results.json...'
+        results_json_path = filesystem.join(results_directory, 'full_results.json')
+        results_json = json_results_generator.load_json(filesystem, results_json_path)
+
+        port = tool.port_factory.get()
+        layout_tests_directory = port.layout_tests_dir()
+        platforms = filesystem.listdir(filesystem.join(layout_tests_directory, 'platform'))
+        self._test_config = TestConfig(port, layout_tests_directory, results_directory, platforms, filesystem, scm)
+
+        print 'Gathering current baselines...'
+        self._gather_baselines(results_json)
+
+        return {
+            'test_config': self._test_config,
+            "results_json": results_json,
+            "platforms_json": {
+                'platforms': platforms,
+                'defaultPlatform': port.name(),
+            },
+        }
diff --git a/Tools/Scripts/webkitpy/tool/commands/roll.py b/Tools/Scripts/webkitpy/tool/commands/roll.py
new file mode 100644
index 0000000..37481b2
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/roll.py
@@ -0,0 +1,74 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.commands.abstractsequencedcommand import AbstractSequencedCommand
+
+from webkitpy.tool import steps
+
+
+class RollChromiumDEPS(AbstractSequencedCommand):
+    name = "roll-chromium-deps"
+    help_text = "Updates Chromium DEPS (defaults to the last-known good revision of Chromium)"
+    argument_names = "[CHROMIUM_REVISION]"
+    steps = [
+        steps.UpdateChromiumDEPS,
+        steps.PrepareChangeLogForDEPSRoll,
+        steps.ConfirmDiff,
+        steps.Commit,
+    ]
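+    # Example invocation (illustrative revision): webkit-patch roll-chromium-deps 123456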
+
+    def _prepare_state(self, options, args, tool):
+        return {
+            "chromium_revision": (args and args[0]),
+        }
+
+
+class PostChromiumDEPSRoll(AbstractSequencedCommand):
+    name = "post-chromium-deps-roll"
+    help_text = "Posts a patch to update Chromium DEPS (revision defaults to the last-known good revision of Chromium)"
+    argument_names = "CHROMIUM_REVISION CHROMIUM_REVISION_NAME"
+    steps = [
+        steps.CleanWorkingDirectory,
+        steps.Update,
+        steps.UpdateChromiumDEPS,
+        steps.PrepareChangeLogForDEPSRoll,
+        steps.CreateBug,
+        steps.PostDiff,
+    ]
+
+    def _prepare_state(self, options, args, tool):
+        options.review = False
+        options.request_commit = True
+
+        chromium_revision = args[0]
+        chromium_revision_name = args[1]
+        return {
+            "chromium_revision": chromium_revision,
+            "bug_title": "Roll Chromium DEPS to %s" % chromium_revision_name,
+            "bug_description": "A DEPS roll a day keeps the build break away.",
+        }
diff --git a/Tools/Scripts/webkitpy/tool/commands/roll_unittest.py b/Tools/Scripts/webkitpy/tool/commands/roll_unittest.py
new file mode 100644
index 0000000..800bc5b
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/roll_unittest.py
@@ -0,0 +1,63 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.thirdparty.mock import Mock
+from webkitpy.tool.commands.commandtest import CommandsTest
+from webkitpy.tool.commands.roll import *
+from webkitpy.tool.mocktool import MockOptions, MockTool
+
+
+class RollCommandsTest(CommandsTest):
+    def test_update_chromium_deps(self):
+        expected_stderr = """Updating Chromium DEPS to 6764
+MOCK: MockDEPS.write_variable(chromium_rev, 6764)
+MOCK: user.open_url: file://...
+Was that diff correct?
+Committed r49824: <http://trac.webkit.org/changeset/49824>
+"""
+        self.assert_execute_outputs(RollChromiumDEPS(), [6764], expected_stderr=expected_stderr)
+
+    def test_update_chromium_deps_older_revision(self):
+        options = MockOptions(non_interactive=False)
+        expected_stderr = """Current Chromium DEPS revision 6564 is newer than 5764.
+ERROR: Unable to update Chromium DEPS
+"""
+        self.assert_execute_outputs(RollChromiumDEPS(), [5764], options=options, expected_stderr=expected_stderr, expected_exception=SystemExit)
+
+
+class PostRollCommandsTest(CommandsTest):
+    def test_prepare_state(self):
+        postroll = PostChromiumDEPSRoll()
+        options = MockOptions()
+        tool = MockTool()
+        lkgr_state = postroll._prepare_state(options, [None, "last-known good revision"], tool)
+        self.assertEquals(None, lkgr_state["chromium_revision"])
+        self.assertEquals("Roll Chromium DEPS to last-known good revision", lkgr_state["bug_title"])
+        revision_state = postroll._prepare_state(options, ["1234", "r1234"], tool)
+        self.assertEquals("1234", revision_state["chromium_revision"])
+        self.assertEquals("Roll Chromium DEPS to r1234", revision_state["bug_title"])
diff --git a/Tools/Scripts/webkitpy/tool/commands/sheriffbot.py b/Tools/Scripts/webkitpy/tool/commands/sheriffbot.py
new file mode 100644
index 0000000..d30da39
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/sheriffbot.py
@@ -0,0 +1,74 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.system.deprecated_logging import log
+from webkitpy.tool.bot.sheriff import Sheriff
+from webkitpy.tool.bot.irc_command import commands as irc_commands
+from webkitpy.tool.bot.ircbot import IRCBot
+from webkitpy.tool.commands.queues import AbstractQueue
+from webkitpy.tool.commands.stepsequence import StepSequenceErrorHandler
+
+
+class SheriffBot(AbstractQueue, StepSequenceErrorHandler):
+    name = "sheriff-bot"
+    watchers = AbstractQueue.watchers + [
+        "abarth@webkit.org",
+        "eric@webkit.org",
+    ]
+
+    # AbstractQueue methods
+
+    def begin_work_queue(self):
+        AbstractQueue.begin_work_queue(self)
+        self._sheriff = Sheriff(self._tool, self)
+        self._irc_bot = IRCBot("sheriffbot", self._tool, self._sheriff, irc_commands)
+        self._tool.ensure_irc_connected(self._irc_bot.irc_delegate())
+
+    def work_item_log_path(self, failure_map):
+        return None
+
+    def _is_old_failure(self, revision):
+        return self._tool.status_server.svn_revision(revision)
+
+    def next_work_item(self):
+        self._irc_bot.process_pending_messages()
+        return
+
+    def process_work_item(self, failure_map):
+        return True
+
+    def handle_unexpected_error(self, failure_map, message):
+        log(message)
+
+    # StepSequenceErrorHandler methods
+
+    @classmethod
+    def handle_script_error(cls, tool, state, script_error):
+        # Ideally we would post some information to IRC about what went wrong
+        # here, but we don't have the IRC password in the child process.
+        pass
diff --git a/Tools/Scripts/webkitpy/tool/commands/sheriffbot_unittest.py b/Tools/Scripts/webkitpy/tool/commands/sheriffbot_unittest.py
new file mode 100644
index 0000000..9aa57b1
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/sheriffbot_unittest.py
@@ -0,0 +1,33 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.commands.queuestest import QueuesTest
+
+
+class SheriffBotTest(QueuesTest):
+    pass  # No unittests at the moment.
diff --git a/Tools/Scripts/webkitpy/tool/commands/stepsequence.py b/Tools/Scripts/webkitpy/tool/commands/stepsequence.py
new file mode 100644
index 0000000..b666554
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/stepsequence.py
@@ -0,0 +1,83 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool import steps
+
+from webkitpy.common.checkout.scm import CheckoutNeedsUpdate
+from webkitpy.common.system.deprecated_logging import log
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.tool.bot.queueengine import QueueEngine
+
+
+class StepSequenceErrorHandler():
+    @classmethod
+    def handle_script_error(cls, tool, patch, script_error):
+        raise NotImplementedError, "subclasses must implement"
+
+    @classmethod
+    def handle_checkout_needs_update(cls, tool, state, options, error):
+        raise NotImplementedError, "subclasses must implement"
+
+
+class StepSequence(object):
+    def __init__(self, steps):
+        self._steps = steps or []
+
+    def options(self):
+        collected_options = [
+            steps.Options.parent_command,
+            steps.Options.quiet,
+        ]
+        for step in self._steps:
+            collected_options = collected_options + step.options()
+        # Remove duplicates.
+        collected_options = sorted(set(collected_options))
+        return collected_options
+
+    def _run(self, tool, options, state):
+        for step in self._steps:
+            step(tool, options).run(state)
+
+    def run_and_handle_errors(self, tool, options, state=None):
+        if not state:
+            state = {}
+        try:
+            self._run(tool, options, state)
+        except CheckoutNeedsUpdate, e:
+            log("Commit failed because the checkout is out of date.  Please update and try again.")
+            if options.parent_command:
+                command = tool.command_by_name(options.parent_command)
+                command.handle_checkout_needs_update(tool, state, options, e)
+            QueueEngine.exit_after_handled_error(e)
+        except ScriptError, e:
+            if not options.quiet:
+                log(e.message_with_output())
+            if options.parent_command:
+                command = tool.command_by_name(options.parent_command)
+                command.handle_script_error(tool, state, e)
+            QueueEngine.exit_after_handled_error(e)
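+
+
+# A minimal usage sketch (illustrative only; the step classes named here are examples of
+# steps defined in webkitpy.tool.steps, and tool/options come from the caller):
+#
+#   sequence = StepSequence([steps.ValidateChangeLogs, steps.PostDiff])
+#   sequence.run_and_handle_errors(tool, options, state={"bug_id": "50000"})
+#
+# run_and_handle_errors() runs each step in order; on CheckoutNeedsUpdate or ScriptError
+# it defers to the parent command (if one was named via options.parent_command) and then
+# exits through QueueEngine.exit_after_handled_error().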
diff --git a/Tools/Scripts/webkitpy/tool/commands/suggestnominations.py b/Tools/Scripts/webkitpy/tool/commands/suggestnominations.py
new file mode 100644
index 0000000..c197a11
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/suggestnominations.py
@@ -0,0 +1,247 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Copyright (c) 2011 Code Aurora Forum. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from optparse import make_option
+import re
+
+from webkitpy.common.checkout.changelog import ChangeLogEntry
+from webkitpy.common.config.committers import CommitterList
+from webkitpy.tool import steps
+from webkitpy.tool.grammar import join_with_separators
+from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+
+
+class SuggestNominations(AbstractDeclarativeCommand):
+    name = "suggest-nominations"
+    help_text = "Suggest contributors for committer/reviewer nominations"
+
+    def __init__(self):
+        options = [
+            make_option("--committer-minimum", action="store", dest="committer_minimum", type="int", default=10, help="Specify minimum patch count for Committer nominations."),
+            make_option("--reviewer-minimum", action="store", dest="reviewer_minimum", type="int", default=80, help="Specify minimum patch count for Reviewer nominations."),
+            make_option("--max-commit-age", action="store", dest="max_commit_age", type="int", default=9, help="Specify max commit age to consider for nominations (in months)."),
+            make_option("--show-commits", action="store_true", dest="show_commits", default=False, help="Show commit history with nomination suggestions."),
+        ]
+
+        AbstractDeclarativeCommand.__init__(self, options=options)
+        # FIXME: This should probably be on the tool somewhere.
+        self._committer_list = CommitterList()
+
+    _counters_by_name = {}
+    _counters_by_email = {}
+
+    def _init_options(self, options):
+        self.committer_minimum = options.committer_minimum
+        self.reviewer_minimum = options.reviewer_minimum
+        self.max_commit_age = options.max_commit_age
+        self.show_commits = options.show_commits
+        self.verbose = options.verbose
+
+    # FIXME: This should move to scm.py
+    def _recent_commit_messages(self):
+        git_log = self._tool.executive.run_command(['git', 'log', '--since="%s months ago"' % self.max_commit_age])
+        match_git_svn_id = re.compile(r"\n\n    git-svn-id:.*\n", re.MULTILINE)
+        match_get_log_lines = re.compile(r"^\S.*\n", re.MULTILINE)
+        match_leading_indent = re.compile(r"^[ ]{4}", re.MULTILINE)
+
+        messages = re.split(r"commit \w{40}", git_log)[1:]  # Ignore the first message which will be empty.
+        for message in messages:
+            # Strip the git-svn-id line and git's unindented header lines, then unindent the ChangeLog text.
+            (message, _) = match_git_svn_id.subn("", message)
+            (message, _) = match_get_log_lines.subn("", message)
+            (message, _) = match_leading_indent.subn("", message)
+            yield message.lstrip()  # Remove any leading newlines from the log message.
+
+    # e.g. Patch by Eric Seidel <eric@webkit.org> on 2011-09-15
+    patch_by_regexp = r'^Patch by (?P<name>.+?)\s+<(?P<email>[^<>]+)> on (?P<date>\d{4}-\d{2}-\d{2})$'
+
+    def _count_recent_patches(self):
+        # This entire block could be written as a map/reduce over the messages.
+        for message in self._recent_commit_messages():
+            # FIXME: This should use ChangeLogEntry to do the entire parse instead
+            # of grabbing at its regexps.
+            dateline_match = re.match(ChangeLogEntry.date_line_regexp, message, re.MULTILINE)
+            if not dateline_match:
+                # Modern commit messages don't just dump the ChangeLog entry, but rather
+                # have a special Patch by line for non-committers.
+                dateline_match = re.search(self.patch_by_regexp, message, re.MULTILINE)
+                if not dateline_match:
+                    continue
+
+            author_email = dateline_match.group("email")
+            if not author_email:
+                continue
+
+            # We only care about reviewed patches, so make sure it has a valid reviewer line.
+            reviewer_match = re.search(ChangeLogEntry.reviewed_by_regexp, message, re.MULTILINE)
+            # We might also want to validate the reviewer name against the committer list.
+            if not reviewer_match or not reviewer_match.group("reviewer"):
+                continue
+
+            author_name = dateline_match.group("name")
+            if not author_name:
+                continue
+
+            if re.search("([^a-zA-Z]and[^a-zA-Z])|(,)|(@)", author_name):
+                # The author field seems to contain multiple people, or invalid characters, so reject this entry.
+                continue
+
+            svn_id_match = re.search(ChangeLogEntry.svn_id_regexp, message, re.MULTILINE)
+            # Fall back to "unknown" when the commit has no git-svn-id line.
+            svn_id = svn_id_match.group("svnid") if svn_id_match else None
+            if not svn_id:
+                svn_id = "unknown"
+            commit_date = dateline_match.group("date")
+
+            # See if we already have a contributor with this name or email
+            counter_by_name = self._counters_by_name.get(author_name)
+            counter_by_email = self._counters_by_email.get(author_email)
+            if counter_by_name:
+                if counter_by_email:
+                    if counter_by_name != counter_by_email:
+                        # Merge these two counters.  This is for the case where we had
+                        # John Smith (jsmith@gmail.com) and Jonathan Smith (jsmith@apple.com)
+                        # and just found a John Smith (jsmith@apple.com).  Now we know the
+                        # two names are the same person.
+                        counter_by_name['names'] |= counter_by_email['names']
+                        counter_by_name['emails'] |= counter_by_email['emails']
+                        counter_by_name['count'] += counter_by_email.get('count', 0)
+                        self._counters_by_email[author_email] = counter_by_name
+                else:
+                    # Add email to the existing counter
+                    self._counters_by_email[author_email] = counter_by_name
+                    counter_by_name['emails'] |= set([author_email])
+            else:
+                if counter_by_email:
+                    # Add name to the existing counter
+                    self._counters_by_name[author_name] = counter_by_email
+                    counter_by_email['names'] |= set([author_name])
+                else:
+                    # Create new counter
+                    new_counter = {'names': set([author_name]), 'emails': set([author_email]), 'latest_name': author_name, 'latest_email': author_email, 'commits': ""}
+                    self._counters_by_name[author_name] = new_counter
+                    self._counters_by_email[author_email] = new_counter
+
+            assert(self._counters_by_name[author_name] == self._counters_by_email[author_email])
+            counter = self._counters_by_name[author_name]
+            counter['count'] = counter.get('count', 0) + 1
+
+            if svn_id.isdigit():
+                svn_id = "http://trac.webkit.org/changeset/" + svn_id
+            counter['commits'] += "  commit: %s on %s by %s (%s)\n" % (svn_id, commit_date, author_name, author_email)
+
+        return self._counters_by_email
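+    # For reference, each counter is a single dict shared between the two indexes above;
+    # a hypothetical entry after two patches might look like:
+    #   {'names': set(['John Smith']), 'emails': set(['jsmith@apple.com', 'jsmith@gmail.com']),
+    #    'latest_name': 'John Smith', 'latest_email': 'jsmith@apple.com',
+    #    'count': 2, 'commits': '  commit: <url> on <date> by <name> (<email>)\n...'}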
+
+    def _collect_nominations(self, counters_by_email):
+        nominations = []
+        for author_email, counter in counters_by_email.items():
+            if author_email != counter['latest_email']:
+                continue
+            roles = []
+
+            contributor = self._committer_list.contributor_by_email(author_email)
+
+            author_name = counter['latest_name']
+            patch_count = counter['count']
+
+            if patch_count >= self.committer_minimum and (not contributor or not contributor.can_commit):
+                roles.append("committer")
+            if patch_count >= self.reviewer_minimum and (not contributor or not contributor.can_review):
+                roles.append("reviewer")
+            if roles:
+                nominations.append({
+                    'roles': roles,
+                    'author_name': author_name,
+                    'author_email': author_email,
+                    'patch_count': patch_count,
+                })
+        return nominations
+
+    def _print_nominations(self, nominations):
+        def nomination_cmp(a_nomination, b_nomination):
+            roles_result = cmp(a_nomination['roles'], b_nomination['roles'])
+            if roles_result:
+                return -roles_result
+            count_result = cmp(a_nomination['patch_count'], b_nomination['patch_count'])
+            if count_result:
+                return -count_result
+            return cmp(a_nomination['author_name'], b_nomination['author_name'])
+
+        for nomination in sorted(nominations, nomination_cmp):
+            # This is a little bit of a hack, but it's convenient to just pass the nomination dictionary to the formatting operator.
+            nomination['roles_string'] = join_with_separators(nomination['roles']).upper()
+            print "%(roles_string)s: %(author_name)s (%(author_email)s) has %(patch_count)s reviewed patches" % nomination
+            counter = self._counters_by_email[nomination['author_email']]
+
+            if self.show_commits:
+                print counter['commits']
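+    # For reference, a printed nomination line looks like this (cf. suggestnominations_unittest.py):
+    #   REVIEWER: Xianzhu Wang (wangxianzhu@chromium.org) has 88 reviewed patches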
+
+    def _print_counts(self, counters_by_email):
+        def counter_cmp(a_tuple, b_tuple):
+            # split the tuples
+            # the second element is the "counter" structure
+            _, a_counter = a_tuple
+            _, b_counter = b_tuple
+
+            count_result = cmp(a_counter['count'], b_counter['count'])
+            if count_result:
+                return -count_result
+            return cmp(a_counter['latest_name'].lower(), b_counter['latest_name'].lower())
+
+        for author_email, counter in sorted(counters_by_email.items(), counter_cmp):
+            if author_email != counter['latest_email']:
+                continue
+            contributor = self._committer_list.contributor_by_email(author_email)
+            author_name = counter['latest_name']
+            patch_count = counter['count']
+            counter['names'] = counter['names'] - set([author_name])
+            counter['emails'] = counter['emails'] - set([author_email])
+
+            alias_list = []
+            for alias in counter['names']:
+                alias_list.append(alias)
+            for alias in counter['emails']:
+                alias_list.append(alias)
+            if alias_list:
+                print "CONTRIBUTOR: %s (%s) has %d reviewed patches %s" % (author_name, author_email, patch_count, "(aliases: " + ", ".join(alias_list) + ")")
+            else:
+                print "CONTRIBUTOR: %s (%s) has %d reviewed patches" % (author_name, author_email, patch_count)
+        return
+
+    def execute(self, options, args, tool):
+        self._init_options(options)
+        patch_counts = self._count_recent_patches()
+        nominations = self._collect_nominations(patch_counts)
+        self._print_nominations(nominations)
+        if self.verbose:
+            self._print_counts(patch_counts)
+
+
+if __name__ == "__main__":
+    SuggestNominations()
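+
+# In normal use this command is run through webkit-patch rather than directly, e.g.
+# (hypothetical invocation; the flags and their defaults are defined in __init__ above):
+#   webkit-patch suggest-nominations --reviewer-minimum=80 --committer-minimum=10 --show-commits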
diff --git a/Tools/Scripts/webkitpy/tool/commands/suggestnominations_unittest.py b/Tools/Scripts/webkitpy/tool/commands/suggestnominations_unittest.py
new file mode 100644
index 0000000..88be253
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/suggestnominations_unittest.py
@@ -0,0 +1,77 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+# Copyright (C) 2011 Code Aurora Forum. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.commands.commandtest import CommandsTest
+from webkitpy.tool.commands.suggestnominations import SuggestNominations
+from webkitpy.tool.mocktool import MockOptions, MockTool
+
+
+class SuggestNominationsTest(CommandsTest):
+
+    mock_git_output = """commit 60831dde5beb22f35aef305a87fca7b5f284c698
+Author: fpizlo@apple.com <fpizlo@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
+Date:   Thu Sep 15 19:56:21 2011 +0000
+
+    Value profiles collect no information for global variables
+    https://bugs.webkit.org/show_bug.cgi?id=68143
+
+    Reviewed by Geoffrey Garen.
+
+    git-svn-id: http://svn.webkit.org/repository/webkit/trunk@95219 268f45cc-cd09-0410-ab3c-d52691b4dbfc
+"""
+    mock_same_author_commit_message = """Value profiles collect no information for global variables
+https://bugs.webkit.org/show_bug.cgi?id=68143
+
+Reviewed by Geoffrey Garen."""
+
+    def test_recent_commit_messages(self):
+        tool = MockTool()
+        suggest_nominations = SuggestNominations()
+        suggest_nominations._init_options(options=MockOptions(reviewer_minimum=80, committer_minimum=10, max_commit_age=9, show_commits=False, verbose=False))
+        suggest_nominations.bind_to_tool(tool)
+
+        tool.executive.run_command = lambda command: self.mock_git_output
+        self.assertEqual(list(suggest_nominations._recent_commit_messages()), [self.mock_same_author_commit_message])
+
+    mock_non_committer_commit_message = """Let TestWebKitAPI work for chromium
+https://bugs.webkit.org/show_bug.cgi?id=67756
+
+Patch by Xianzhu Wang <wangxianzhu@chromium.org> on 2011-09-15
+Reviewed by Sam Weinig.
+
+Source/WebKit/chromium:
+
+* WebKit.gyp:"""
+
+    def test_basic(self):
+        expected_stdout = "REVIEWER: Xianzhu Wang (wangxianzhu@chromium.org) has 88 reviewed patches\n"
+        suggest_nominations = SuggestNominations()
+        suggest_nominations._init_options(options=MockOptions(reviewer_minimum=80, committer_minimum=10, max_commit_age=9, show_commits=False, verbose=False))
+        suggest_nominations._recent_commit_messages = lambda: [self.mock_non_committer_commit_message for _ in range(88)]
+        self.assert_execute_outputs(suggest_nominations, [], expected_stdout=expected_stdout, options=MockOptions(reviewer_minimum=80, committer_minimum=10, max_commit_age=9, show_commits=False, verbose=False))
diff --git a/Tools/Scripts/webkitpy/tool/commands/upload.py b/Tools/Scripts/webkitpy/tool/commands/upload.py
new file mode 100644
index 0000000..6b52e6c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/upload.py
@@ -0,0 +1,505 @@
+#!/usr/bin/env python
+# Copyright (c) 2009, 2010 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import re
+import sys
+
+from optparse import make_option
+
+from webkitpy.tool import steps
+
+from webkitpy.common.checkout.changelog import parse_bug_id_from_changelog
+from webkitpy.common.config.committers import CommitterList
+from webkitpy.common.system.deprecated_logging import error, log
+from webkitpy.common.system.user import User
+from webkitpy.thirdparty.mock import Mock
+from webkitpy.tool.commands.abstractsequencedcommand import AbstractSequencedCommand
+from webkitpy.tool.comments import bug_comment_from_svn_revision
+from webkitpy.tool.grammar import pluralize, join_with_separators
+from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+
+
+class CommitMessageForCurrentDiff(AbstractDeclarativeCommand):
+    name = "commit-message"
+    help_text = "Print a commit message suitable for the uncommitted changes"
+
+    def __init__(self):
+        options = [
+            steps.Options.git_commit,
+        ]
+        AbstractDeclarativeCommand.__init__(self, options=options)
+
+    def execute(self, options, args, tool):
+        # This command is a useful test to make sure commit_message_for_this_commit
+        # always returns the right value regardless of the current working directory.
+        print "%s" % tool.checkout().commit_message_for_this_commit(options.git_commit).message()
+
+
+class CleanPendingCommit(AbstractDeclarativeCommand):
+    name = "clean-pending-commit"
+    help_text = "Clear r+ on obsolete patches so they do not appear in the pending-commit list."
+
+    # NOTE: This was designed to be generic, but right now we're only processing patches from the pending-commit list, so only r+ matters.
+    def _flags_to_clear_on_patch(self, patch):
+        if not patch.is_obsolete():
+            return None
+        what_was_cleared = []
+        if patch.review() == "+":
+            if patch.reviewer():
+                what_was_cleared.append(u"%s's review+" % patch.reviewer().full_name)
+            else:
+                what_was_cleared.append("review+")
+        return join_with_separators(what_was_cleared)
+
+    def execute(self, options, args, tool):
+        committers = CommitterList()
+        for bug_id in tool.bugs.queries.fetch_bug_ids_from_pending_commit_list():
+            bug = self._tool.bugs.fetch_bug(bug_id)
+            patches = bug.patches(include_obsolete=True)
+            for patch in patches:
+                flags_to_clear = self._flags_to_clear_on_patch(patch)
+                if not flags_to_clear:
+                    continue
+                message = u"Cleared %s from obsolete attachment %s so that this bug does not appear in http://webkit.org/pending-commit." % (flags_to_clear, patch.id())
+                self._tool.bugs.obsolete_attachment(patch.id(), message)
+
+
+# FIXME: This should share more logic with AssignToCommitter and CleanPendingCommit
+class CleanReviewQueue(AbstractDeclarativeCommand):
+    name = "clean-review-queue"
+    help_text = "Clear r? on obsolete patches so they do not appear in the pending-review list."
+
+    def execute(self, options, args, tool):
+        queue_url = "http://webkit.org/pending-review"
+        # We do this inefficient dance to be more like webkit.org/pending-review
+        # bugs.queries.fetch_bug_ids_from_review_queue() doesn't return
+        # closed bugs, but folks using /pending-review will see them. :(
+        for patch_id in tool.bugs.queries.fetch_attachment_ids_from_review_queue():
+            patch = self._tool.bugs.fetch_attachment(patch_id)
+            if not patch.review() == "?":
+                continue
+            attachment_obsolete_modifier = ""
+            if patch.is_obsolete():
+                attachment_obsolete_modifier = "obsolete "
+            elif patch.bug().is_closed():
+                bug_closed_explanation = "  If you would like this patch reviewed, please attach it to a new bug (or re-open this bug before marking it for review again)."
+            else:
+                # Neither the patch was obsolete or the bug was closed, next patch...
+                continue
+            message = "Cleared review? from %sattachment %s so that this bug does not appear in %s.%s" % (attachment_obsolete_modifier, patch.id(), queue_url, bug_closed_explanation)
+            self._tool.bugs.obsolete_attachment(patch.id(), message)
+
+
+class AssignToCommitter(AbstractDeclarativeCommand):
+    name = "assign-to-committer"
+    help_text = "Assign bug to whoever attached the most recent r+'d patch"
+
+    def _patches_have_committers(self, reviewed_patches):
+        for patch in reviewed_patches:
+            if not patch.committer():
+                return False
+        return True
+
+    def _assign_bug_to_last_patch_attacher(self, bug_id):
+        committers = CommitterList()
+        bug = self._tool.bugs.fetch_bug(bug_id)
+        if not bug.is_unassigned():
+            assigned_to_email = bug.assigned_to_email()
+            log(u"Bug %s is already assigned to %s (%s)." % (bug_id, assigned_to_email, committers.committer_by_email(assigned_to_email)))
+            return
+
+        reviewed_patches = bug.reviewed_patches()
+        if not reviewed_patches:
+            log("Bug %s has no non-obsolete patches, ignoring." % bug_id)
+            return
+
+        # We only need to do anything with this bug if one of the r+'d patches does not have a valid committer (cq+ set).
+        if self._patches_have_committers(reviewed_patches):
+            log("All reviewed patches on bug %s already have commit-queue+, ignoring." % bug_id)
+            return
+
+        latest_patch = reviewed_patches[-1]
+        attacher_email = latest_patch.attacher_email()
+        committer = committers.committer_by_email(attacher_email)
+        if not committer:
+            log("Attacher %s is not a committer.  Bug %s likely needs commit-queue+." % (attacher_email, bug_id))
+            return
+
+        reassign_message = u"Attachment %s was posted by a committer and has review+, assigning to %s for commit." % (latest_patch.id(), committer.full_name)
+        self._tool.bugs.reassign_bug(bug_id, committer.bugzilla_email(), reassign_message)
+
+    def execute(self, options, args, tool):
+        for bug_id in tool.bugs.queries.fetch_bug_ids_from_pending_commit_list():
+            self._assign_bug_to_last_patch_attacher(bug_id)
+
+
+class ObsoleteAttachments(AbstractSequencedCommand):
+    name = "obsolete-attachments"
+    help_text = "Mark all attachments on a bug as obsolete"
+    argument_names = "BUGID"
+    steps = [
+        steps.ObsoletePatches,
+    ]
+
+    def _prepare_state(self, options, args, tool):
+        return { "bug_id" : args[0] }
+
+
+class AttachToBug(AbstractSequencedCommand):
+    name = "attach-to-bug"
+    help_text = "Attach the the file to the bug"
+    argument_names = "BUGID FILEPATH"
+    steps = [
+        steps.AttachToBug,
+    ]
+
+    def _prepare_state(self, options, args, tool):
+        state = {}
+        state["bug_id"] = args[0]
+        state["filepath"] = args[1]
+        return state
+
+
+class AbstractPatchUploadingCommand(AbstractSequencedCommand):
+    def _bug_id(self, options, args, tool, state):
+        # Prefer a bug id passed as an argument over a bug url in the diff (i.e. ChangeLogs).
+        bug_id = args and args[0]
+        if not bug_id:
+            changed_files = self._tool.scm().changed_files(options.git_commit)
+            state["changed_files"] = changed_files
+            bug_id = tool.checkout().bug_id_for_this_commit(options.git_commit, changed_files)
+        return bug_id
+
+    def _prepare_state(self, options, args, tool):
+        state = {}
+        state["bug_id"] = self._bug_id(options, args, tool, state)
+        if not state["bug_id"]:
+            error("No bug id passed and no bug url found in ChangeLogs.")
+        return state
+
+
+class Post(AbstractPatchUploadingCommand):
+    name = "post"
+    help_text = "Attach the current working directory diff to a bug as a patch file"
+    argument_names = "[BUGID]"
+    steps = [
+        steps.ValidateChangeLogs,
+        steps.CheckStyle,
+        steps.ConfirmDiff,
+        steps.ObsoletePatches,
+        steps.SuggestReviewers,
+        steps.EnsureBugIsOpenAndAssigned,
+        steps.PostDiff,
+    ]
+
+
+class LandSafely(AbstractPatchUploadingCommand):
+    name = "land-safely"
+    help_text = "Land the current diff via the commit-queue"
+    argument_names = "[BUGID]"
+    long_help = """land-safely updates the ChangeLog with the reviewer listed
+    in bugs.webkit.org for BUGID (or the bug ID detected from the ChangeLog).
+    The command then uploads the current diff to the bug and marks it for
+    commit by the commit-queue."""
+    show_in_main_help = True
+    steps = [
+        steps.UpdateChangeLogsWithReviewer,
+        steps.ValidateChangeLogs,
+        steps.ObsoletePatches,
+        steps.EnsureBugIsOpenAndAssigned,
+        steps.PostDiffForCommit,
+    ]
+
+
+class Prepare(AbstractSequencedCommand):
+    name = "prepare"
+    help_text = "Creates a bug (or prompts for an existing bug) and prepares the ChangeLogs"
+    argument_names = "[BUGID]"
+    steps = [
+        steps.PromptForBugOrTitle,
+        steps.CreateBug,
+        steps.PrepareChangeLog,
+    ]
+
+    def _prepare_state(self, options, args, tool):
+        bug_id = args and args[0]
+        return { "bug_id" : bug_id }
+
+
+class Upload(AbstractPatchUploadingCommand):
+    name = "upload"
+    help_text = "Automates the process of uploading a patch for review"
+    argument_names = "[BUGID]"
+    show_in_main_help = True
+    steps = [
+        steps.ValidateChangeLogs,
+        steps.CheckStyle,
+        steps.PromptForBugOrTitle,
+        steps.CreateBug,
+        steps.PrepareChangeLog,
+        steps.EditChangeLog,
+        steps.ConfirmDiff,
+        steps.ObsoletePatches,
+        steps.SuggestReviewers,
+        steps.EnsureBugIsOpenAndAssigned,
+        steps.PostDiff,
+    ]
+    long_help = """upload uploads the current diff to bugs.webkit.org.
+    If no bug id is provided, upload will create a bug.
+    If the current diff does not have a ChangeLog, upload
+    will prepare a ChangeLog.  Once a patch is ready, upload
+    will open the ChangeLogs for editing using the command in the
+    EDITOR environment variable and will display the diff using the
+    command in the PAGER environment variable."""
+
+    def _prepare_state(self, options, args, tool):
+        state = {}
+        state["bug_id"] = self._bug_id(options, args, tool, state)
+        return state
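+    # Hypothetical command-line use (the bug id is optional; without one, upload creates a bug):
+    #   webkit-patch upload 50000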
+
+
+class EditChangeLogs(AbstractSequencedCommand):
+    name = "edit-changelogs"
+    help_text = "Opens modified ChangeLogs in $EDITOR"
+    show_in_main_help = True
+    steps = [
+        steps.EditChangeLog,
+    ]
+
+
+class PostCommits(AbstractDeclarativeCommand):
+    name = "post-commits"
+    help_text = "Attach a range of local commits to bugs as patch files"
+    argument_names = "COMMITISH"
+
+    def __init__(self):
+        options = [
+            make_option("-b", "--bug-id", action="store", type="string", dest="bug_id", help="Specify bug id if no URL is provided in the commit log."),
+            make_option("--add-log-as-comment", action="store_true", dest="add_log_as_comment", default=False, help="Add commit log message as a comment when uploading the patch."),
+            make_option("-m", "--description", action="store", type="string", dest="description", help="Description string for the attachment (default: description from commit message)"),
+            steps.Options.obsolete_patches,
+            steps.Options.review,
+            steps.Options.request_commit,
+        ]
+        AbstractDeclarativeCommand.__init__(self, options=options, requires_local_commits=True)
+
+    def _comment_text_for_commit(self, options, commit_message, tool, commit_id):
+        comment_text = None
+        if (options.add_log_as_comment):
+            comment_text = commit_message.body(lstrip=True)
+            comment_text += "---\n"
+            comment_text += tool.scm().files_changed_summary_for_commit(commit_id)
+        return comment_text
+
+    def execute(self, options, args, tool):
+        commit_ids = tool.scm().commit_ids_from_commitish_arguments(args)
+        if len(commit_ids) > 10: # We could lower this limit, 10 is too many for one bug as-is.
+            error("webkit-patch does not support attaching %s at once.  Are you sure you passed the right commit range?" % (pluralize("patch", len(commit_ids))))
+
+        have_obsoleted_patches = set()
+        for commit_id in commit_ids:
+            commit_message = tool.scm().commit_message_for_local_commit(commit_id)
+
+            # Prefer --bug-id=, then a bug url in the commit message, then a bug url in the entire commit diff (i.e. ChangeLogs).
+            bug_id = options.bug_id or parse_bug_id_from_changelog(commit_message.message()) or parse_bug_id_from_changelog(tool.scm().create_patch(git_commit=commit_id))
+            if not bug_id:
+                log("Skipping %s: No bug id found in commit or specified with --bug-id." % commit_id)
+                continue
+
+            if options.obsolete_patches and bug_id not in have_obsoleted_patches:
+                state = { "bug_id": bug_id }
+                steps.ObsoletePatches(tool, options).run(state)
+                have_obsoleted_patches.add(bug_id)
+
+            diff = tool.scm().create_patch(git_commit=commit_id)
+            description = options.description or commit_message.description(lstrip=True, strip_url=True)
+            comment_text = self._comment_text_for_commit(options, commit_message, tool, commit_id)
+            tool.bugs.add_patch_to_bug(bug_id, diff, description, comment_text, mark_for_review=options.review, mark_for_commit_queue=options.request_commit)
+
+
+# FIXME: This command needs to be brought into the modern age with steps and CommitInfo.
+class MarkBugFixed(AbstractDeclarativeCommand):
+    name = "mark-bug-fixed"
+    help_text = "Mark the specified bug as fixed"
+    argument_names = "[SVN_REVISION]"
+    def __init__(self):
+        options = [
+            make_option("--bug-id", action="store", type="string", dest="bug_id", help="Specify bug id if no URL is provided in the commit log."),
+            make_option("--comment", action="store", type="string", dest="comment", help="Text to include in bug comment."),
+            make_option("--open", action="store_true", default=False, dest="open_bug", help="Open bug in default web browser (Mac only)."),
+            make_option("--update-only", action="store_true", default=False, dest="update_only", help="Add comment to the bug, but do not close it."),
+        ]
+        AbstractDeclarativeCommand.__init__(self, options=options)
+
+    # FIXME: We should be using checkout().changelog_entries_for_revision(...) instead here.
+    def _fetch_commit_log(self, tool, svn_revision):
+        if not svn_revision:
+            return tool.scm().last_svn_commit_log()
+        return tool.scm().svn_commit_log(svn_revision)
+
+    def _determine_bug_id_and_svn_revision(self, tool, bug_id, svn_revision):
+        commit_log = self._fetch_commit_log(tool, svn_revision)
+
+        if not bug_id:
+            bug_id = parse_bug_id_from_changelog(commit_log)
+
+        if not svn_revision:
+            match = re.search("^r(?P<svn_revision>\d+) \|", commit_log, re.MULTILINE)
+            if match:
+                svn_revision = match.group('svn_revision')
+
+        if not bug_id or not svn_revision:
+            not_found = []
+            if not bug_id:
+                not_found.append("bug id")
+            if not svn_revision:
+                not_found.append("svn revision")
+            error("Could not find %s on command-line or in %s."
+                  % (" or ".join(not_found), "r%s" % svn_revision if svn_revision else "last commit"))
+
+        return (bug_id, svn_revision)
+
+    def execute(self, options, args, tool):
+        bug_id = options.bug_id
+
+        svn_revision = args and args[0]
+        if svn_revision:
+            if re.match("^r[0-9]+$", svn_revision, re.IGNORECASE):
+                svn_revision = svn_revision[1:]
+            if not re.match("^[0-9]+$", svn_revision):
+                error("Invalid svn revision: '%s'" % svn_revision)
+
+        needs_prompt = False
+        if not bug_id or not svn_revision:
+            needs_prompt = True
+            (bug_id, svn_revision) = self._determine_bug_id_and_svn_revision(tool, bug_id, svn_revision)
+
+        log("Bug: <%s> %s" % (tool.bugs.bug_url_for_bug_id(bug_id), tool.bugs.fetch_bug_dictionary(bug_id)["title"]))
+        log("Revision: %s" % svn_revision)
+
+        if options.open_bug:
+            tool.user.open_url(tool.bugs.bug_url_for_bug_id(bug_id))
+
+        if needs_prompt:
+            if not tool.user.confirm("Is this correct?"):
+                self._exit(1)
+
+        bug_comment = bug_comment_from_svn_revision(svn_revision)
+        if options.comment:
+            bug_comment = "%s\n\n%s" % (options.comment, bug_comment)
+
+        if options.update_only:
+            log("Adding comment to Bug %s." % bug_id)
+            tool.bugs.post_comment_to_bug(bug_id, bug_comment)
+        else:
+            log("Adding comment to Bug %s and marking as Resolved/Fixed." % bug_id)
+            tool.bugs.close_bug_as_fixed(bug_id, bug_comment)
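+    # Hypothetical command-line use (the revision may be given with or without the leading 'r'):
+    #   webkit-patch mark-bug-fixed r9876 --bug-id=50000 --comment="Landed."
+    #   webkit-patch mark-bug-fixed --update-only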
+
+
+# FIXME: Requires unit test.  Blocking issue: too complex for now.
+class CreateBug(AbstractDeclarativeCommand):
+    name = "create-bug"
+    help_text = "Create a bug from local changes or local commits"
+    argument_names = "[COMMITISH]"
+
+    def __init__(self):
+        options = [
+            steps.Options.cc,
+            steps.Options.component,
+            make_option("--no-prompt", action="store_false", dest="prompt", default=True, help="Do not prompt for bug title and comment; use commit log instead."),
+            make_option("--no-review", action="store_false", dest="review", default=True, help="Do not mark the patch for review."),
+            make_option("--request-commit", action="store_true", dest="request_commit", default=False, help="Mark the patch as needing auto-commit after review."),
+        ]
+        AbstractDeclarativeCommand.__init__(self, options=options)
+
+    def create_bug_from_commit(self, options, args, tool):
+        commit_ids = tool.scm().commit_ids_from_commitish_arguments(args)
+        if len(commit_ids) > 3:
+            error("Are you sure you want to create one bug with %s patches?" % len(commit_ids))
+
+        commit_id = commit_ids[0]
+
+        bug_title = ""
+        comment_text = ""
+        if options.prompt:
+            (bug_title, comment_text) = self.prompt_for_bug_title_and_comment()
+        else:
+            commit_message = tool.scm().commit_message_for_local_commit(commit_id)
+            bug_title = commit_message.description(lstrip=True, strip_url=True)
+            comment_text = commit_message.body(lstrip=True)
+            comment_text += "---\n"
+            comment_text += tool.scm().files_changed_summary_for_commit(commit_id)
+
+        diff = tool.scm().create_patch(git_commit=commit_id)
+        bug_id = tool.bugs.create_bug(bug_title, comment_text, options.component, diff, "Patch", cc=options.cc, mark_for_review=options.review, mark_for_commit_queue=options.request_commit)
+
+        if bug_id and len(commit_ids) > 1:
+            options.bug_id = bug_id
+            options.obsolete_patches = False
+            # FIXME: We should pass through --no-comment switch as well.
+            PostCommits.execute(self, options, commit_ids[1:], tool)
+
+    def create_bug_from_patch(self, options, args, tool):
+        bug_title = ""
+        comment_text = ""
+        if options.prompt:
+            (bug_title, comment_text) = self.prompt_for_bug_title_and_comment()
+        else:
+            commit_message = tool.checkout().commit_message_for_this_commit(options.git_commit)
+            bug_title = commit_message.description(lstrip=True, strip_url=True)
+            comment_text = commit_message.body(lstrip=True)
+
+        diff = tool.scm().create_patch(options.git_commit)
+        bug_id = tool.bugs.create_bug(bug_title, comment_text, options.component, diff, "Patch", cc=options.cc, mark_for_review=options.review, mark_for_commit_queue=options.request_commit)
+
+    def prompt_for_bug_title_and_comment(self):
+        bug_title = User.prompt("Bug title: ")
+        # FIXME: User should provide a function for doing this multi-line prompt.
+        print "Bug comment (hit ^D on blank line to end):"
+        lines = sys.stdin.readlines()
+        try:
+            sys.stdin.seek(0, os.SEEK_END)
+        except IOError:
+            # Cygwin raises an Illegal Seek (errno 29) exception when the above
+            # seek() call is made. Ignoring it seems to cause no harm.
+            # FIXME: Figure out a way to avoid the exception in the first
+            # place.
+            pass
+        comment_text = "".join(lines)
+        return (bug_title, comment_text)
+
+    def execute(self, options, args, tool):
+        if len(args):
+            if (not tool.scm().supports_local_commits()):
+                error("Extra arguments not supported; patch is taken from working directory.")
+            self.create_bug_from_commit(options, args, tool)
+        else:
+            self.create_bug_from_patch(options, args, tool)
diff --git a/Tools/Scripts/webkitpy/tool/commands/upload_unittest.py b/Tools/Scripts/webkitpy/tool/commands/upload_unittest.py
new file mode 100644
index 0000000..185bb97
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/upload_unittest.py
@@ -0,0 +1,149 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.thirdparty.mock import Mock
+from webkitpy.tool.commands.commandtest import CommandsTest
+from webkitpy.tool.commands.upload import *
+from webkitpy.tool.mocktool import MockOptions, MockTool
+
+class UploadCommandsTest(CommandsTest):
+    def test_commit_message_for_current_diff(self):
+        tool = MockTool()
+        expected_stdout = "This is a fake commit message that is at least 50 characters.\n"
+        self.assert_execute_outputs(CommitMessageForCurrentDiff(), [], expected_stdout=expected_stdout, tool=tool)
+
+    def test_clean_pending_commit(self):
+        self.assert_execute_outputs(CleanPendingCommit(), [])
+
+    def test_assign_to_committer(self):
+        tool = MockTool()
+        expected_stderr = """Warning, attachment 10001 on bug 50000 has invalid committer (non-committer@example.com)
+MOCK reassign_bug: bug_id=50000, assignee=eric@webkit.org
+-- Begin comment --
+Attachment 10001 was posted by a committer and has review+, assigning to Eric Seidel for commit.
+-- End comment --
+Bug 50003 is already assigned to foo@foo.com (None).
+Bug 50002 has no non-obsolete patches, ignoring.
+"""
+        self.assert_execute_outputs(AssignToCommitter(), [], expected_stderr=expected_stderr, tool=tool)
+
+    def test_obsolete_attachments(self):
+        expected_stderr = "Obsoleting 2 old patches on bug 50000\n"
+        self.assert_execute_outputs(ObsoleteAttachments(), [50000], expected_stderr=expected_stderr)
+
+    def test_post(self):
+        options = MockOptions()
+        options.cc = None
+        options.check_style = True
+        options.check_style_filter = None
+        options.comment = None
+        options.description = "MOCK description"
+        options.request_commit = False
+        options.review = True
+        options.suggest_reviewers = False
+        expected_stderr = """MOCK: user.open_url: file://...
+Was that diff correct?
+Obsoleting 2 old patches on bug 50000
+MOCK reassign_bug: bug_id=50000, assignee=None
+MOCK add_patch_to_bug: bug_id=50000, description=MOCK description, mark_for_review=True, mark_for_commit_queue=False, mark_for_landing=False
+MOCK: user.open_url: http://example.com/50000
+"""
+        self.assert_execute_outputs(Post(), [50000], options=options, expected_stderr=expected_stderr)
+
+    def test_attach_to_bug(self):
+        options = MockOptions()
+        options.comment = "extra comment"
+        options.description = "file description"
+        expected_stderr = """MOCK add_attachment_to_bug: bug_id=50000, description=file description filename=None mimetype=None
+-- Begin comment --
+extra comment
+-- End comment --
+"""
+        self.assert_execute_outputs(AttachToBug(), [50000, "path/to/file.txt", "file description"], options=options, expected_stderr=expected_stderr)
+
+    def test_attach_to_bug_no_description_or_comment(self):
+        options = MockOptions()
+        options.comment = None
+        options.description = None
+        expected_stderr = """MOCK add_attachment_to_bug: bug_id=50000, description=file.txt filename=None mimetype=None
+"""
+        self.assert_execute_outputs(AttachToBug(), [50000, "path/to/file.txt"], options=options, expected_stderr=expected_stderr)
+
+    def test_land_safely(self):
+        expected_stderr = "Obsoleting 2 old patches on bug 50000\nMOCK reassign_bug: bug_id=50000, assignee=None\nMOCK add_patch_to_bug: bug_id=50000, description=Patch for landing, mark_for_review=False, mark_for_commit_queue=False, mark_for_landing=True\n"
+        self.assert_execute_outputs(LandSafely(), [50000], expected_stderr=expected_stderr)
+
+    def test_prepare_diff_with_arg(self):
+        self.assert_execute_outputs(Prepare(), [50000])
+
+    def test_prepare(self):
+        expected_stderr = "MOCK create_bug\nbug_title: Mock user response\nbug_description: Mock user response\ncomponent: MOCK component\ncc: MOCK cc\n"
+        self.assert_execute_outputs(Prepare(), [], expected_stderr=expected_stderr)
+
+    def test_upload(self):
+        options = MockOptions()
+        options.cc = None
+        options.check_style = True
+        options.check_style_filter = None
+        options.comment = None
+        options.description = "MOCK description"
+        options.request_commit = False
+        options.review = True
+        options.suggest_reviewers = False
+        expected_stderr = """MOCK: user.open_url: file://...
+Was that diff correct?
+Obsoleting 2 old patches on bug 50000
+MOCK reassign_bug: bug_id=50000, assignee=None
+MOCK add_patch_to_bug: bug_id=50000, description=MOCK description, mark_for_review=True, mark_for_commit_queue=False, mark_for_landing=False
+MOCK: user.open_url: http://example.com/50000
+"""
+        self.assert_execute_outputs(Upload(), [50000], options=options, expected_stderr=expected_stderr)
+
+    def test_mark_bug_fixed(self):
+        tool = MockTool()
+        tool._scm.last_svn_commit_log = lambda: "r9876 |"
+        options = Mock()
+        options.bug_id = 50000
+        options.comment = "MOCK comment"
+        expected_stderr = """Bug: <http://example.com/50000> Bug with two r+'d and cq+'d patches, one of which has an invalid commit-queue setter.
+Revision: 9876
+MOCK: user.open_url: http://example.com/50000
+Is this correct?
+Adding comment to Bug 50000.
+MOCK bug comment: bug_id=50000, cc=None
+--- Begin comment ---
+MOCK comment
+
+Committed r9876: <http://trac.webkit.org/changeset/9876>
+--- End comment ---
+
+"""
+        self.assert_execute_outputs(MarkBugFixed(), [], expected_stderr=expected_stderr, tool=tool, options=options)
+
+    def test_edit_changelog(self):
+        self.assert_execute_outputs(EditChangeLogs(), [])
diff --git a/Tools/Scripts/webkitpy/tool/comments.py b/Tools/Scripts/webkitpy/tool/comments.py
new file mode 100755
index 0000000..771953e
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/comments.py
@@ -0,0 +1,42 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# A tool for automating dealing with bugzilla, posting patches, committing
+# patches, etc.
+
+from webkitpy.common.config import urls
+
+
+def bug_comment_from_svn_revision(svn_revision):
+    return "Committed r%s: <%s>" % (svn_revision, urls.view_revision_url(svn_revision))
+
+
+def bug_comment_from_commit_text(scm, commit_text):
+    svn_revision = scm.svn_revision_from_commit_text(commit_text)
+    return bug_comment_from_svn_revision(svn_revision)
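
For reference, a minimal sketch of how these two helpers are meant to be used. The expected output shape follows the mock output shown earlier in this patch; it assumes urls.view_revision_url() builds the Trac changeset URL.

    from webkitpy.tool.comments import bug_comment_from_svn_revision

    # Expected to yield something like:
    #   Committed r9876: <http://trac.webkit.org/changeset/9876>
    print bug_comment_from_svn_revision(9876)
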
diff --git a/Tools/Scripts/webkitpy/tool/grammar.py b/Tools/Scripts/webkitpy/tool/grammar.py
new file mode 100644
index 0000000..8db9826
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/grammar.py
@@ -0,0 +1,54 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import re
+
+
+def plural(noun):
+    # This is a dumb plural() implementation that is just enough for our uses.
+    if re.search("h$", noun):
+        return noun + "es"
+    else:
+        return noun + "s"
+
+
+def pluralize(noun, count):
+    if count != 1:
+        noun = plural(noun)
+    return "%d %s" % (count, noun)
+
+
+def join_with_separators(list_of_strings, separator=', ', only_two_separator=" and ", last_separator=', and '):
+    if not list_of_strings:
+        return ""
+    if len(list_of_strings) == 1:
+        return list_of_strings[0]
+    if len(list_of_strings) == 2:
+        return only_two_separator.join(list_of_strings)
+    return "%s%s%s" % (separator.join(list_of_strings[:-1]), last_separator, list_of_strings[-1])
diff --git a/Tools/Scripts/webkitpy/tool/grammar_unittest.py b/Tools/Scripts/webkitpy/tool/grammar_unittest.py
new file mode 100644
index 0000000..cab71db
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/grammar_unittest.py
@@ -0,0 +1,41 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.tool.grammar import join_with_separators
+
+class GrammarTest(unittest.TestCase):
+
+    def test_join_with_separators(self):
+        self.assertEqual(join_with_separators(["one"]), "one")
+        self.assertEqual(join_with_separators(["one", "two"]), "one and two")
+        self.assertEqual(join_with_separators(["one", "two", "three"]), "one, two, and three")
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/tool/main.py b/Tools/Scripts/webkitpy/tool/main.py
new file mode 100755
index 0000000..68348a0
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/main.py
@@ -0,0 +1,108 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# A tool for automating interactions with Bugzilla: posting patches, committing patches, etc.
+
+from optparse import make_option
+import os
+import threading
+
+from webkitpy.common.config.ports import DeprecatedPort
+from webkitpy.common.host import Host
+from webkitpy.common.net.irc import ircproxy
+from webkitpy.common.net.statusserver import StatusServer
+from webkitpy.tool.multicommandtool import MultiCommandTool
+from webkitpy.tool import commands
+
+
+class WebKitPatch(MultiCommandTool, Host):
+    global_options = [
+        make_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="enable all logging"),
+        make_option("-d", "--directory", action="append", dest="patch_directories", default=[], help="Directory to look at for changed files"),
+        make_option("--status-host", action="store", dest="status_host", type="string", help="Hostname (e.g. localhost or commit.webkit.org) where status updates should be posted."),
+        make_option("--bot-id", action="store", dest="bot_id", type="string", help="Identifier for this bot (if multiple bots are running for a queue)"),
+        make_option("--irc-password", action="store", dest="irc_password", type="string", help="Password to use when communicating via IRC."),
+        make_option("--port", action="store", dest="port", default=None, help="Specify a port (e.g., mac, qt, gtk, ...)."),
+    ]
+
+    def __init__(self, path):
+        MultiCommandTool.__init__(self)
+        Host.__init__(self)
+        self._path = path
+        self.status_server = StatusServer()
+
+        self.wakeup_event = threading.Event()
+        self._irc = None
+        self._deprecated_port = None
+
+    # FIXME: Rename this to deprecated_port().
+    def port(self):
+        return self._deprecated_port
+
+    def path(self):
+        return self._path
+
+    def ensure_irc_connected(self, irc_delegate):
+        if not self._irc:
+            self._irc = ircproxy.IRCProxy(irc_delegate)
+
+    def irc(self):
+        # We don't automatically construct IRCProxy here because constructing
+        # IRCProxy actually connects to IRC.  We want clients to explicitly
+        # connect to IRC.
+        return self._irc
+
+    def command_completed(self):
+        if self._irc:
+            self._irc.disconnect()
+
+    def should_show_in_main_help(self, command):
+        if not command.show_in_main_help:
+            return False
+        if command.requires_local_commits:
+            return self.scm().supports_local_commits()
+        return True
+
+    # FIXME: This may be unnecessary since we pass global options to all commands during execute() as well.
+    def handle_global_options(self, options):
+        self.initialize_scm(options.patch_directories)
+        if options.status_host:
+            self.status_server.set_host(options.status_host)
+        if options.bot_id:
+            self.status_server.set_bot_id(options.bot_id)
+        if options.irc_password:
+            self.irc_password = options.irc_password
+        # If options.port is None, we'll get the default port for this platform.
+        self._deprecated_port = DeprecatedPort.port(options.port)
+
+    def should_execute_command(self, command):
+        if command.requires_local_commits and not self.scm().supports_local_commits():
+            failure_reason = "%s requires local commits using %s in %s." % (command.name, self.scm().display_name(), self.scm().checkout_root)
+            return (False, failure_reason)
+        return (True, None)
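
A minimal sketch of how WebKitPatch is expected to be driven, assuming a thin entry-point script along the lines of Tools/Scripts/webkit-patch (that script is not part of this hunk):

    import sys

    from webkitpy.tool.main import WebKitPatch

    # MultiCommandTool expects global options before the command name:
    #   webkit-patch [global options] COMMAND [command options]
    # e.g. "webkit-patch --verbose help --all-commands"
    sys.exit(WebKitPatch(__file__).main(sys.argv))
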
diff --git a/Tools/Scripts/webkitpy/tool/mocktool.py b/Tools/Scripts/webkitpy/tool/mocktool.py
new file mode 100644
index 0000000..b8f0976
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/mocktool.py
@@ -0,0 +1,88 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import threading
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.common.net.buildbot.buildbot_mock import MockBuildBot
+from webkitpy.common.net.statusserver_mock import MockStatusServer
+from webkitpy.common.net.irc.irc_mock import MockIRC
+
+# FIXME: Old-style "Ports" need to die and be replaced by modern layout_tests.port which needs to move to common.
+from webkitpy.common.config.ports_mock import MockPort
+
+
+# FIXME: We should just replace this with optparse.Values(defaults=kwargs)
+class MockOptions(object):
+    """Mock implementation of optparse.Values."""
+
+    def __init__(self, **kwargs):
+        # The caller can set option values using keyword arguments. We don't
+        # set any values by default because we don't know how this
+        # object will be used. Generally speaking, unit tests should
+        # subclass this or provide wrapper functions that set a common
+        # set of options.
+        self.update(**kwargs)
+
+    def update(self, **kwargs):
+        self.__dict__.update(**kwargs)
+        return self
+
+    def ensure_value(self, key, value):
+        if getattr(self, key, None) is None:
+            self.__dict__[key] = value
+        return self.__dict__[key]
+
+
+# FIXME: This should be renamed MockWebKitPatch.
+class MockTool(MockHost):
+    def __init__(self, *args, **kwargs):
+        MockHost.__init__(self, *args, **kwargs)
+
+        self._deprecated_port = MockPort()
+        self.status_server = MockStatusServer()
+
+        self._irc = None
+        self.irc_password = "MOCK irc password"
+        self.wakeup_event = threading.Event()
+
+    def port(self):
+        return self._deprecated_port
+
+    def path(self):
+        return "echo"
+
+    def ensure_irc_connected(self, delegate):
+        if not self._irc:
+            self._irc = MockIRC()
+
+    def irc(self):
+        return self._irc
+
+    def buildbot_for_builder_name(self, name):
+        return MockBuildBot()
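
A short sketch of MockOptions in use; the behaviour follows from the class above, and mocktool_unittest.py below covers the same semantics in test form:

    from webkitpy.tool.mocktool import MockOptions

    options = MockOptions(confirm=True)
    options.confirm                         # True
    options.update(comment=None)            # returns self, so updates can be chained
    options.ensure_value("reviewers", [])   # unset, so it is set to [] and returned
    options.ensure_value("confirm", False)  # already True, so the existing value is kept
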
diff --git a/Tools/Scripts/webkitpy/tool/mocktool_unittest.py b/Tools/Scripts/webkitpy/tool/mocktool_unittest.py
new file mode 100644
index 0000000..cceaa2e
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/mocktool_unittest.py
@@ -0,0 +1,59 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from mocktool import MockOptions
+
+
+class MockOptionsTest(unittest.TestCase):
+    # MockOptions() should implement the same semantics as
+    # optparse.Values.
+
+    def test_get__set(self):
+        # Test that we can still set options after we construct the
+        # object.
+        options = MockOptions()
+        options.foo = 'bar'
+        self.assertEqual(options.foo, 'bar')
+
+    def test_get__unset(self):
+        # Test that unset options raise an exception (regular Mock
+        # objects return an object and hence are different from
+        # optparse.Values()).
+        options = MockOptions()
+        self.assertRaises(AttributeError, lambda: options.foo)
+
+    def test_kwarg__set(self):
+        # Test that keyword arguments work in the constructor.
+        options = MockOptions(foo='bar')
+        self.assertEqual(options.foo, 'bar')
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/tool/multicommandtool.py b/Tools/Scripts/webkitpy/tool/multicommandtool.py
new file mode 100644
index 0000000..38c410c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/multicommandtool.py
@@ -0,0 +1,317 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# MultiCommandTool provides a framework for writing svn-like/git-like tools
+# which are called with the following format:
+# tool-name [global options] command-name [command options]
+
+import sys
+
+from optparse import OptionParser, IndentedHelpFormatter, SUPPRESS_USAGE, make_option
+
+from webkitpy.tool.grammar import pluralize
+from webkitpy.common.system.deprecated_logging import log
+
+
+class TryAgain(Exception):
+    pass
+
+
+class Command(object):
+    name = None
+    show_in_main_help = False
+    def __init__(self, help_text, argument_names=None, options=None, long_help=None, requires_local_commits=False):
+        self.help_text = help_text
+        self.long_help = long_help
+        self.argument_names = argument_names
+        self.required_arguments = self._parse_required_arguments(argument_names)
+        self.options = options
+        self.requires_local_commits = requires_local_commits
+        self._tool = None
+        # option_parser can be overridden by the tool using set_option_parser.
+        # This default parser will be used for standalone_help printing.
+        self.option_parser = HelpPrintingOptionParser(usage=SUPPRESS_USAGE, add_help_option=False, option_list=self.options)
+
+    def _exit(self, code):
+        sys.exit(code)
+
+    # This design is slightly awkward, but we need the tool to be able
+    # to create and modify the option_parser
+    # before it knows what Command to run.
+    def set_option_parser(self, option_parser):
+        self.option_parser = option_parser
+        self._add_options_to_parser()
+
+    def _add_options_to_parser(self):
+        options = self.options or []
+        for option in options:
+            self.option_parser.add_option(option)
+
+    # The tool calls bind_to_tool on each Command after adding it to its list.
+    def bind_to_tool(self, tool):
+        # Command instances can only be bound to one tool at a time.
+        if self._tool and tool != self._tool:
+            raise Exception("Command already bound to tool!")
+        self._tool = tool
+
+    @staticmethod
+    def _parse_required_arguments(argument_names):
+        required_args = []
+        if not argument_names:
+            return required_args
+        split_args = argument_names.split(" ")
+        for argument in split_args:
+            if argument[0] == '[':
+                # For now our parser is rather dumb.  Do some minimal validation that
+                # we haven't confused it.
+                if argument[-1] != ']':
+                    raise Exception("Failure to parse argument string %s.  Argument %s is missing ending ]" % (argument_names, argument))
+            else:
+                required_args.append(argument)
+        return required_args
+
+    def name_with_arguments(self):
+        usage_string = self.name
+        if self.options:
+            usage_string += " [options]"
+        if self.argument_names:
+            usage_string += " " + self.argument_names
+        return usage_string
+
+    def parse_args(self, args):
+        return self.option_parser.parse_args(args)
+
+    def check_arguments_and_execute(self, options, args, tool=None):
+        if len(args) < len(self.required_arguments):
+            log("%s required, %s provided.  Provided: %s  Required: %s\nSee '%s help %s' for usage." % (
+                pluralize("argument", len(self.required_arguments)),
+                pluralize("argument", len(args)),
+                "'%s'" % " ".join(args),
+                " ".join(self.required_arguments),
+                tool.name(),
+                self.name))
+            return 1
+        return self.execute(options, args, tool) or 0
+
+    def standalone_help(self):
+        help_text = self.name_with_arguments().ljust(len(self.name_with_arguments()) + 3) + self.help_text + "\n\n"
+        if self.long_help:
+            help_text += "%s\n\n" % self.long_help
+        help_text += self.option_parser.format_option_help(IndentedHelpFormatter())
+        return help_text
+
+    def execute(self, options, args, tool):
+        raise NotImplementedError("subclasses must implement")
+
+    # main() exists so that Commands can be turned into stand-alone scripts.
+    # Other parts of the code will likely require modification to work stand-alone.
+    def main(self, args=sys.argv):
+        (options, args) = self.parse_args(args)
+        # Some commands might require a dummy tool
+        return self.check_arguments_and_execute(options, args)
+
+
+# FIXME: This should just be rolled into Command.  help_text and argument_names do not need to be instance variables.
+class AbstractDeclarativeCommand(Command):
+    help_text = None
+    argument_names = None
+    long_help = None
+    def __init__(self, options=None, **kwargs):
+        Command.__init__(self, self.help_text, self.argument_names, options=options, long_help=self.long_help, **kwargs)
+
+
+class HelpPrintingOptionParser(OptionParser):
+    def __init__(self, epilog_method=None, *args, **kwargs):
+        self.epilog_method = epilog_method
+        OptionParser.__init__(self, *args, **kwargs)
+
+    def error(self, msg):
+        self.print_usage(sys.stderr)
+        error_message = "%s: error: %s\n" % (self.get_prog_name(), msg)
+        # This method is overridden to add this one line to the output:
+        error_message += "\nType \"%s --help\" to see usage.\n" % self.get_prog_name()
+        self.exit(1, error_message)
+
+    # We override format_epilog to avoid the default formatting which would paragraph-wrap the epilog
+    # and also to allow us to compute the epilog lazily instead of in the constructor (allowing it to be context sensitive).
+    def format_epilog(self, epilog):
+        if self.epilog_method:
+            return "\n%s\n" % self.epilog_method()
+        return ""
+
+
+class HelpCommand(AbstractDeclarativeCommand):
+    name = "help"
+    help_text = "Display information about this program or its subcommands"
+    argument_names = "[COMMAND]"
+
+    def __init__(self):
+        options = [
+            make_option("-a", "--all-commands", action="store_true", dest="show_all_commands", help="Print all available commands"),
+        ]
+        AbstractDeclarativeCommand.__init__(self, options)
+        self.show_all_commands = False # A hack used to pass --all-commands to _help_epilog even though it's called by the OptionParser.
+
+    def _help_epilog(self):
+        # Only show commands which are relevant to this checkout's SCM system.  Might this be confusing to some users?
+        if self.show_all_commands:
+            epilog = "All %prog commands:\n"
+            relevant_commands = self._tool.commands[:]
+        else:
+            epilog = "Common %prog commands:\n"
+            relevant_commands = filter(self._tool.should_show_in_main_help, self._tool.commands)
+        longest_name_length = max(map(lambda command: len(command.name), relevant_commands))
+        relevant_commands.sort(lambda a, b: cmp(a.name, b.name))
+        command_help_texts = map(lambda command: "   %s   %s\n" % (command.name.ljust(longest_name_length), command.help_text), relevant_commands)
+        epilog += "%s\n" % "".join(command_help_texts)
+        epilog += "See '%prog help --all-commands' to list all commands.\n"
+        epilog += "See '%prog help COMMAND' for more information on a specific command.\n"
+        return epilog.replace("%prog", self._tool.name()) # Use of %prog here mimics OptionParser.expand_prog_name().
+
+    # FIXME: This is a hack so that we don't show --all-commands as a global option:
+    def _remove_help_options(self):
+        for option in self.options:
+            self.option_parser.remove_option(option.get_opt_string())
+
+    def execute(self, options, args, tool):
+        if args:
+            command = self._tool.command_by_name(args[0])
+            if command:
+                print command.standalone_help()
+                return 0
+
+        self.show_all_commands = options.show_all_commands
+        self._remove_help_options()
+        self.option_parser.print_help()
+        return 0
+
+
+class MultiCommandTool(object):
+    global_options = None
+
+    def __init__(self, name=None, commands=None):
+        self._name = name or OptionParser(prog=name).get_prog_name() # OptionParser has nice logic for fetching the name.
+        # Allow the unit tests to disable command auto-discovery.
+        self.commands = commands or [cls() for cls in self._find_all_commands() if cls.name]
+        self.help_command = self.command_by_name(HelpCommand.name)
+        # Require a help command, even if a manually-specified command list doesn't include one.
+        if not self.help_command:
+            self.help_command = HelpCommand()
+            self.commands.append(self.help_command)
+        for command in self.commands:
+            command.bind_to_tool(self)
+
+    @classmethod
+    def _add_all_subclasses(cls, class_to_crawl, seen_classes):
+        for subclass in class_to_crawl.__subclasses__():
+            if subclass not in seen_classes:
+                seen_classes.add(subclass)
+                cls._add_all_subclasses(subclass, seen_classes)
+
+    @classmethod
+    def _find_all_commands(cls):
+        commands = set()
+        cls._add_all_subclasses(Command, commands)
+        return sorted(commands)
+
+    def name(self):
+        return self._name
+
+    def _create_option_parser(self):
+        usage = "Usage: %prog [options] COMMAND [ARGS]"
+        return HelpPrintingOptionParser(epilog_method=self.help_command._help_epilog, prog=self.name(), usage=usage)
+
+    @staticmethod
+    def _split_command_name_from_args(args):
+        # Assume the first argument which doesn't start with "-" is the command name.
+        command_index = 0
+        for arg in args:
+            if arg[0] != "-":
+                break
+            command_index += 1
+        else:
+            return (None, args[:])
+
+        command = args[command_index]
+        return (command, args[:command_index] + args[command_index + 1:])
+
+    def command_by_name(self, command_name):
+        for command in self.commands:
+            if command_name == command.name:
+                return command
+        return None
+
+    def path(self):
+        raise NotImplementedError("subclasses must implement")
+
+    def command_completed(self):
+        pass
+
+    def should_show_in_main_help(self, command):
+        return command.show_in_main_help
+
+    def should_execute_command(self, command):
+        return (True, None)
+
+    def _add_global_options(self, option_parser):
+        global_options = self.global_options or []
+        for option in global_options:
+            option_parser.add_option(option)
+
+    def handle_global_options(self, options):
+        pass
+
+    def main(self, argv=sys.argv):
+        (command_name, args) = self._split_command_name_from_args(argv[1:])
+
+        option_parser = self._create_option_parser()
+        self._add_global_options(option_parser)
+
+        command = self.command_by_name(command_name) or self.help_command
+        if not command:
+            option_parser.error("%s is not a recognized command" % command_name)
+
+        command.set_option_parser(option_parser)
+        (options, args) = command.parse_args(args)
+        self.handle_global_options(options)
+
+        (should_execute, failure_reason) = self.should_execute_command(command)
+        if not should_execute:
+            log(failure_reason)
+            return 0 # FIXME: Should this really be 0?
+
+        while True:
+            try:
+                result = command.check_arguments_and_execute(options, args, self)
+                break
+            except TryAgain:
+                pass
+
+        self.command_completed()
+        return result
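
A compact sketch of the wiring the framework expects; the unit tests that follow exercise the same pattern with TrivialCommand/TrivialTool, and the names used here (Hello, ExampleTool) are purely illustrative:

    import sys

    from webkitpy.tool.multicommandtool import Command, MultiCommandTool


    class Hello(Command):
        name = "hello"
        show_in_main_help = True

        def __init__(self):
            Command.__init__(self, "Print a greeting")

        def execute(self, options, args, tool):
            print "hello from %s" % tool.name()


    class ExampleTool(MultiCommandTool):
        def __init__(self):
            MultiCommandTool.__init__(self, name="example-tool")

        def path(self):
            return __file__

        def should_execute_command(self, command):
            return (True, None)


    # "example-tool hello" dispatches to Hello.execute(); "example-tool help"
    # lists commands via the auto-added HelpCommand.
    sys.exit(ExampleTool().main(sys.argv))
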
diff --git a/Tools/Scripts/webkitpy/tool/multicommandtool_unittest.py b/Tools/Scripts/webkitpy/tool/multicommandtool_unittest.py
new file mode 100644
index 0000000..c19095c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/multicommandtool_unittest.py
@@ -0,0 +1,177 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+import unittest
+
+from optparse import make_option
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.tool.multicommandtool import MultiCommandTool, Command, TryAgain
+
+
+class TrivialCommand(Command):
+    name = "trivial"
+    show_in_main_help = True
+    def __init__(self, **kwargs):
+        Command.__init__(self, "help text", **kwargs)
+
+    def execute(self, options, args, tool):
+        pass
+
+
+class UncommonCommand(TrivialCommand):
+    name = "uncommon"
+    show_in_main_help = False
+
+
+class LikesToRetry(Command):
+    name = "likes-to-retry"
+    show_in_main_help = True
+
+    def __init__(self, **kwargs):
+        Command.__init__(self, "help text", **kwargs)
+        self.execute_count = 0
+
+    def execute(self, options, args, tool):
+        self.execute_count += 1
+        if self.execute_count < 2:
+            raise TryAgain()
+
+
+class CommandTest(unittest.TestCase):
+    def test_name_with_arguments(self):
+        command_with_args = TrivialCommand(argument_names="ARG1 ARG2")
+        self.assertEqual(command_with_args.name_with_arguments(), "trivial ARG1 ARG2")
+
+        command_with_args = TrivialCommand(options=[make_option("--my_option")])
+        self.assertEqual(command_with_args.name_with_arguments(), "trivial [options]")
+
+    def test_parse_required_arguments(self):
+        self.assertEqual(Command._parse_required_arguments("ARG1 ARG2"), ["ARG1", "ARG2"])
+        self.assertEqual(Command._parse_required_arguments("[ARG1] [ARG2]"), [])
+        self.assertEqual(Command._parse_required_arguments("[ARG1] ARG2"), ["ARG2"])
+        # Note: We might make our arg parsing smarter in the future and allow this type of argument string.
+        self.assertRaises(Exception, Command._parse_required_arguments, "[ARG1 ARG2]")
+
+    def test_required_arguments(self):
+        two_required_arguments = TrivialCommand(argument_names="ARG1 ARG2 [ARG3]")
+        expected_missing_args_error = "2 arguments required, 1 argument provided.  Provided: 'foo'  Required: ARG1 ARG2\nSee 'trivial-tool help trivial' for usage.\n"
+        exit_code = OutputCapture().assert_outputs(self, two_required_arguments.check_arguments_and_execute, [None, ["foo"], TrivialTool()], expected_stderr=expected_missing_args_error)
+        self.assertEqual(exit_code, 1)
+
+
+class TrivialTool(MultiCommandTool):
+    def __init__(self, commands=None):
+        MultiCommandTool.__init__(self, name="trivial-tool", commands=commands)
+
+    def path(self):
+        return __file__
+
+    def should_execute_command(self, command):
+        return (True, None)
+
+
+class MultiCommandToolTest(unittest.TestCase):
+    def _assert_split(self, args, expected_split):
+        self.assertEqual(MultiCommandTool._split_command_name_from_args(args), expected_split)
+
+    def test_split_args(self):
+        # MultiCommandTool._split_command_name_from_args returns: (command, args)
+        full_args = ["--global-option", "command", "--option", "arg"]
+        full_args_expected = ("command", ["--global-option", "--option", "arg"])
+        self._assert_split(full_args, full_args_expected)
+
+        full_args = []
+        full_args_expected = (None, [])
+        self._assert_split(full_args, full_args_expected)
+
+        full_args = ["command", "arg"]
+        full_args_expected = ("command", ["arg"])
+        self._assert_split(full_args, full_args_expected)
+
+    def test_command_by_name(self):
+        # This also tests Command auto-discovery.
+        tool = TrivialTool()
+        self.assertEqual(tool.command_by_name("trivial").name, "trivial")
+        self.assertEqual(tool.command_by_name("bar"), None)
+
+    def _assert_tool_main_outputs(self, tool, main_args, expected_stdout, expected_stderr="", expected_exit_code=0):
+        exit_code = OutputCapture().assert_outputs(self, tool.main, [main_args], expected_stdout=expected_stdout, expected_stderr=expected_stderr)
+        self.assertEqual(exit_code, expected_exit_code)
+
+    def test_retry(self):
+        likes_to_retry = LikesToRetry()
+        tool = TrivialTool(commands=[likes_to_retry])
+        tool.main(["tool", "likes-to-retry"])
+        self.assertEqual(likes_to_retry.execute_count, 2)
+
+    def test_global_help(self):
+        tool = TrivialTool(commands=[TrivialCommand(), UncommonCommand()])
+        expected_common_commands_help = """Usage: trivial-tool [options] COMMAND [ARGS]
+
+Options:
+  -h, --help  show this help message and exit
+
+Common trivial-tool commands:
+   trivial   help text
+
+See 'trivial-tool help --all-commands' to list all commands.
+See 'trivial-tool help COMMAND' for more information on a specific command.
+
+"""
+        self._assert_tool_main_outputs(tool, ["tool"], expected_common_commands_help)
+        self._assert_tool_main_outputs(tool, ["tool", "help"], expected_common_commands_help)
+        expected_all_commands_help = """Usage: trivial-tool [options] COMMAND [ARGS]
+
+Options:
+  -h, --help  show this help message and exit
+
+All trivial-tool commands:
+   help       Display information about this program or its subcommands
+   trivial    help text
+   uncommon   help text
+
+See 'trivial-tool help --all-commands' to list all commands.
+See 'trivial-tool help COMMAND' for more information on a specific command.
+
+"""
+        self._assert_tool_main_outputs(tool, ["tool", "help", "--all-commands"], expected_all_commands_help)
+        # Test that arguments can be passed before commands as well
+        self._assert_tool_main_outputs(tool, ["tool", "--all-commands", "help"], expected_all_commands_help)
+
+
+    def test_command_help(self):
+        command_with_options = TrivialCommand(options=[make_option("--my_option")], long_help="LONG HELP")
+        tool = TrivialTool(commands=[command_with_options])
+        expected_subcommand_help = "trivial [options]   help text\n\nLONG HELP\n\nOptions:\n  --my_option=MY_OPTION\n\n"
+        self._assert_tool_main_outputs(tool, ["tool", "help", "trivial"], expected_subcommand_help)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/Tools/Scripts/webkitpy/tool/servers/__init__.py b/Tools/Scripts/webkitpy/tool/servers/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/servers/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/index.html b/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/index.html
new file mode 100644
index 0000000..f40a34d
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/index.html
@@ -0,0 +1,182 @@
+<!DOCTYPE html>
+<!--
+  Copyright (c) 2010 Google Inc. All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions are
+  met:
+
+     * Redistributions of source code must retain the above copyright
+  notice, this list of conditions and the following disclaimer.
+     * Redistributions in binary form must reproduce the above
+  copyright notice, this list of conditions and the following disclaimer
+  in the documentation and/or other materials provided with the
+  distribution.
+     * Neither the name of Google Inc. nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+-->
+<html>
+<head>
+  <title>Layout Test Rebaseline Server</title>
+  <link rel="stylesheet" href="/main.css" type="text/css">
+  <script src="/util.js"></script>
+  <script src="/loupe.js"></script>
+  <script src="/main.js"></script>
+  <script src="/queue.js"></script>
+</head>
+<body class="loading">
+
+<pre id="log" style="display: none"></pre>
+<div id="queue" style="display: none">
+  Queue:
+  <select id="queue-select" size="10"></select>
+  <button id="remove-queue-selection">Remove selection</button>
+  <button id="rebaseline-queue">Rebaseline queue</button>
+</div>
+
+<div id="header">
+  <div id="controls">
+    <!-- Add a dummy <select> node so that this lines up with the text on the left -->
+    <select style="visibility: hidden"></select>
+    <span id="toggle-sort" class="link">Sort tests by metric</span>
+    <span class="divider">|</span>
+    <span id="toggle-log" class="link">Log</span>
+    <span class="divider">|</span>
+    <a href="/quitquitquit">Exit</a>
+  </div>
+
+  <span id="selectors">
+    <label>
+      Failure type:
+      <select id="failure-type-selector"></select>
+    </label>
+
+    <label>
+      Directory:
+      <select id="directory-selector"></select>
+    </label>
+
+    <label>
+      Test:
+      <select id="test-selector"></select>
+    </label>
+  </span>
+
+  <a id="test-link" target="_blank">View test</a>
+
+  <span id="nav-buttons">
+    <button id="previous-test">&laquo;</button>
+    <span id="test-index"></span> of <span id="test-count"></span>
+    <button id="next-test">&raquo;</button>
+  </span>
+</div>
+
+<table id="test-output">
+  <thead id="labels">
+    <tr>
+      <th>Expected</th>
+      <th>Actual</th>
+      <th>Diff</th>
+    </tr>
+  </thead>
+  <tbody id="image-outputs" style="display: none">
+    <tr>
+      <td colspan="3"><h2>Image</h2></td>
+    </tr>
+    <tr>
+      <td><img id="expected-image"></td>
+      <td><img id="actual-image"></td>
+      <td>
+        <canvas id="diff-canvas" width="800" height="600"></canvas>
+        <div id="diff-checksum" style="display: none">
+          <h3>Checksum mismatch</h3>
+          Expected: <span id="expected-checksum"></span><br>
+          Actual: <span id="actual-checksum"></span>
+        </div>
+      </td>
+    </tr>
+  </tbody>
+  <tbody id="text-outputs" style="display: none">
+    <tr>
+      <td colspan="3"><h2>Text</h2></td>
+    </tr>
+    <tr>
+      <td><pre id="expected-text" class="text-output"></pre></td>
+      <td><pre id="actual-text" class="text-output"></pre></td>
+      <td><div id="diff-text-pretty" class="text-output"></div></td>
+    </tr>
+  </tbody>
+</table>
+
+<div id="footer">
+  <label>State: <span id="state"></span></label>
+  <label>Existing baselines: <span id="current-baselines"></span></label>
+  <label>
+    Baseline target:
+    <select id="baseline-target"></select>
+  </label>
+  <label>
+    Move current baselines to:
+    <select id="baseline-move-to">
+      <option value="none">Nowhere (replace)</option>
+    </select>
+  </label>
+
+  <!-- Add a dummy <button> node so that this lines up with the text on the right -->
+  <button style="visibility: hidden; padding-left: 0; padding-right: 0;"></button>
+
+  <div id="action-buttons">
+    <span id="toggle-queue" class="link">Queue</span>
+    <button id="add-to-rebaseline-queue">Add to rebaseline queue</button>
+  </div>
+</div>
+
+<table id="loupe" style="display: none">
+  <tr>
+    <td colspan="3" id="loupe-info">
+      <span id="loupe-close" class="link">Close</span>
+      <label>Coordinate: <span id="loupe-coordinate"></span></label>
+    </td>
+  </tr>
+  <tr>
+    <td>
+      <div class="loupe-container">
+        <canvas id="expected-loupe" width="210" height="210"></canvas>
+        <div class="center-highlight"></div>
+      </div>
+    </td>
+    <td>
+      <div class="loupe-container">
+        <canvas id="actual-loupe" width="210" height="210"></canvas>
+        <div class="center-highlight"></div>
+      </div>
+    </td>
+    <td>
+      <div class="loupe-container">
+        <canvas id="diff-loupe" width="210" height="210"></canvas>
+        <div class="center-highlight"></div>
+      </div>
+    </td>
+  </tr>
+  <tr id="loupe-colors">
+    <td><label>Exp. color: <span id="expected-loupe-color"></span></label></td>
+    <td><label>Actual color: <span id="actual-loupe-color"></span></label></td>
+    <td><label>Diff color: <span id="diff-loupe-color"></span></label></td>
+  </tr>
+</table>
+
+</body>
+</html>
diff --git a/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/loupe.js b/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/loupe.js
new file mode 100644
index 0000000..41f977a
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/loupe.js
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+var LOUPE_MAGNIFICATION_FACTOR = 10;
+
+function Loupe()
+{
+    this._node = $('loupe');
+    this._currentCornerX = -1;
+    this._currentCornerY = -1;
+
+    var self = this;
+
+    function handleOutputClick(event) { self._handleOutputClick(event); }
+    $('expected-image').addEventListener('click', handleOutputClick);
+    $('actual-image').addEventListener('click', handleOutputClick);
+    $('diff-canvas').addEventListener('click', handleOutputClick);
+
+    function handleLoupeClick(event) { self._handleLoupeClick(event); }
+    $('expected-loupe').addEventListener('click', handleLoupeClick);
+    $('actual-loupe').addEventListener('click', handleLoupeClick);
+    $('diff-loupe').addEventListener('click', handleLoupeClick);
+
+    function hide(event) { self.hide(); }
+    $('loupe-close').addEventListener('click', hide);
+}
+
+Loupe.prototype._handleOutputClick = function(event)
+{
+    // The -1 compensates for the border around the image/canvas.
+    this._showFor(event.offsetX - 1, event.offsetY - 1);
+};
+
+Loupe.prototype._handleLoupeClick = function(event)
+{
+    var deltaX = Math.floor(event.offsetX/LOUPE_MAGNIFICATION_FACTOR);
+    var deltaY = Math.floor(event.offsetY/LOUPE_MAGNIFICATION_FACTOR);
+
+    this._showFor(
+        this._currentCornerX + deltaX, this._currentCornerY + deltaY);
+};
+
+Loupe.prototype.hide = function()
+{
+    this._node.style.display = 'none';
+};
+
+Loupe.prototype._showFor = function(x, y)
+{
+    this._fillFromImage(x, y, 'expected', $('expected-image'));
+    this._fillFromImage(x, y, 'actual', $('actual-image'));
+    this._fillFromCanvas(x, y, 'diff', $('diff-canvas'));
+
+    this._node.style.display = '';
+};
+
+Loupe.prototype._fillFromImage = function(x, y, type, sourceImage)
+{
+    var tempCanvas = document.createElement('canvas');
+    tempCanvas.width = sourceImage.width;
+    tempCanvas.height = sourceImage.height;
+    var tempContext = tempCanvas.getContext('2d');
+
+    tempContext.drawImage(sourceImage, 0, 0);
+
+    this._fillFromCanvas(x, y, type, tempCanvas);
+};
+
+Loupe.prototype._fillFromCanvas = function(x, y, type, canvas)
+{
+    var context = canvas.getContext('2d');
+    var sourceImageData =
+        context.getImageData(0, 0, canvas.width, canvas.height);
+
+    var targetCanvas = $(type + '-loupe');
+    var targetContext = targetCanvas.getContext('2d');
+    targetContext.fillStyle = 'rgba(255, 255, 255, 1)';
+    targetContext.fillRect(0, 0, targetCanvas.width, targetCanvas.height);
+
+    var sourceXOffset = (targetCanvas.width/LOUPE_MAGNIFICATION_FACTOR - 1)/2;
+    var sourceYOffset = (targetCanvas.height/LOUPE_MAGNIFICATION_FACTOR - 1)/2;
+
+    function readPixelComponent(x, y, component) {
+        var offset = (y * sourceImageData.width + x) * 4 + component;
+        return sourceImageData.data[offset];
+    }
+
+    for (var i = -sourceXOffset; i <= sourceXOffset; i++) {
+        for (var j = -sourceYOffset; j <= sourceYOffset; j++) {
+            var sourceX = x + i;
+            var sourceY = y + j;
+
+            var sourceR = readPixelComponent(sourceX, sourceY, 0);
+            var sourceG = readPixelComponent(sourceX, sourceY, 1);
+            var sourceB = readPixelComponent(sourceX, sourceY, 2);
+            var sourceA = readPixelComponent(sourceX, sourceY, 3)/255;
+            sourceA = Math.round(sourceA * 10)/10;
+
+            var targetX = (i + sourceXOffset) * LOUPE_MAGNIFICATION_FACTOR;
+            var targetY = (j + sourceYOffset) * LOUPE_MAGNIFICATION_FACTOR;
+            var colorString =
+                sourceR + ', ' + sourceG + ', ' + sourceB + ', ' + sourceA;
+            targetContext.fillStyle = 'rgba(' + colorString + ')';
+            targetContext.fillRect(
+                targetX, targetY,
+                LOUPE_MAGNIFICATION_FACTOR, LOUPE_MAGNIFICATION_FACTOR);
+
+            if (i == 0 && j == 0) {
+                $('loupe-coordinate').textContent = sourceX + ', ' + sourceY;
+                $(type + '-loupe-color').textContent = colorString;
+            }
+        }
+    }
+
+    this._currentCornerX = x - sourceXOffset;
+    this._currentCornerY = y - sourceYOffset;
+};
diff --git a/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/main.css b/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/main.css
new file mode 100644
index 0000000..280c3b2
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/main.css
@@ -0,0 +1,313 @@
+/*
+ * Copyright (c) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+body {
+  font-size: 12px;
+  font-family: Helvetica, Arial, sans-serif;
+  padding: 0;
+  margin: 0;
+}
+
+.loading {
+  opacity: 0.5;
+}
+
+div {
+  margin: 0;
+}
+
+a, .link {
+  color: #aaf;
+  text-decoration: underline;
+  cursor: pointer;
+}
+
+.link.selected {
+  color: #fff;
+  font-weight: bold;
+  text-decoration: none;
+}
+
+#log,
+#queue {
+  padding: .25em 0 0 .25em;
+  position: absolute;
+  right: 0;
+  height: 200px;
+  overflow: auto;
+  background: #fff;
+  -webkit-box-shadow: 1px 1px 5px rgba(0, 0, 0, .5);
+}
+
+#log {
+  top: 2em;
+  width: 500px;
+}
+
+#queue {
+  bottom: 3em;
+  width: 400px;
+}
+
+#queue-select {
+  display: block;
+  width: 390px;
+}
+
+#header,
+#footer {
+  padding: .5em 1em;
+  background: #333;
+  color: #fff;
+  -webkit-box-shadow: 0 1px 5px rgba(0, 0, 0, 0.5);
+}
+
+#header {
+  margin-bottom: 1em;
+}
+
+#header .divider,
+#footer .divider {
+  opacity: .3;
+  padding: 0 .5em;
+}
+
+#header label,
+#footer label {
+  padding-right: 1em;
+  color: #ccc;
+}
+
+#test-link {
+  margin-right: 1em;
+}
+
+#header label span,
+#footer label span {
+  color: #fff;
+  font-weight: bold;
+}
+
+#nav-buttons {
+  white-space: nowrap;
+}
+
+#nav-buttons button {
+  background: #fff;
+  border: 0;
+  border-radius: 10px;
+}
+
+#nav-buttons button:active {
+  -webkit-box-shadow: 0 0 5px #33f inset;
+  background: #aaa;
+}
+
+#nav-buttons button[disabled] {
+  opacity: .5;
+}
+
+#controls {
+  float: right;
+}
+
+.disabled-control {
+  color: #888;
+}
+
+#test-output {
+  border-spacing: 0;
+  border-collapse: collapse;
+  margin: 0 auto;
+  width: 100%;
+}
+
+#test-output td,
+#test-output th {
+  padding: 0;
+  vertical-align: top;
+}
+
+#image-outputs img,
+#image-outputs canvas,
+#image-outputs #diff-checksum {
+  width: 800px;
+  height: 600px;
+  border: solid 1px #ddd;
+  -webkit-user-select: none;
+  -webkit-user-drag: none;
+}
+
+#image-outputs img,
+#image-outputs canvas {
+  cursor: crosshair;
+}
+
+#image-outputs img.loading,
+#image-outputs canvas.loading {
+  opacity: .5;
+}
+
+#image-outputs #actual-image {
+  margin: 0 1em;
+}
+
+#test-output #labels th {
+  text-align: center;
+  color: #666;
+}
+
+#text-outputs .text-output {
+  height: 600px;
+  width: 800px;
+  overflow: auto;
+}
+
+#test-output h2 {
+  border-bottom: solid 1px #ccc;
+  font-weight: bold;
+  margin: 0;
+  background: #eee;
+}
+
+#footer {
+  position: absolute;
+  bottom: 0;
+  left: 0;
+  right: 0;
+  margin-top: 1em;
+}
+
+#state.needs_rebaseline {
+  color: yellow;
+}
+
+#state.rebaseline_failed {
+  color: red;
+}
+
+#state.rebaseline_succeeded {
+  color: green;
+}
+
+#state.in_queue {
+  color: gray;
+}
+
+#current-baselines {
+  font-weight: normal !important;
+}
+
+#current-baselines .platform {
+  font-weight: bold;
+}
+
+#current-baselines a {
+  color: #ddf;
+}
+
+#current-baselines .was-used-for-test {
+  color: #aaf;
+  font-weight: bold;
+}
+
+#action-buttons {
+  float: right;
+}
+
+#action-buttons .link {
+  margin-right: 1em;
+}
+
+#footer button {
+  padding: 1em;
+}
+
+#loupe {
+  -webkit-box-shadow: 2px 2px 5px rgba(0, 0, 0, .5);
+  position: absolute;
+  width: 634px;
+  top: 50%;
+  left: 50%;
+  margin-left: -151px;
+  margin-top: -50px;
+  background: #fff;
+  border-spacing: 0;
+  border-collapse: collapse;
+}
+
+#loupe td {
+  padding: 0;
+  border: solid 1px #ccc;
+}
+
+#loupe label {
+  color: #999;
+  padding-right: 1em;
+}
+
+#loupe span {
+  color: #000;
+  font-weight: bold;
+}
+
+#loupe canvas {
+  cursor: crosshair;
+}
+
+#loupe #loupe-close {
+  float: right;
+}
+
+#loupe #loupe-info {
+  background: #eee;
+  padding: .3em .5em;
+}
+
+#loupe #loupe-colors td {
+  text-align: center;
+}
+
+#loupe .loupe-container {
+  position: relative;
+  width: 210px;
+  height: 210px;
+}
+
+#loupe .center-highlight {
+  position: absolute;
+  width: 10px;
+  height: 10px;
+  top: 50%;
+  left: 50%;
+  margin-left: -5px;
+  margin-top: -5px;
+  outline: solid 1px #999;
+}
diff --git a/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/main.js b/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/main.js
new file mode 100644
index 0000000..5e1fa52
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/main.js
@@ -0,0 +1,577 @@
+/*
+ * Copyright (c) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+var ALL_DIRECTORY_PATH = '[all]';
+
+var STATE_NEEDS_REBASELINE = 'needs_rebaseline';
+var STATE_REBASELINE_FAILED = 'rebaseline_failed';
+var STATE_REBASELINE_SUCCEEDED = 'rebaseline_succeeded';
+var STATE_IN_QUEUE = 'in_queue';
+var STATE_TO_DISPLAY_STATE = {};
+STATE_TO_DISPLAY_STATE[STATE_NEEDS_REBASELINE] = 'Needs rebaseline';
+STATE_TO_DISPLAY_STATE[STATE_REBASELINE_FAILED] = 'Rebaseline failed';
+STATE_TO_DISPLAY_STATE[STATE_REBASELINE_SUCCEEDED] = 'Rebaseline succeeded';
+STATE_TO_DISPLAY_STATE[STATE_IN_QUEUE] = 'In queue';
+
+var results;
+var testsByFailureType = {};
+var testsByDirectory = {};
+var selectedTests = [];
+var loupe;
+var queue;
+var shouldSortTestsByMetric = false;
+
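+/**
+ * Entry point: wires up the selector and navigation handlers and keyboard
+ * shortcuts, creates the loupe and rebaseline queue, and loads the platform
+ * and results data from the server.
+ */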
+function main()
+{
+    $('failure-type-selector').addEventListener('change', selectFailureType);
+    $('directory-selector').addEventListener('change', selectDirectory);
+    $('test-selector').addEventListener('change', selectTest);
+    $('next-test').addEventListener('click', nextTest);
+    $('previous-test').addEventListener('click', previousTest);
+
+    $('toggle-log').addEventListener('click', function() { toggle('log'); });
+    disableSorting();
+
+    loupe = new Loupe();
+    queue = new RebaselineQueue();
+
+    document.addEventListener('keydown', function(event) {
+        if (event.altKey || event.ctrlKey || event.metaKey || event.shiftKey) {
+            return;
+        }
+
+        switch (event.keyIdentifier) {
+        case 'Left':
+            event.preventDefault();
+            previousTest();
+            break;
+        case 'Right':
+            event.preventDefault();
+            nextTest();
+            break;
+        case 'U+0051': // q
+            queue.addCurrentTest();
+            break;
+        case 'U+0058': // x
+            queue.removeCurrentTest();
+            break;
+        case 'U+0052': // r
+            queue.rebaseline();
+            break;
+        }
+    });
+
+    loadText('/platforms.json', function(text) {
+        var platforms = JSON.parse(text);
+        platforms.platforms.forEach(function(platform) {
+            var platformOption = document.createElement('option');
+            platformOption.value = platform;
+            platformOption.textContent = platform;
+
+            var targetOption = platformOption.cloneNode(true);
+            targetOption.selected = platform == platforms.defaultPlatform;
+            $('baseline-target').appendChild(targetOption);
+            $('baseline-move-to').appendChild(platformOption.cloneNode(true));
+        });
+    });
+
+    loadText('/results.json', function(text) {
+        results = JSON.parse(text);
+        displayResults();
+    });
+}
+
+/**
+ * Groups test results by failure type.
+ */
+function displayResults()
+{
+    var failureTypeSelector = $('failure-type-selector');
+    var failureTypes = [];
+
+    for (var testName in results.tests) {
+        var test = results.tests[testName];
+        if (test.actual == 'PASS') {
+            continue;
+        }
+        var failureType = test.actual + ' (expected ' + test.expected + ')';
+        if (!(failureType in testsByFailureType)) {
+            testsByFailureType[failureType] = [];
+            failureTypes.push(failureType);
+        }
+        testsByFailureType[failureType].push(testName);
+    }
+
+    // Sort by number of failures
+    failureTypes.sort(function(a, b) {
+        return testsByFailureType[b].length - testsByFailureType[a].length;
+    });
+
+    for (var i = 0, failureType; failureType = failureTypes[i]; i++) {
+        var failureTypeOption = document.createElement('option');
+        failureTypeOption.value = failureType;
+        failureTypeOption.textContent = failureType + ' - ' + testsByFailureType[failureType].length + ' tests';
+        failureTypeSelector.appendChild(failureTypeOption);
+    }
+
+    selectFailureType();
+
+    document.body.className = '';
+}
+
+function enableSorting()
+{
+    $('toggle-sort').onclick = function() {
+        shouldSortTestsByMetric = !shouldSortTestsByMetric;
+        // Regenerates the list of tests; this alphabetizes, and
+        // then re-sorts if we turned sorting on.
+        selectDirectory();
+    }
+    $('toggle-sort').classList.remove('disabled-control');
+}
+
+function disableSorting()
+{
+    $('toggle-sort').onclick = function() { return false; }
+    $('toggle-sort').classList.add('disabled-control');
+}
+
+/**
+ * For a given failure type, gets all the tests and groups them by directory
+ * (populating the directory selector with them).
+ */
+function selectFailureType()
+{
+    var selectedFailureType = getSelectValue('failure-type-selector');
+    var tests = testsByFailureType[selectedFailureType];
+
+    testsByDirectory = {}
+    var displayDirectoryNamesByDirectory = {};
+    var directories = [];
+
+    // Include a special option for all tests
+    testsByDirectory[ALL_DIRECTORY_PATH] = tests;
+    displayDirectoryNamesByDirectory[ALL_DIRECTORY_PATH] = 'all';
+    directories.push(ALL_DIRECTORY_PATH);
+
+    // Roll up tests by ancestor directories
+    tests.forEach(function(test) {
+        var pathPieces = test.split('/');
+        var pathDirectories = pathPieces.slice(0, pathPieces.length -1);
+        var ancestorDirectory = '';
+
+        pathDirectories.forEach(function(pathDirectory, index) {
+            ancestorDirectory += pathDirectory + '/';
+            if (!(ancestorDirectory in testsByDirectory)) {
+                testsByDirectory[ancestorDirectory] = [];
+                var displayDirectoryName = new Array(index * 6).join('&nbsp;') + pathDirectory;
+                displayDirectoryNamesByDirectory[ancestorDirectory] = displayDirectoryName;
+                directories.push(ancestorDirectory);
+            }
+
+            testsByDirectory[ancestorDirectory].push(test);
+        });
+    });
+
+    directories.sort();
+
+    var directorySelector = $('directory-selector');
+    directorySelector.innerHTML = '';
+
+    directories.forEach(function(directory) {
+        var directoryOption = document.createElement('option');
+        directoryOption.value = directory;
+        directoryOption.innerHTML =
+            displayDirectoryNamesByDirectory[directory] + ' - ' +
+            testsByDirectory[directory].length + ' tests';
+        directorySelector.appendChild(directoryOption);
+    });
+
+    selectDirectory();
+}
+
+/**
+ * For a given failure type and directory, gets all the tests in that
+ * directory and populates the test selector with them.
+ */
+function selectDirectory()
+{
+    var previouslySelectedTest = getSelectedTest();
+
+    var selectedDirectory = getSelectValue('directory-selector');
+    selectedTests = testsByDirectory[selectedDirectory];
+    selectedTests.sort();
+
+    var testsByState = {};
+    selectedTests.forEach(function(testName) {
+        var state = results.tests[testName].state;
+        if (state == STATE_IN_QUEUE) {
+            state = STATE_NEEDS_REBASELINE;
+        }
+        if (!(state in testsByState)) {
+            testsByState[state] = [];
+        }
+        testsByState[state].push(testName);
+    });
+
+    var optionIndexByTest = {};
+
+    var testSelector = $('test-selector');
+    testSelector.innerHTML = '';
+
+    var selectedFailureType = getSelectValue('failure-type-selector');
+    var sampleSelectedTest = testsByFailureType[selectedFailureType][0];
+    var selectedTypeIsSortable = 'metric' in results.tests[sampleSelectedTest];
+    if (selectedTypeIsSortable) {
+        enableSorting();
+        if (shouldSortTestsByMetric) {
+            for (var state in testsByState) {
+                testsByState[state].sort(function(a, b) {
+                    return results.tests[b].metric - results.tests[a].metric
+                })
+            }
+        }
+    } else
+        disableSorting();
+
+    for (var state in testsByState) {
+        var stateOption = document.createElement('option');
+        stateOption.textContent = STATE_TO_DISPLAY_STATE[state];
+        stateOption.disabled = true;
+        testSelector.appendChild(stateOption);
+
+        testsByState[state].forEach(function(testName) {
+            var testOption = document.createElement('option');
+            testOption.value = testName;
+            var testDisplayName = testName;
+            if (testName.lastIndexOf(selectedDirectory) == 0) {
+                testDisplayName = testName.substring(selectedDirectory.length);
+            }
+            testOption.innerHTML = '&nbsp;&nbsp;' + testDisplayName;
+            optionIndexByTest[testName] = testSelector.options.length;
+            testSelector.appendChild(testOption);
+        });
+    }
+
+    if (previouslySelectedTest in optionIndexByTest) {
+        testSelector.selectedIndex = optionIndexByTest[previouslySelectedTest];
+    } else if (STATE_NEEDS_REBASELINE in testsByState) {
+        testSelector.selectedIndex =
+            optionIndexByTest[testsByState[STATE_NEEDS_REBASELINE][0]];
+        selectTest();
+    } else {
+        testSelector.selectedIndex = 1;
+        selectTest();
+    }
+
+    selectTest();
+}
+
+function getSelectedTest()
+{
+    return getSelectValue('test-selector');
+}
+
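+/**
+ * Shows the image and/or text outputs for the currently selected test, lists
+ * its current baselines with links into Trac, refreshes the navigation state,
+ * hides the loupe and prefetches the next image test's results.
+ */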
+function selectTest()
+{
+    var selectedTest = getSelectedTest();
+
+    if (results.tests[selectedTest].actual.indexOf('IMAGE') != -1) {
+        $('image-outputs').style.display = '';
+        displayImageResults(selectedTest);
+    } else {
+        $('image-outputs').style.display = 'none';
+    }
+
+    if (results.tests[selectedTest].actual.indexOf('TEXT') != -1) {
+        $('text-outputs').style.display = '';
+        displayTextResults(selectedTest);
+    } else {
+        $('text-outputs').style.display = 'none';
+    }
+
+    var currentBaselines = $('current-baselines');
+    currentBaselines.textContent = '';
+    var baselines = results.tests[selectedTest].baselines;
+    var testName = selectedTest.split('.').slice(0, -1).join('.');
+    getSortedKeys(baselines).forEach(function(platform, i) {
+        if (i != 0) {
+            currentBaselines.appendChild(document.createTextNode('; '));
+        }
+        var platformName = document.createElement('span');
+        platformName.className = 'platform';
+        platformName.textContent = platform;
+        currentBaselines.appendChild(platformName);
+        currentBaselines.appendChild(document.createTextNode(' ('));
+        getSortedKeys(baselines[platform]).forEach(function(extension, j) {
+            if (j != 0) {
+                currentBaselines.appendChild(document.createTextNode(', '));
+            }
+            var link = document.createElement('a');
+            var baselinePath = '';
+            if (platform != 'base') {
+                baselinePath += 'platform/' + platform + '/';
+            }
+            baselinePath += testName + '-expected' + extension;
+            link.href = getTracUrl(baselinePath);
+            if (extension == '.checksum') {
+                link.textContent = 'chk';
+            } else {
+                link.textContent = extension.substring(1);
+            }
+            link.target = '_blank';
+            if (baselines[platform][extension]) {
+                link.className = 'was-used-for-test';
+            }
+            currentBaselines.appendChild(link);
+        });
+        currentBaselines.appendChild(document.createTextNode(')'));
+    });
+
+    updateState();
+    loupe.hide();
+
+    prefetchNextImageTest();
+}
+
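+/**
+ * Begins loading the expected and actual images of the next test in the
+ * selector (if it is an image test) so they are already cached when the user
+ * advances to it.
+ */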
+function prefetchNextImageTest()
+{
+    var testSelector = $('test-selector');
+    if (testSelector.selectedIndex == testSelector.options.length - 1) {
+        return;
+    }
+    var nextTest = testSelector.options[testSelector.selectedIndex + 1].value;
+    if (results.tests[nextTest].actual.indexOf('IMAGE') != -1) {
+        new Image().src = getTestResultUrl(nextTest, 'expected-image');
+        new Image().src = getTestResultUrl(nextTest, 'actual-image');
+    }
+}
+
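+/**
+ * Refreshes the header and footer for the selected test: its position in the
+ * current list, the next/previous button state, the Trac link, the displayed
+ * rebaseline state and the queue controls.
+ */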
+function updateState()
+{
+    var testName = getSelectedTest();
+    var testIndex = selectedTests.indexOf(testName);
+    var testCount = selectedTests.length
+    $('test-index').textContent = testIndex + 1;
+    $('test-count').textContent = testCount;
+
+    $('next-test').disabled = testIndex == testCount - 1;
+    $('previous-test').disabled = testIndex == 0;
+
+    $('test-link').href = getTracUrl(testName);
+
+    var state = results.tests[testName].state;
+    $('state').className = state;
+    $('state').innerHTML = STATE_TO_DISPLAY_STATE[state];
+
+    queue.updateState();
+}
+
+function getTestResultUrl(testName, mode)
+{
+    return '/test_result?test=' + testName + '&mode=' + mode;
+}
+
+var currentExpectedImageTest;
+var currentActualImageTest;
+
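+/**
+ * Loads the expected and actual images for the given test (unless both are
+ * already displayed) and triggers the image diff computation as each one
+ * finishes loading.
+ */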
+function displayImageResults(testName)
+{
+    if (currentExpectedImageTest == currentActualImageTest
+        && currentExpectedImageTest == testName) {
+        return;
+    }
+
+    function displayImageResult(mode, callback) {
+        var image = $(mode);
+        image.className = 'loading';
+        image.src = getTestResultUrl(testName, mode);
+        image.onload = function() {
+            image.className = '';
+            callback();
+            updateImageDiff();
+        };
+    }
+
+    displayImageResult(
+        'expected-image',
+        function() { currentExpectedImageTest = testName; });
+    displayImageResult(
+        'actual-image',
+        function() { currentActualImageTest = testName; });
+
+    $('diff-canvas').className = 'loading';
+    $('diff-canvas').style.display = '';
+    $('diff-checksum').style.display = 'none';
+}
+
+/**
+ * Computes a graphical diff between the expected and actual images by
+ * rendering each to a canvas, getting the image data, and comparing the RGBA
+ * components of each pixel. The output is put into the diff canvas, with
+ * identical pixels appearing at 12.5% opacity and different pixels being
+ * highlighted in red.
+ */
+function updateImageDiff() {
+    if (currentExpectedImageTest != currentActualImageTest)
+        return;
+
+    var expectedImage = $('expected-image');
+    var actualImage = $('actual-image');
+
+    function getImageData(image) {
+        var imageCanvas = document.createElement('canvas');
+        imageCanvas.width = image.width;
+        imageCanvas.height = image.height;
+        var imageCanvasContext = imageCanvas.getContext('2d');
+
+        imageCanvasContext.fillStyle = 'rgba(255, 255, 255, 1)';
+        imageCanvasContext.fillRect(
+            0, 0, image.width, image.height);
+
+        imageCanvasContext.drawImage(image, 0, 0);
+        return imageCanvasContext.getImageData(
+            0, 0, image.width, image.height);
+    }
+
+    var expectedImageData = getImageData(expectedImage);
+    var actualImageData = getImageData(actualImage);
+
+    var diffCanvas = $('diff-canvas');
+    var diffCanvasContext = diffCanvas.getContext('2d');
+    var diffImageData =
+        diffCanvasContext.createImageData(diffCanvas.width, diffCanvas.height);
+
+    // Avoiding property lookups for all these during the per-pixel loop below
+    // provides a significant performance benefit.
+    var expectedWidth = expectedImage.width;
+    var expectedHeight = expectedImage.height;
+    var expected = expectedImageData.data;
+
+    var actualWidth = actualImage.width;
+    var actual = actualImageData.data;
+
+    var diffWidth = diffImageData.width;
+    var diff = diffImageData.data;
+
+    var hadDiff = false;
+    for (var x = 0; x < expectedWidth; x++) {
+        for (var y = 0; y < expectedHeight; y++) {
+            var expectedOffset = (y * expectedWidth + x) * 4;
+            var actualOffset = (y * actualWidth + x) * 4;
+            var diffOffset = (y * diffWidth + x) * 4;
+            if (expected[expectedOffset] != actual[actualOffset] ||
+                expected[expectedOffset + 1] != actual[actualOffset + 1] ||
+                expected[expectedOffset + 2] != actual[actualOffset + 2] ||
+                expected[expectedOffset + 3] != actual[actualOffset + 3]) {
+                hadDiff = true;
+                diff[diffOffset] = 255;
+                diff[diffOffset + 1] = 0;
+                diff[diffOffset + 2] = 0;
+                diff[diffOffset + 3] = 255;
+            } else {
+                diff[diffOffset] = expected[expectedOffset];
+                diff[diffOffset + 1] = expected[expectedOffset + 1];
+                diff[diffOffset + 2] = expected[expectedOffset + 2];
+                diff[diffOffset + 3] = 32;
+            }
+        }
+    }
+
+    diffCanvasContext.putImageData(
+        diffImageData,
+        0, 0,
+        0, 0,
+        diffImageData.width, diffImageData.height);
+    diffCanvas.className = '';
+
+    if (!hadDiff) {
+        diffCanvas.style.display = 'none';
+        $('diff-checksum').style.display = '';
+        loadTextResult(currentExpectedImageTest, 'expected-checksum');
+        loadTextResult(currentExpectedImageTest, 'actual-checksum');
+    }
+}
+
+function loadTextResult(testName, mode, responseIsHtml)
+{
+    loadText(getTestResultUrl(testName, mode), function(text) {
+        if (responseIsHtml) {
+            $(mode).innerHTML = text;
+        } else {
+            $(mode).textContent = text;
+        }
+    });
+}
+
+function displayTextResults(testName)
+{
+    loadTextResult(testName, 'expected-text');
+    loadTextResult(testName, 'actual-text');
+    loadTextResult(testName, 'diff-text-pretty', true);
+}
+
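+/**
+ * Advances the test selector to the next enabled option, skipping the
+ * disabled state-header options.
+ */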
+function nextTest()
+{
+    var testSelector = $('test-selector');
+    var nextTestIndex = testSelector.selectedIndex + 1;
+    while (true) {
+        if (nextTestIndex == testSelector.options.length) {
+            return;
+        }
+        if (testSelector.options[nextTestIndex].disabled) {
+            nextTestIndex++;
+        } else {
+            testSelector.selectedIndex = nextTestIndex;
+            selectTest();
+            return;
+        }
+    }
+}
+
+function previousTest()
+{
+    var testSelector = $('test-selector');
+    var previousTestIndex = testSelector.selectedIndex - 1;
+    while (true) {
+        if (previousTestIndex == -1) {
+            return;
+        }
+        if (testSelector.options[previousTestIndex].disabled) {
+            previousTestIndex--;
+        } else {
+            testSelector.selectedIndex = previousTestIndex;
+            selectTest();
+            return;
+        }
+    }
+}
+
+window.addEventListener('DOMContentLoaded', main);
diff --git a/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/queue.js b/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/queue.js
new file mode 100644
index 0000000..338e28f
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/queue.js
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
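+/**
+ * Manages the set of tests queued for rebaselining: the queue <select>,
+ * the rebaseline and remove buttons, and the queue visibility toggle.
+ */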
+function RebaselineQueue()
+{
+    this._selectNode = $('queue-select');
+    this._rebaselineButtonNode = $('rebaseline-queue');
+    this._toggleNode = $('toggle-queue');
+    this._removeSelectionButtonNode = $('remove-queue-selection');
+
+    this._inProgressRebaselineCount = 0;
+
+    var self = this;
+    $('add-to-rebaseline-queue').addEventListener(
+        'click', function() { self.addCurrentTest(); });
+    this._selectNode.addEventListener('change', updateState);
+    this._removeSelectionButtonNode.addEventListener(
+        'click', function() { self._removeSelection(); });
+    this._rebaselineButtonNode.addEventListener(
+        'click', function() { self.rebaseline(); });
+    this._toggleNode.addEventListener(
+        'click', function() { toggle('queue'); });
+}
+
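+/**
+ * Enables or disables the queue controls and updates their labels based on
+ * the selected test's state, the queue length and any in-progress
+ * rebaselines.
+ */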
+RebaselineQueue.prototype.updateState = function()
+{
+    var testName = getSelectedTest();
+
+    var state = results.tests[testName].state;
+    $('add-to-rebaseline-queue').disabled = state != STATE_NEEDS_REBASELINE;
+
+    var queueLength = this._selectNode.options.length;
+    if (this._inProgressRebaselineCount > 0) {
+      this._rebaselineButtonNode.disabled = true;
+      this._rebaselineButtonNode.textContent =
+          'Rebaseline in progress (' + this._inProgressRebaselineCount +
+          ' tests left)';
+    } else if (queueLength == 0) {
+      this._rebaselineButtonNode.disabled = true;
+      this._rebaselineButtonNode.textContent = 'Rebaseline queue';
+      this._toggleNode.textContent = 'Queue';
+    } else {
+      this._rebaselineButtonNode.disabled = false;
+      this._rebaselineButtonNode.textContent =
+          'Rebaseline queue (' + queueLength + ' tests)';
+      this._toggleNode.textContent = 'Queue (' + queueLength + ' tests)';
+    }
+    this._removeSelectionButtonNode.disabled =
+        this._selectNode.selectedIndex == -1;
+};
+
+RebaselineQueue.prototype.addCurrentTest = function()
+{
+    var testName = getSelectedTest();
+    var test = results.tests[testName];
+
+    if (test.state != STATE_NEEDS_REBASELINE) {
+        log('Cannot add test with state "' + test.state + '" to queue.',
+            log.WARNING);
+        return;
+    }
+
+    var queueOption = document.createElement('option');
+    queueOption.value = testName;
+    queueOption.textContent = testName;
+    this._selectNode.appendChild(queueOption);
+    test.state = STATE_IN_QUEUE;
+    updateState();
+};
+
+RebaselineQueue.prototype.removeCurrentTest = function()
+{
+    this._removeTest(getSelectedTest());
+};
+
+RebaselineQueue.prototype._removeSelection = function()
+{
+    if (this._selectNode.selectedIndex == -1)
+        return;
+
+    this._removeTest(
+        this._selectNode.options[this._selectNode.selectedIndex].value);
+};
+
+RebaselineQueue.prototype._removeTest = function(testName)
+{
+    var queueOption = this._selectNode.firstChild;
+
+    while (queueOption && queueOption.value != testName) {
+        queueOption = queueOption.nextSibling;
+    }
+
+    if (!queueOption)
+        return;
+
+    this._selectNode.removeChild(queueOption);
+    var test = results.tests[testName];
+    test.state = STATE_NEEDS_REBASELINE;
+    updateState();
+};
+
+RebaselineQueue.prototype.rebaseline = function()
+{
+    var testNames = [];
+    for (var queueOption = this._selectNode.firstChild;
+         queueOption;
+         queueOption = queueOption.nextSibling) {
+        testNames.push(queueOption.value);
+    }
+
+    this._inProgressRebaselineCount = testNames.length;
+    updateState();
+
+    testNames.forEach(this._rebaselineTest, this);
+};
+
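+/**
+ * POSTs a rebaseline request for one test to the server; on completion it
+ * logs the response, removes the test from the queue, updates its state and,
+ * once the whole batch has finished, regenerates the test menu.
+ */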
+RebaselineQueue.prototype._rebaselineTest = function(testName)
+{
+    var baselineTarget = getSelectValue('baseline-target');
+    var baselineMoveTo = getSelectValue('baseline-move-to');
+
+    var xhr = new XMLHttpRequest();
+    xhr.open('POST',
+        '/rebaseline?test=' + encodeURIComponent(testName) +
+        '&baseline-target=' + encodeURIComponent(baselineTarget) +
+        '&baseline-move-to=' + encodeURIComponent(baselineMoveTo));
+
+    var self = this;
+    function handleResponse(logType, newState) {
+        log(xhr.responseText, logType);
+        self._removeTest(testName);
+        self._inProgressRebaselineCount--;
+        results.tests[testName].state = newState;
+        updateState();
+        // If we're done with a set of rebaselines, regenerate the test menu
+        // (which is grouped by state) since test states have changed.
+        if (self._inProgressRebaselineCount == 0) {
+            selectDirectory();
+        }
+    }
+
+    function handleSuccess() {
+        handleResponse(log.SUCCESS, STATE_REBASELINE_SUCCEEDED);
+    }
+    function handleFailure() {
+        handleResponse(log.ERROR, STATE_REBASELINE_FAILED);
+    }
+
+    xhr.addEventListener('load', function() {
+      if (xhr.status < 400) {
+          handleSuccess();
+      } else {
+          handleFailure();
+      }
+    });
+    xhr.addEventListener('error', handleFailure);
+
+    xhr.send();
+};
diff --git a/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/util.js b/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/util.js
new file mode 100644
index 0000000..5ad7612
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/util.js
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+var results;
+var testsByFailureType = {};
+var testsByDirectory = {};
+var selectedTests = [];
+
+function $(id)
+{
+    return document.getElementById(id);
+}
+
+function getSelectValue(id) 
+{
+    var select = $(id);
+    if (select.selectedIndex == -1) {
+        return null;
+    } else {
+        return select.options[select.selectedIndex].value;
+    }
+}
+
+function loadText(url, callback)
+{
+    var xhr = new XMLHttpRequest();
+    xhr.open('GET', url);
+    xhr.addEventListener('load', function() { callback(xhr.responseText); });
+    xhr.send();
+}
+
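+/**
+ * Appends a line to the on-page log, optionally prefixed with a colored
+ * type label (log.WARNING, log.SUCCESS or log.ERROR).
+ */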
+function log(text, type)
+{
+    var node = $('log');
+    
+    if (type) {
+        var typeNode = document.createElement('span');
+        typeNode.textContent = type.text;
+        typeNode.style.color = type.color;
+        node.appendChild(typeNode);
+    }
+
+    node.appendChild(document.createTextNode(text + '\n'));
+    node.scrollTop = node.scrollHeight;
+}
+
+log.WARNING = {text: 'Warning: ', color: '#aa3'};
+log.SUCCESS = {text: 'Success: ', color: 'green'};
+log.ERROR = {text: 'Error: ', color: 'red'};
+
+function toggle(id)
+{
+    var element = $(id);
+    var toggler = $('toggle-' + id);
+    if (element.style.display == 'none') {
+        element.style.display = '';
+        toggler.className = 'link selected';
+    } else {
+        element.style.display = 'none';
+        toggler.className = 'link';
+    }
+}
+
+function getTracUrl(layoutTestPath)
+{
+  return 'http://trac.webkit.org/browser/trunk/LayoutTests/' + layoutTestPath;
+}
+
+function getSortedKeys(obj)
+{
+    var keys = [];
+    for (var key in obj) {
+        keys.push(key);
+    }
+    keys.sort();
+    return keys;
+}
\ No newline at end of file
diff --git a/Tools/Scripts/webkitpy/tool/servers/gardeningserver.py b/Tools/Scripts/webkitpy/tool/servers/gardeningserver.py
new file mode 100644
index 0000000..e89c2a0
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/servers/gardeningserver.py
@@ -0,0 +1,142 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import BaseHTTPServer
+import SocketServer
+import logging
+import json
+import os
+import sys
+import urllib
+
+from webkitpy.common.memoized import memoized
+from webkitpy.tool.servers.reflectionhandler import ReflectionHandler
+from webkitpy.layout_tests.port import builders
+
+
+_log = logging.getLogger(__name__)
+
+
+class BuildCoverageExtrapolator(object):
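+    """Maps a builder name to the set of test configurations covered by that
+    builder, based on its coverage specifiers."""
+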
+    def __init__(self, test_configuration_converter):
+        self._test_configuration_converter = test_configuration_converter
+
+    @memoized
+    def _covered_test_configurations_for_builder_name(self):
+        coverage = {}
+        for builder_name in builders.all_builder_names():
+            coverage[builder_name] = self._test_configuration_converter.to_config_set(builders.coverage_specifiers_for_builder_name(builder_name))
+        return coverage
+
+    def extrapolate_test_configurations(self, builder_name):
+        return self._covered_test_configurations_for_builder_name()[builder_name]
+
+
+class GardeningHTTPServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
+    def __init__(self, httpd_port, config):
+        server_name = ''
+        self.tool = config['tool']
+        self.options = config['options']
+        BaseHTTPServer.HTTPServer.__init__(self, (server_name, httpd_port), GardeningHTTPRequestHandler)
+
+    def url(self, args=None):
+        # We can't use urllib.encode() here because that encodes spaces as plus signs and the buildbots don't decode those properly.
+        arg_string = ('?' + '&'.join("%s=%s" % (key, urllib.quote(value)) for (key, value) in args.items())) if args else ''
+        return 'file://' + os.path.join(GardeningHTTPRequestHandler.STATIC_FILE_DIRECTORY, 'garden-o-matic.html' + arg_string)
+
+
+class GardeningHTTPRequestHandler(ReflectionHandler):
+    STATIC_FILE_NAMES = frozenset()
+
+    STATIC_FILE_DIRECTORY = os.path.join(
+        os.path.dirname(__file__),
+        '..',
+        '..',
+        '..',
+        '..',
+        'BuildSlaveSupport',
+        'build.webkit.org-config',
+        'public_html',
+        'TestFailures')
+
+    allow_cross_origin_requests = True
+    debug_output = ''
+
+    def rollout(self):
+        revision = self.query['revision'][0]
+        reason = self.query['reason'][0]
+        self._run_webkit_patch([
+            'rollout',
+            '--force-clean',
+            '--non-interactive',
+            revision,
+            reason,
+        ])
+        self._serve_text('success')
+
+    def ping(self):
+        self._serve_text('pong')
+
+    def _run_webkit_patch(self, command, input_string=''):
+        PIPE = self.server.tool.executive.PIPE
+        process = self.server.tool.executive.popen([self.server.tool.path()] + command, cwd=self.server.tool.scm().checkout_root, stdin=PIPE, stdout=PIPE, stderr=PIPE)
+        process.stdin.write(input_string)
+        output, error = process.communicate()
+        return (process.returncode, output, error)
+
+    def rebaselineall(self):
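+        """Runs the 'rebaseline-json' webkit-patch command with the JSON
+        request body as its standard input, forwarding the relevant server
+        options as command-line flags."""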
+        command = ['rebaseline-json']
+        if self.server.options.move_overwritten_baselines:
+            command.append('--move-overwritten-baselines')
+        if self.server.options.results_directory:
+            command.extend(['--results-directory', self.server.options.results_directory])
+        if not self.server.options.optimize:
+            command.append('--no-optimize')
+        if self.server.options.verbose:
+            command.append('--verbose')
+        json_input = self.read_entity_body()
+
+        _log.debug("calling %s, input='%s'", command, json_input)
+        return_code, output, error = self._run_webkit_patch(command, json_input)
+        print >> sys.stderr, error
+        if return_code:
+            _log.error("rebaseline-json failed: %d, output='%s'" % (return_code, output))
+        else:
+            _log.debug("rebaseline-json succeeded")
+
+        # FIXME: propagate error and/or log messages back to the UI.
+        self._serve_text('success')
+
+    def localresult(self):
+        path = self.query['path'][0]
+        filesystem = self.server.tool.filesystem
+
+        # Ensure that we're only serving files from inside the results directory.
+        if not filesystem.isabs(path) and self.server.options.results_directory:
+            fullpath = filesystem.abspath(filesystem.join(self.server.options.results_directory, path))
+            if fullpath.startswith(filesystem.abspath(self.server.options.results_directory)):
+                self._serve_file(fullpath, headers_only=(self.command == 'HEAD'))
+                return
+
+        self._send_response(403)
diff --git a/Tools/Scripts/webkitpy/tool/servers/gardeningserver_unittest.py b/Tools/Scripts/webkitpy/tool/servers/gardeningserver_unittest.py
new file mode 100644
index 0000000..9961648
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/servers/gardeningserver_unittest.py
@@ -0,0 +1,123 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import json
+import sys
+import unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.layout_tests.models.test_configuration import *
+from webkitpy.layout_tests.port import builders
+from webkitpy.thirdparty.mock import Mock
+from webkitpy.tool.mocktool import MockTool
+from webkitpy.common.system.executive_mock import MockExecutive
+from webkitpy.common.host_mock import MockHost
+from webkitpy.tool.servers.gardeningserver import *
+
+
+class TestPortFactory(object):
+    # FIXME: Why is this a class method?
+    @classmethod
+    def create(cls):
+        host = MockHost()
+        return host.port_factory.get("test-win-xp")
+
+    @classmethod
+    def path_to_test_expectations_file(cls):
+        return cls.create().path_to_test_expectations_file()
+
+
+class MockServer(object):
+    def __init__(self):
+        self.tool = MockTool()
+        self.tool.executive = MockExecutive(should_log=True)
+        self.tool.filesystem.files[TestPortFactory.path_to_test_expectations_file()] = ""
+
+
+# The real GardeningHTTPRequestHandler has a constructor that's too hard to
+# call in a unit test, so we create a subclass that's easier to construct.
+class TestGardeningHTTPRequestHandler(GardeningHTTPRequestHandler):
+    def __init__(self, server):
+        self.server = server
+        self.body = None
+
+    def _expectations_updater(self):
+        return GardeningExpectationsUpdater(self.server.tool, TestPortFactory.create())
+
+    def read_entity_body(self):
+        return self.body if self.body else ''
+
+    def _serve_text(self, text):
+        print "== Begin Response =="
+        print text
+        print "== End Response =="
+
+    def _serve_json(self, json_object):
+        print "== Begin JSON Response =="
+        print json.dumps(json_object)
+        print "== End JSON Response =="
+
+
+class BuildCoverageExtrapolatorTest(unittest.TestCase):
+    def test_extrapolate(self):
+        # FIXME: Make this test not rely on actual (not mock) port objects.
+        host = MockHost()
+        port = host.port_factory.get('chromium-win-win7', None)
+        converter = TestConfigurationConverter(port.all_test_configurations(), port.configuration_specifier_macros())
+        extrapolator = BuildCoverageExtrapolator(converter)
+        self.assertEquals(extrapolator.extrapolate_test_configurations("WebKit XP"), set([TestConfiguration(version='xp', architecture='x86', build_type='release')]))
+        self.assertRaises(KeyError, extrapolator.extrapolate_test_configurations, "Potato")
+
+
+class GardeningServerTest(unittest.TestCase):
+    def _post_to_path(self, path, body=None, expected_stderr=None, expected_stdout=None, server=None):
+        handler = TestGardeningHTTPRequestHandler(server or MockServer())
+        handler.path = path
+        handler.body = body
+        OutputCapture().assert_outputs(self, handler.do_POST, expected_stderr=expected_stderr, expected_stdout=expected_stdout)
+
+    def disabled_test_rollout(self):
+        expected_stderr = "MOCK run_command: ['echo', 'rollout', '--force-clean', '--non-interactive', '2314', 'MOCK rollout reason'], cwd=/mock-checkout\n"
+        expected_stdout = "== Begin Response ==\nsuccess\n== End Response ==\n"
+        self._post_to_path("/rollout?revision=2314&reason=MOCK+rollout+reason", expected_stderr=expected_stderr, expected_stdout=expected_stdout)
+
+    def disabled_test_rebaselineall(self):
+        expected_stderr = "MOCK run_command: ['echo', 'rebaseline-json'], cwd=/mock-checkout, input={\"user-scripts/another-test.html\":{\"%s\": [%s]}}\n"
+        expected_stdout = "== Begin Response ==\nsuccess\n== End Response ==\n"
+        server = MockServer()
+
+        self.output = ['{"add": [], "delete": []}', '']
+
+        def run_command(args, cwd=None, input=None, **kwargs):
+            print >> sys.stderr, "MOCK run_command: %s, cwd=%s, input=%s" % (args, cwd, input)
+            return self.output.pop(0)
+
+        server.tool.executive.run_command = run_command
+        self._post_to_path("/rebaselineall", body='{"user-scripts/another-test.html":{"MOCK builder": ["txt","png"]}}', expected_stderr=expected_stderr % ('MOCK builder', '"txt","png"'), expected_stdout=expected_stdout, server=server)
+
+        self._post_to_path("/rebaselineall", body='{"user-scripts/another-test.html":{"MOCK builder (Debug)": ["txt","png"]}}', expected_stderr=expected_stderr % ('MOCK builder (Debug)', '"txt","png"'), expected_stdout=expected_stdout)
diff --git a/Tools/Scripts/webkitpy/tool/servers/rebaselineserver.py b/Tools/Scripts/webkitpy/tool/servers/rebaselineserver.py
new file mode 100644
index 0000000..9e9c379
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/servers/rebaselineserver.py
@@ -0,0 +1,288 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import fnmatch
+import os
+import os.path
+import BaseHTTPServer
+
+from webkitpy.common.host import Host  # FIXME: This should not be needed!
+from webkitpy.layout_tests.port.base import Port
+from webkitpy.tool.servers.reflectionhandler import ReflectionHandler
+
+
+STATE_NEEDS_REBASELINE = 'needs_rebaseline'
+STATE_REBASELINE_FAILED = 'rebaseline_failed'
+STATE_REBASELINE_SUCCEEDED = 'rebaseline_succeeded'
+
+
+def _get_actual_result_files(test_file, test_config):
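+    """Returns a sorted tuple of the '-actual.*' result files produced for
+    test_file in the results directory."""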
+    test_name, _ = os.path.splitext(test_file)
+    test_directory = os.path.dirname(test_file)
+
+    test_results_directory = test_config.filesystem.join(
+        test_config.results_directory, test_directory)
+    actual_pattern = os.path.basename(test_name) + '-actual.*'
+    actual_files = []
+    for filename in test_config.filesystem.listdir(test_results_directory):
+        if fnmatch.fnmatch(filename, actual_pattern):
+            actual_files.append(filename)
+    actual_files.sort()
+    return tuple(actual_files)
+
+
+def _rebaseline_test(test_file, baseline_target, baseline_move_to, test_config, log):
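+    """Copies the test's '-actual' results into the baseline_target platform
+    directory as new '-expected' baselines (optionally moving the current
+    baselines to baseline_move_to first) and adds the new files to SCM.
+    Returns True on success, False otherwise."""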
+    test_name, _ = os.path.splitext(test_file)
+    test_directory = os.path.dirname(test_name)
+
+    log('Rebaselining %s...' % test_name)
+
+    actual_result_files = _get_actual_result_files(test_file, test_config)
+    filesystem = test_config.filesystem
+    scm = test_config.scm
+    layout_tests_directory = test_config.layout_tests_directory
+    results_directory = test_config.results_directory
+    target_expectations_directory = filesystem.join(
+        layout_tests_directory, 'platform', baseline_target, test_directory)
+    test_results_directory = test_config.filesystem.join(
+        test_config.results_directory, test_directory)
+
+    # If requested, move current baselines out
+    current_baselines = get_test_baselines(test_file, test_config)
+    if baseline_target in current_baselines and baseline_move_to != 'none':
+        log('  Moving current %s baselines to %s' %
+            (baseline_target, baseline_move_to))
+
+        # See which ones we need to move (only those that are about to be
+        # updated), and make sure we're not clobbering any files in the
+        # destination.
+        current_extensions = set(current_baselines[baseline_target].keys())
+        actual_result_extensions = [
+            os.path.splitext(f)[1] for f in actual_result_files]
+        extensions_to_move = current_extensions.intersection(
+            actual_result_extensions)
+
+        if extensions_to_move.intersection(
+            current_baselines.get(baseline_move_to, {}).keys()):
+            log('    Already had baselines in %s, could not move existing '
+                '%s ones' % (baseline_move_to, baseline_target))
+            return False
+
+        # Do the actual move.
+        if extensions_to_move:
+            if not _move_test_baselines(
+                test_file,
+                list(extensions_to_move),
+                baseline_target,
+                baseline_move_to,
+                test_config,
+                log):
+                return False
+        else:
+            log('    No current baselines to move')
+
+    log('  Updating baselines for %s' % baseline_target)
+    filesystem.maybe_make_directory(target_expectations_directory)
+    for source_file in actual_result_files:
+        source_path = filesystem.join(test_results_directory, source_file)
+        destination_file = source_file.replace('-actual', '-expected')
+        destination_path = filesystem.join(
+            target_expectations_directory, destination_file)
+        filesystem.copyfile(source_path, destination_path)
+        exit_code = scm.add(destination_path, return_exit_code=True)
+        if exit_code:
+            log('    Could not update %s in SCM, exit code %d' %
+                (destination_file, exit_code))
+            return False
+        else:
+            log('    Updated %s' % destination_file)
+
+    return True
+
+
+def _move_test_baselines(test_file, extensions_to_move, source_platform, destination_platform, test_config, log):
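+    """Copies the existing '-expected' baselines for the given extensions from
+    the source platform directory to the destination platform directory and
+    adds them to SCM. Returns True on success, False otherwise."""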
+    test_file_name = os.path.splitext(os.path.basename(test_file))[0]
+    test_directory = os.path.dirname(test_file)
+    filesystem = test_config.filesystem
+
+    # Want predictable output order for unit tests.
+    extensions_to_move.sort()
+
+    source_directory = os.path.join(
+        test_config.layout_tests_directory,
+        'platform',
+        source_platform,
+        test_directory)
+    destination_directory = os.path.join(
+        test_config.layout_tests_directory,
+        'platform',
+        destination_platform,
+        test_directory)
+    filesystem.maybe_make_directory(destination_directory)
+
+    for extension in extensions_to_move:
+        file_name = test_file_name + '-expected' + extension
+        source_path = filesystem.join(source_directory, file_name)
+        destination_path = filesystem.join(destination_directory, file_name)
+        filesystem.copyfile(source_path, destination_path)
+        exit_code = test_config.scm.add(destination_path, return_exit_code=True)
+        if exit_code:
+            log('    Could not update %s in SCM, exit code %d' %
+                (file_name, exit_code))
+            return False
+        else:
+            log('    Moved %s' % file_name)
+
+    return True
+
+
+def get_test_baselines(test_file, test_config):
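+    """Returns a dict mapping platform name ('base' for the generic baselines)
+    to a dict of baseline extension -> whether that baseline was used when the
+    test was run against test_config.test_port."""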
+    # FIXME: This seems like a hack. This only seems to be used to access the Port.expected_baselines logic.
+    class AllPlatformsPort(Port):
+        def __init__(self, host):
+            super(AllPlatformsPort, self).__init__(host, 'mac')
+            self._platforms_by_directory = dict([(self._webkit_baseline_path(p), p) for p in test_config.platforms])
+
+        def baseline_search_path(self):
+            return self._platforms_by_directory.keys()
+
+        def platform_from_directory(self, directory):
+            return self._platforms_by_directory[directory]
+
+    test_path = test_config.filesystem.join(test_config.layout_tests_directory, test_file)
+
+    # FIXME: This should get the Host from the test_config to be mockable!
+    host = Host()
+    host.initialize_scm()
+    host.filesystem = test_config.filesystem
+    all_platforms_port = AllPlatformsPort(host)
+
+    all_test_baselines = {}
+    for baseline_extension in ('.txt', '.checksum', '.png'):
+        test_baselines = test_config.test_port.expected_baselines(test_file, baseline_extension)
+        baselines = all_platforms_port.expected_baselines(test_file, baseline_extension, all_baselines=True)
+        for platform_directory, expected_filename in baselines:
+            if not platform_directory:
+                continue
+            if platform_directory == test_config.layout_tests_directory:
+                platform = 'base'
+            else:
+                platform = all_platforms_port.platform_from_directory(platform_directory)
+            platform_baselines = all_test_baselines.setdefault(platform, {})
+            was_used_for_test = (platform_directory, expected_filename) in test_baselines
+            platform_baselines[baseline_extension] = was_used_for_test
+
+    return all_test_baselines
+
+
+class RebaselineHTTPServer(BaseHTTPServer.HTTPServer):
+    def __init__(self, httpd_port, config):
+        server_name = ""
+        BaseHTTPServer.HTTPServer.__init__(self, (server_name, httpd_port), RebaselineHTTPRequestHandler)
+        self.test_config = config['test_config']
+        self.results_json = config['results_json']
+        self.platforms_json = config['platforms_json']
+
+
+class RebaselineHTTPRequestHandler(ReflectionHandler):
+    STATIC_FILE_NAMES = frozenset([
+        "index.html",
+        "loupe.js",
+        "main.js",
+        "main.css",
+        "queue.js",
+        "util.js",
+    ])
+
+    STATIC_FILE_DIRECTORY = os.path.join(os.path.dirname(__file__), "data", "rebaselineserver")
+
+    def results_json(self):
+        self._serve_json(self.server.results_json)
+
+    def test_config(self):
+        self._serve_json(self.server.test_config)
+
+    def platforms_json(self):
+        self._serve_json(self.server.platforms_json)
+
+    def rebaseline(self):
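+        """Rebaselines the test named in the query parameters and returns the
+        accumulated log as a plain-text response; replies with 400 if the test
+        is not in the needs_rebaseline state."""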
+        test = self.query['test'][0]
+        baseline_target = self.query['baseline-target'][0]
+        baseline_move_to = self.query['baseline-move-to'][0]
+        test_json = self.server.results_json['tests'][test]
+
+        if test_json['state'] != STATE_NEEDS_REBASELINE:
+            self.send_error(400, "Test %s is in unexpected state: %s" % (test, test_json["state"]))
+            return
+
+        log = []
+        success = _rebaseline_test(
+            test,
+            baseline_target,
+            baseline_move_to,
+            self.server.test_config,
+            log=lambda l: log.append(l))
+
+        if success:
+            test_json['state'] = STATE_REBASELINE_SUCCEEDED
+            self.send_response(200)
+        else:
+            test_json['state'] = STATE_REBASELINE_FAILED
+            self.send_response(500)
+
+        self.send_header('Content-type', 'text/plain')
+        self.end_headers()
+        self.wfile.write('\n'.join(log))
+
+    def test_result(self):
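+        """Serves a single result file (expected/actual/diff image, checksum,
+        or text) for the test, selected by the 'mode' query parameter."""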
+        test_name, _ = os.path.splitext(self.query['test'][0])
+        mode = self.query['mode'][0]
+        if mode == 'expected-image':
+            file_name = test_name + '-expected.png'
+        elif mode == 'actual-image':
+            file_name = test_name + '-actual.png'
+        elif mode == 'expected-checksum':
+            file_name = test_name + '-expected.checksum'
+        elif mode == 'actual-checksum':
+            file_name = test_name + '-actual.checksum'
+        elif mode == 'diff-image':
+            file_name = test_name + '-diff.png'
+        elif mode == 'expected-text':
+            file_name = test_name + '-expected.txt'
+        elif mode == 'actual-text':
+            file_name = test_name + '-actual.txt'
+        elif mode == 'diff-text':
+            file_name = test_name + '-diff.txt'
+        elif mode == 'diff-text-pretty':
+            file_name = test_name + '-pretty-diff.html'
+
+        file_path = os.path.join(self.server.test_config.results_directory, file_name)
+
+        # Let results be cached for 60 seconds, so that they can be pre-fetched
+        # by the UI
+        self._serve_file(file_path, cacheable_seconds=60)
diff --git a/Tools/Scripts/webkitpy/tool/servers/rebaselineserver_unittest.py b/Tools/Scripts/webkitpy/tool/servers/rebaselineserver_unittest.py
new file mode 100644
index 0000000..f5c1cbf
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/servers/rebaselineserver_unittest.py
@@ -0,0 +1,311 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import json
+import unittest
+
+from webkitpy.common.net import resultsjsonparser_unittest
+from webkitpy.common.host_mock import MockHost
+from webkitpy.layout_tests.layout_package.json_results_generator import strip_json_wrapper
+from webkitpy.layout_tests.port.base import Port
+from webkitpy.tool.commands.rebaselineserver import TestConfig, RebaselineServer
+from webkitpy.tool.servers import rebaselineserver
+
+
+class RebaselineTestTest(unittest.TestCase):
+    def test_text_rebaseline_update(self):
+        self._assertRebaseline(
+            test_files=(
+                'fast/text-expected.txt',
+                'platform/mac/fast/text-expected.txt',
+            ),
+            results_files=(
+                'fast/text-actual.txt',
+            ),
+            test_name='fast/text.html',
+            baseline_target='mac',
+            baseline_move_to='none',
+            expected_success=True,
+            expected_log=[
+                'Rebaselining fast/text...',
+                '  Updating baselines for mac',
+                '    Updated text-expected.txt',
+            ])
+
+    def test_text_rebaseline_new(self):
+        self._assertRebaseline(
+            test_files=(
+                'fast/text-expected.txt',
+            ),
+            results_files=(
+                'fast/text-actual.txt',
+            ),
+            test_name='fast/text.html',
+            baseline_target='mac',
+            baseline_move_to='none',
+            expected_success=True,
+            expected_log=[
+                'Rebaselining fast/text...',
+                '  Updating baselines for mac',
+                '    Updated text-expected.txt',
+            ])
+
+    def test_text_rebaseline_move_no_op_1(self):
+        self._assertRebaseline(
+            test_files=(
+                'fast/text-expected.txt',
+                'platform/win/fast/text-expected.txt',
+            ),
+            results_files=(
+                'fast/text-actual.txt',
+            ),
+            test_name='fast/text.html',
+            baseline_target='mac',
+            baseline_move_to='mac-leopard',
+            expected_success=True,
+            expected_log=[
+                'Rebaselining fast/text...',
+                '  Updating baselines for mac',
+                '    Updated text-expected.txt',
+            ])
+
+    def test_text_rebaseline_move_no_op_2(self):
+        self._assertRebaseline(
+            test_files=(
+                'fast/text-expected.txt',
+                'platform/mac/fast/text-expected.checksum',
+            ),
+            results_files=(
+                'fast/text-actual.txt',
+            ),
+            test_name='fast/text.html',
+            baseline_target='mac',
+            baseline_move_to='mac-leopard',
+            expected_success=True,
+            expected_log=[
+                'Rebaselining fast/text...',
+                '  Moving current mac baselines to mac-leopard',
+                '    No current baselines to move',
+                '  Updating baselines for mac',
+                '    Updated text-expected.txt',
+            ])
+
+    def test_text_rebaseline_move(self):
+        self._assertRebaseline(
+            test_files=(
+                'fast/text-expected.txt',
+                'platform/mac/fast/text-expected.txt',
+            ),
+            results_files=(
+                'fast/text-actual.txt',
+            ),
+            test_name='fast/text.html',
+            baseline_target='mac',
+            baseline_move_to='mac-leopard',
+            expected_success=True,
+            expected_log=[
+                'Rebaselining fast/text...',
+                '  Moving current mac baselines to mac-leopard',
+                '    Moved text-expected.txt',
+                '  Updating baselines for mac',
+                '    Updated text-expected.txt',
+            ])
+
+    def test_text_rebaseline_move_only_images(self):
+        self._assertRebaseline(
+            test_files=(
+                'fast/image-expected.txt',
+                'platform/mac/fast/image-expected.txt',
+                'platform/mac/fast/image-expected.png',
+                'platform/mac/fast/image-expected.checksum',
+            ),
+            results_files=(
+                'fast/image-actual.png',
+                'fast/image-actual.checksum',
+            ),
+            test_name='fast/image.html',
+            baseline_target='mac',
+            baseline_move_to='mac-leopard',
+            expected_success=True,
+            expected_log=[
+                'Rebaselining fast/image...',
+                '  Moving current mac baselines to mac-leopard',
+                '    Moved image-expected.checksum',
+                '    Moved image-expected.png',
+                '  Updating baselines for mac',
+                '    Updated image-expected.checksum',
+                '    Updated image-expected.png',
+            ])
+
+    def test_text_rebaseline_move_already_exist(self):
+        self._assertRebaseline(
+            test_files=(
+                'fast/text-expected.txt',
+                'platform/mac-leopard/fast/text-expected.txt',
+                'platform/mac/fast/text-expected.txt',
+            ),
+            results_files=(
+                'fast/text-actual.txt',
+            ),
+            test_name='fast/text.html',
+            baseline_target='mac',
+            baseline_move_to='mac-leopard',
+            expected_success=False,
+            expected_log=[
+                'Rebaselining fast/text...',
+                '  Moving current mac baselines to mac-leopard',
+                '    Already had baselines in mac-leopard, could not move existing mac ones',
+            ])
+
+    def test_image_rebaseline(self):
+        self._assertRebaseline(
+            test_files=(
+                'fast/image-expected.txt',
+                'platform/mac/fast/image-expected.png',
+                'platform/mac/fast/image-expected.checksum',
+            ),
+            results_files=(
+                'fast/image-actual.png',
+                'fast/image-actual.checksum',
+            ),
+            test_name='fast/image.html',
+            baseline_target='mac',
+            baseline_move_to='none',
+            expected_success=True,
+            expected_log=[
+                'Rebaselining fast/image...',
+                '  Updating baselines for mac',
+                '    Updated image-expected.checksum',
+                '    Updated image-expected.png',
+            ])
+
+    def test_gather_baselines(self):
+        example_json = resultsjsonparser_unittest.ResultsJSONParserTest._example_full_results_json
+        results_json = json.loads(strip_json_wrapper(example_json))
+        server = RebaselineServer()
+        server._test_config = get_test_config()
+        server._gather_baselines(results_json)
+        self.assertEqual(results_json['tests']['svg/dynamic-updates/SVGFEDropShadowElement-dom-stdDeviation-attr.html']['state'], 'needs_rebaseline')
+        self.assertFalse('prototype-chocolate.html' in results_json['tests'])
+
+    def _assertRebaseline(self, test_files, results_files, test_name, baseline_target, baseline_move_to, expected_success, expected_log):
+        log = []
+        test_config = get_test_config(test_files, results_files)
+        success = rebaselineserver._rebaseline_test(
+            test_name,
+            baseline_target,
+            baseline_move_to,
+            test_config,
+            log=lambda l: log.append(l))
+        self.assertEqual(expected_log, log)
+        self.assertEqual(expected_success, success)
+
+
+class GetActualResultFilesTest(unittest.TestCase):
+    def test(self):
+        test_config = get_test_config(result_files=(
+            'fast/text-actual.txt',
+            'fast2/text-actual.txt',
+            'fast/text2-actual.txt',
+            'fast/text-notactual.txt',
+        ))
+        self.assertEqual(
+            ('text-actual.txt',),
+            rebaselineserver._get_actual_result_files(
+                'fast/text.html', test_config))
+
+
+class GetBaselinesTest(unittest.TestCase):
+    def test_no_baselines(self):
+        self._assertBaselines(
+            test_files=(),
+            test_name='fast/missing.html',
+            expected_baselines={})
+
+    def test_text_baselines(self):
+        self._assertBaselines(
+            test_files=(
+                'fast/text-expected.txt',
+                'platform/mac/fast/text-expected.txt',
+            ),
+            test_name='fast/text.html',
+            expected_baselines={
+                'mac': {'.txt': True},
+                'base': {'.txt': False},
+            })
+
+    def test_image_and_text_baselines(self):
+        self._assertBaselines(
+            test_files=(
+                'fast/image-expected.txt',
+                'platform/mac/fast/image-expected.png',
+                'platform/mac/fast/image-expected.checksum',
+                'platform/win/fast/image-expected.png',
+                'platform/win/fast/image-expected.checksum',
+            ),
+            test_name='fast/image.html',
+            expected_baselines={
+                'base': {'.txt': True},
+                'mac': {'.checksum': True, '.png': True},
+                'win': {'.checksum': False, '.png': False},
+            })
+
+    def test_extra_baselines(self):
+        self._assertBaselines(
+            test_files=(
+                'fast/text-expected.txt',
+                'platform/nosuchplatform/fast/text-expected.txt',
+            ),
+            test_name='fast/text.html',
+            expected_baselines={'base': {'.txt': True}})
+
+    def _assertBaselines(self, test_files, test_name, expected_baselines):
+        actual_baselines = rebaselineserver.get_test_baselines(test_name, get_test_config(test_files))
+        self.assertEqual(expected_baselines, actual_baselines)
+
+
+def get_test_config(test_files=[], result_files=[]):
+    # We could grab this from port.layout_tests_dir(), but instantiating a fully mocked port is a pain.
+    layout_tests_directory = "/mock-checkout/LayoutTests"
+    results_directory = '/WebKitBuild/Debug/layout-test-results'
+    host = MockHost()
+    for file in test_files:
+        host.filesystem.write_binary_file(host.filesystem.join(layout_tests_directory, file), '')
+    for file in result_files:
+        host.filesystem.write_binary_file(host.filesystem.join(results_directory, file), '')
+
+    class TestMacPort(Port):
+        port_name = "mac"
+
+    return TestConfig(
+        TestMacPort(host, 'mac'),
+        layout_tests_directory,
+        results_directory,
+        ('mac', 'mac-leopard', 'win', 'linux'),
+        host.filesystem,
+        host.scm())
diff --git a/Tools/Scripts/webkitpy/tool/servers/reflectionhandler.py b/Tools/Scripts/webkitpy/tool/servers/reflectionhandler.py
new file mode 100644
index 0000000..9308709
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/servers/reflectionhandler.py
@@ -0,0 +1,143 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import BaseHTTPServer
+
+import cgi
+import codecs
+import datetime
+import fnmatch
+import json
+import mimetypes
+import os.path
+import shutil
+import threading
+import time
+import urlparse
+import wsgiref.handlers
+
+
+class ReflectionHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+    # Subclasses should override.
+    STATIC_FILE_NAMES = None
+    STATIC_FILE_DIRECTORY = None
+
+    # Setting this flag to True causes the server to send
+    #   Access-Control-Allow-Origin: *
+    # with every response.
+    allow_cross_origin_requests = False
+
+    def do_GET(self):
+        self._handle_request()
+
+    def do_POST(self):
+        self._handle_request()
+
+    def do_HEAD(self):
+        self._handle_request()
+
+    def read_entity_body(self):
+        length = int(self.headers.getheader('content-length'))
+        return self.rfile.read(length)
+
+    def _read_entity_body_as_json(self):
+        return json.loads(self.read_entity_body())
+
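+    # Dispatches a request either to a static file (when the path names one
+    # of STATIC_FILE_NAMES) or to a handler method derived from the path by
+    # replacing dots with underscores, e.g. /results.json -> results_json().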
+    def _handle_request(self):
+        if "?" in self.path:
+            path, query_string = self.path.split("?", 1)
+            self.query = cgi.parse_qs(query_string)
+        else:
+            path = self.path
+            self.query = {}
+        function_or_file_name = path[1:] or "index.html"
+
+        if function_or_file_name in self.STATIC_FILE_NAMES:
+            self._serve_static_file(function_or_file_name)
+            return
+
+        function_name = function_or_file_name.replace(".", "_")
+        if not hasattr(self, function_name):
+            self.send_error(404, "Unknown function %s" % function_name)
+            return
+        if function_name[0] == "_":
+            self.send_error(401, "Not allowed to invoke private or protected methods")
+            return
+        function = getattr(self, function_name)
+        function()
+
+    def _serve_static_file(self, static_path):
+        self._serve_file(os.path.join(self.STATIC_FILE_DIRECTORY, static_path))
+
+    def quitquitquit(self):
+        self._serve_text("Server quit.\n")
+        # Shutdown has to happen on a thread other than the server's own
+        # thread, otherwise there is a deadlock.
+        threading.Thread(target=lambda: self.server.shutdown()).start()
+
+    def _send_access_control_header(self):
+        if self.allow_cross_origin_requests:
+            self.send_header('Access-Control-Allow-Origin', '*')
+
+    def _serve_text(self, text):
+        self.send_response(200)
+        self._send_access_control_header()
+        self.send_header("Content-type", "text/plain")
+        self.end_headers()
+        self.wfile.write(text)
+
+    def _serve_json(self, json_object):
+        self.send_response(200)
+        self._send_access_control_header()
+        self.send_header('Content-type', 'application/json')
+        self.end_headers()
+        json.dump(json_object, self.wfile)
+
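+    # Serves a file from disk, guessing its MIME type and, when
+    # cacheable_seconds is non-zero, adding an Expires header that far in
+    # the future.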
+    def _serve_file(self, file_path, cacheable_seconds=0, headers_only=False):
+        if not os.path.exists(file_path):
+            self.send_error(404, "File not found")
+            return
+        with codecs.open(file_path, "rb") as static_file:
+            self.send_response(200)
+            self._send_access_control_header()
+            self.send_header("Content-Length", os.path.getsize(file_path))
+            mime_type, encoding = mimetypes.guess_type(file_path)
+            if mime_type:
+                self.send_header("Content-type", mime_type)
+
+            if cacheable_seconds:
+                expires_time = (datetime.datetime.now() +
+                    datetime.timedelta(0, cacheable_seconds))
+                expires_formatted = wsgiref.handlers.format_date_time(
+                    time.mktime(expires_time.timetuple()))
+                self.send_header("Expires", expires_formatted)
+            self.end_headers()
+
+            if not headers_only:
+                shutil.copyfileobj(static_file, self.wfile)
diff --git a/Tools/Scripts/webkitpy/tool/steps/__init__.py b/Tools/Scripts/webkitpy/tool/steps/__init__.py
new file mode 100644
index 0000000..56429e8
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/__init__.py
@@ -0,0 +1,65 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# FIXME: Is this the right way to do this?
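+# Re-export every step so callers can import them directly from
+# webkitpy.tool.steps rather than from the individual modules.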
+from webkitpy.tool.steps.addsvnmimetypeforpng import AddSvnMimetypeForPng
+from webkitpy.tool.steps.applypatch import ApplyPatch
+from webkitpy.tool.steps.applypatchwithlocalcommit import ApplyPatchWithLocalCommit
+from webkitpy.tool.steps.applywatchlist import ApplyWatchList
+from webkitpy.tool.steps.attachtobug import AttachToBug
+from webkitpy.tool.steps.build import Build
+from webkitpy.tool.steps.checkstyle import CheckStyle
+from webkitpy.tool.steps.cleanworkingdirectory import CleanWorkingDirectory
+from webkitpy.tool.steps.cleanworkingdirectorywithlocalcommits import CleanWorkingDirectoryWithLocalCommits
+from webkitpy.tool.steps.closebug import CloseBug
+from webkitpy.tool.steps.closebugforlanddiff import CloseBugForLandDiff
+from webkitpy.tool.steps.closepatch import ClosePatch
+from webkitpy.tool.steps.commit import Commit
+from webkitpy.tool.steps.confirmdiff import ConfirmDiff
+from webkitpy.tool.steps.createbug import CreateBug
+from webkitpy.tool.steps.editchangelog import EditChangeLog
+from webkitpy.tool.steps.ensurebugisopenandassigned import EnsureBugIsOpenAndAssigned
+from webkitpy.tool.steps.ensurelocalcommitifneeded import EnsureLocalCommitIfNeeded
+from webkitpy.tool.steps.obsoletepatches import ObsoletePatches
+from webkitpy.tool.steps.options import Options
+from webkitpy.tool.steps.postdiff import PostDiff
+from webkitpy.tool.steps.postdiffforcommit import PostDiffForCommit
+from webkitpy.tool.steps.postdiffforrevert import PostDiffForRevert
+from webkitpy.tool.steps.preparechangelog import PrepareChangeLog
+from webkitpy.tool.steps.preparechangelogfordepsroll import PrepareChangeLogForDEPSRoll
+from webkitpy.tool.steps.preparechangelogforrevert import PrepareChangeLogForRevert
+from webkitpy.tool.steps.promptforbugortitle import PromptForBugOrTitle
+from webkitpy.tool.steps.reopenbugafterrollout import ReopenBugAfterRollout
+from webkitpy.tool.steps.revertrevision import RevertRevision
+from webkitpy.tool.steps.runtests import RunTests
+from webkitpy.tool.steps.suggestreviewers import SuggestReviewers
+from webkitpy.tool.steps.update import Update
+from webkitpy.tool.steps.updatechangelogswithreviewer import UpdateChangeLogsWithReviewer
+from webkitpy.tool.steps.updatechromiumdeps import UpdateChromiumDEPS
+from webkitpy.tool.steps.validatechangelogs import ValidateChangeLogs
+from webkitpy.tool.steps.validatereviewer import ValidateReviewer
diff --git a/Tools/Scripts/webkitpy/tool/steps/abstractstep.py b/Tools/Scripts/webkitpy/tool/steps/abstractstep.py
new file mode 100644
index 0000000..2a5fea6
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/abstractstep.py
@@ -0,0 +1,78 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.tool.steps.options import Options
+
+
+class AbstractStep(object):
+    def __init__(self, tool, options):
+        self._tool = tool
+        self._options = options
+
+    def _exit(self, code):
+        sys.exit(code)
+
+    def _changed_files(self, state):
+        return self.cached_lookup(state, "changed_files")
+
+    _well_known_keys = {
+        # FIXME: Should this use state.get('bug_id') or state.get('patch').bug_id() like UpdateChangeLogsWithReviewer does?
+        "bug": lambda self, state: self._tool.bugs.fetch_bug(state["bug_id"]),
+        # bug_title can either be a new title given by the user, or one from an existing bug.
+        "bug_title": lambda self, state: self.cached_lookup(state, 'bug').title(),
+        "changed_files": lambda self, state: self._tool.scm().changed_files(self._options.git_commit),
+        "diff": lambda self, state: self._tool.scm().create_patch(self._options.git_commit, changed_files=self._changed_files(state)),
+        # Absolute path to ChangeLog files.
+        "changelogs": lambda self, state: self._tool.checkout().modified_changelogs(self._options.git_commit, changed_files=self._changed_files(state)),
+    }
+
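+    # Lazily computes and caches a value in the shared state dictionary: if
+    # the key is missing, the supplied promise (or the matching
+    # _well_known_keys entry) is called to produce it.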
+    def cached_lookup(self, state, key, promise=None):
+        if state.get(key):
+            return state[key]
+        if not promise:
+            promise = self._well_known_keys.get(key)
+        state[key] = promise(self, state)
+        return state[key]
+
+    def did_modify_checkout(self, state):
+        state["diff"] = None
+        state["changelogs"] = None
+        state["changed_files"] = None
+
+    @classmethod
+    def options(cls):
+        return [
+            # We need this option here because cached_lookup uses it.  :(
+            Options.git_commit,
+        ]
+
+    def run(self, state):
+        raise NotImplementedError("subclasses must implement")
diff --git a/Tools/Scripts/webkitpy/tool/steps/addsvnmimetypeforpng.py b/Tools/Scripts/webkitpy/tool/steps/addsvnmimetypeforpng.py
new file mode 100644
index 0000000..73bec15
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/addsvnmimetypeforpng.py
@@ -0,0 +1,77 @@
+# Copyright (C) 2012 Balazs Ankes (bank@inf.u-szeged.hu) University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.common import checksvnconfigfile
+from webkitpy.common.system.deprecated_logging import log
+from webkitpy.common.checkout.scm.detection import SCMDetector
+from webkitpy.common.system.systemhost import SystemHost
+
+
+class AddSvnMimetypeForPng(AbstractStep):
+    def __init__(self, tool, options, host=None, scm=None):
+        self._tool = tool
+        self._options = options
+        self._host = host or SystemHost()
+        self._fs = self._host.filesystem
+        self._detector = scm or SCMDetector(self._fs, self._host.executive).detect_scm_system(self._fs.getcwd())
+
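+    # For git checkouts, warns (and optionally aborts) when the SVN config
+    # would not auto-set svn:mime-type on PNGs; for svn checkouts, sets the
+    # property directly on each changed PNG that is missing it.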
+    def run(self, state):
+        png_files = self._check_pngs(self._changed_files(state))
+
+        if png_files:
+            detection = self._detector.display_name()
+
+            if detection == "git":
+                (file_missing, autoprop_missing, png_missing) = checksvnconfigfile.check(self._host, self._fs)
+                config_file_path = checksvnconfigfile.config_file_path(self._host, self._fs)
+
+                if file_missing:
+                    log("There is no SVN config file. The svn:mime-type of pngs won't set.")
+                    if not self._tool.user.confirm("Are you sure you want to continue?", default="n"):
+                        self._exit(1)
+                elif autoprop_missing and png_missing:
+                    log(checksvnconfigfile.errorstr_autoprop(config_file_path) + checksvnconfigfile.errorstr_png(config_file_path))
+                    if not self._tool.user.confirm("Do you want to continue?", default="n"):
+                        self._exit(1)
+                elif autoprop_missing:
+                    log(checksvnconfigfile.errorstr_autoprop(config_file_path))
+                    if not self._tool.user.confirm("Do you want to continue?", default="n"):
+                        self._exit(1)
+                elif png_missing:
+                    log(checksvnconfigfile.errorstr_png(config_file_path))
+                    if not self._tool.user.confirm("Do you want to continue?", default="n"):
+                        self._exit(1)
+
+            elif detection == "svn":
+                for filename in png_files:
+                    if self._detector.exists(filename) and self._detector.propget('svn:mime-type', filename) != 'image/png':
+                        print "Adding image/png mime-type to %s" % filename
+                        self._detector.propset('svn:mime-type', 'image/png', filename)
+
+    def _check_pngs(self, changed_files):
+        png_files = []
+        for filename in changed_files:
+            if filename.endswith('.png'):
+                png_files.append(filename)
+        return png_files
diff --git a/Tools/Scripts/webkitpy/tool/steps/addsvnmimetypeforpng_unittest.py b/Tools/Scripts/webkitpy/tool/steps/addsvnmimetypeforpng_unittest.py
new file mode 100644
index 0000000..221c6bc
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/addsvnmimetypeforpng_unittest.py
@@ -0,0 +1,58 @@
+# Copyright (C) 2012 Balazs Ankes (bank@inf.u-szeged.hu) University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1.  Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+# 2.  Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in the
+#     documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.tool.steps.addsvnmimetypeforpng import AddSvnMimetypeForPng
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.tool.mocktool import MockOptions, MockTool
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+from webkitpy.common.system.outputcapture import OutputCapture
+
+
+class MockSCMDetector(object):
+
+    def __init__(self, scm):
+        self._scm = scm
+
+    def display_name(self):
+        return self._scm
+
+
+class AddSvnMimetypeForPngTest(unittest.TestCase):
+    def test_run(self):
+        capture = OutputCapture()
+        options = MockOptions(git_commit='MOCK git commit')
+
+        files = {'/Users/mock/.subversion/config': 'enable-auto-props = yes\n*.png = svn:mime-type=image/png'}
+        fs = MockFileSystem(files)
+        scm = MockSCMDetector('git')
+
+        step = AddSvnMimetypeForPng(MockTool(), options, MockSystemHost(os_name='linux', filesystem=fs), scm)
+        state = {
+            "changed_files": ["test.png", "test.txt"],
+        }
+        try:
+            capture.assert_outputs(self, step.run, [state])
+        except SystemExit, e:
+            self.assertEqual(e.code, 1)
diff --git a/Tools/Scripts/webkitpy/tool/steps/applypatch.py b/Tools/Scripts/webkitpy/tool/steps/applypatch.py
new file mode 100644
index 0000000..5c36169
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/applypatch.py
@@ -0,0 +1,42 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.tool.steps.options import Options
+from webkitpy.common.system.deprecated_logging import log
+
+class ApplyPatch(AbstractStep):
+    @classmethod
+    def options(cls):
+        return AbstractStep.options() + [
+            Options.non_interactive,
+        ]
+
+    def run(self, state):
+        log("Processing patch %s from bug %s." % (state["patch"].id(), state["patch"].bug_id()))
+        self._tool.checkout().apply_patch(state["patch"])
diff --git a/Tools/Scripts/webkitpy/tool/steps/applypatchwithlocalcommit.py b/Tools/Scripts/webkitpy/tool/steps/applypatchwithlocalcommit.py
new file mode 100644
index 0000000..3dcd8d9
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/applypatchwithlocalcommit.py
@@ -0,0 +1,43 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.steps.applypatch import ApplyPatch
+from webkitpy.tool.steps.options import Options
+
+class ApplyPatchWithLocalCommit(ApplyPatch):
+    @classmethod
+    def options(cls):
+        return ApplyPatch.options() + [
+            Options.local_commit,
+        ]
+
+    def run(self, state):
+        ApplyPatch.run(self, state)
+        if self._options.local_commit:
+            commit_message = self._tool.checkout().commit_message_for_this_commit(git_commit=None)
+            self._tool.scm().commit_locally_with_message(commit_message.message() or state["patch"].name())
diff --git a/Tools/Scripts/webkitpy/tool/steps/applywatchlist.py b/Tools/Scripts/webkitpy/tool/steps/applywatchlist.py
new file mode 100644
index 0000000..a4bb891
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/applywatchlist.py
@@ -0,0 +1,67 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.system import logutils
+from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.tool.steps.options import Options
+
+
+_log = logutils.get_logger(__file__)
+
+
+class ApplyWatchList(AbstractStep):
+    @classmethod
+    def options(cls):
+        return AbstractStep.options() + [
+            Options.git_commit,
+        ]
+
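+    # Evaluates the watchlist against the change's diff and, when a bug id
+    # is available, CCs the matched watchers and posts the watchlist
+    # messages, skipping people and comments that are already on the bug.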
+    def run(self, state):
+        diff = self.cached_lookup(state, 'diff')
+        bug_id = state.get('bug_id')
+
+        cc_and_messages = self._tool.watch_list().determine_cc_and_messages(diff)
+        cc_emails = cc_and_messages['cc_list']
+        messages = cc_and_messages['messages']
+        if bug_id:
+            # Remove emails and cc's which are already in the bug or the reporter.
+            bug = self._tool.bugs.fetch_bug(bug_id)
+
+            messages = filter(lambda message: not bug.is_in_comments(message), messages)
+            cc_emails = set(cc_emails).difference(bug.cc_emails())
+            cc_emails.discard(bug.reporter_email())
+
+        comment_text = '\n\n'.join(messages)
+        if bug_id:
+            if cc_emails or comment_text:
+                self._tool.bugs.post_comment_to_bug(bug_id, comment_text, cc_emails)
+            log_result = _log.debug
+        else:
+            _log.info('No bug was updated because no id was given.')
+            log_result = _log.info
+        log_result('Result of watchlist: cc "%s" messages "%s"' % (', '.join(cc_emails), comment_text))
diff --git a/Tools/Scripts/webkitpy/tool/steps/applywatchlist_unittest.py b/Tools/Scripts/webkitpy/tool/steps/applywatchlist_unittest.py
new file mode 100644
index 0000000..bdaaf75
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/applywatchlist_unittest.py
@@ -0,0 +1,51 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.tool.mocktool import MockOptions, MockTool
+from webkitpy.tool.steps.applywatchlist import ApplyWatchList
+
+
+class ApplyWatchListTest(unittest.TestCase):
+    def test_apply_watch_list_local(self):
+        capture = OutputCapture()
+        step = ApplyWatchList(MockTool(log_executive=True), MockOptions())
+        state = {
+            'bug_id': '50001',
+            'diff': 'The diff',
+        }
+        expected_stderr = """MockWatchList: determine_cc_and_messages
+MOCK bug comment: bug_id=50001, cc=set(['levin@chromium.org'])
+--- Begin comment ---
+Message2.
+--- End comment ---
+
+"""
+        capture.assert_outputs(self, step.run, [state], expected_stderr=expected_stderr)
diff --git a/Tools/Scripts/webkitpy/tool/steps/attachtobug.py b/Tools/Scripts/webkitpy/tool/steps/attachtobug.py
new file mode 100644
index 0000000..a389e65
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/attachtobug.py
@@ -0,0 +1,49 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.tool.steps.options import Options
+
+
+class AttachToBug(AbstractStep):
+    @classmethod
+    def options(cls):
+        return AbstractStep.options() + [
+            Options.comment,
+            Options.description,
+        ]
+
+    def run(self, state):
+        filepath = state["filepath"]
+        bug_id = state["bug_id"]
+        description = self._options.description or self._tool.filesystem.basename(filepath)
+        comment_text = self._options.comment
+
+        # add_attachment_to_bug fills in the filename from the file path.
+        filename = None
+        self._tool.bugs.add_attachment_to_bug(bug_id, filepath, description, filename, comment_text)
diff --git a/Tools/Scripts/webkitpy/tool/steps/build.py b/Tools/Scripts/webkitpy/tool/steps/build.py
new file mode 100644
index 0000000..7f7dd9f
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/build.py
@@ -0,0 +1,60 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.tool.steps.options import Options
+from webkitpy.common.system.deprecated_logging import log
+
+
+class Build(AbstractStep):
+    @classmethod
+    def options(cls):
+        return AbstractStep.options() + [
+            Options.build,
+            Options.quiet,
+            Options.build_style,
+        ]
+
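+    # Invokes the port's build-webkit command for a single build style, with
+    # GCC smart quotes disabled in the build environment.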
+    def build(self, build_style):
+        environment = self._tool.copy_current_environment()
+        environment.disable_gcc_smartquotes()
+        env = environment.to_dictionary()
+
+        build_webkit_command = self._tool.port().build_webkit_command(build_style=build_style)
+        self._tool.executive.run_and_throw_if_fail(build_webkit_command, self._options.quiet,
+            cwd=self._tool.scm().checkout_root, env=env)
+
+    def run(self, state):
+        if not self._options.build:
+            return
+        log("Building WebKit")
+        if self._options.build_style == "both":
+            self.build("debug")
+            self.build("release")
+        else:
+            self.build(self._options.build_style)
diff --git a/Tools/Scripts/webkitpy/tool/steps/checkstyle.py b/Tools/Scripts/webkitpy/tool/steps/checkstyle.py
new file mode 100644
index 0000000..3304f01
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/checkstyle.py
@@ -0,0 +1,68 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.tool.steps.options import Options
+from webkitpy.common.system.deprecated_logging import error
+
+class CheckStyle(AbstractStep):
+    @classmethod
+    def options(cls):
+        return AbstractStep.options() + [
+            Options.non_interactive,
+            Options.check_style,
+            Options.check_style_filter,
+            Options.git_commit,
+        ]
+
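+    # Runs check-webkit-style against the changed files; failures are fatal
+    # in non-interactive mode and otherwise prompt before continuing.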
+    def run(self, state):
+        if not self._options.check_style:
+            return
+
+        args = []
+        if self._options.git_commit:
+            args.append("--git-commit")
+            args.append(self._options.git_commit)
+
+        args.append("--diff-files")
+        args.extend(self._changed_files(state))
+
+        if self._options.check_style_filter:
+            args.append("--filter")
+            args.append(self._options.check_style_filter)
+
+        try:
+            self._tool.executive.run_and_throw_if_fail(self._tool.port().check_webkit_style_command() + args, cwd=self._tool.scm().checkout_root)
+        except ScriptError, e:
+            if self._options.non_interactive:
+                # We need to re-raise the exception here to have the
+                # style-queue do the right thing.
+                raise e
+            if not self._tool.user.confirm("Are you sure you want to continue?"):
+                self._exit(1)
diff --git a/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectory.py b/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectory.py
new file mode 100644
index 0000000..1913524
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectory.py
@@ -0,0 +1,50 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.tool.steps.options import Options
+
+
+class CleanWorkingDirectory(AbstractStep):
+    def __init__(self, tool, options, allow_local_commits=False):
+        AbstractStep.__init__(self, tool, options)
+        self._allow_local_commits = allow_local_commits
+
+    @classmethod
+    def options(cls):
+        return AbstractStep.options() + [
+            Options.force_clean,
+            Options.clean,
+        ]
+
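+    # When cleaning is enabled, ensures there are no local commits (unless
+    # they are explicitly allowed) and resets the working directory.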
+    def run(self, state):
+        if not self._options.clean:
+            return
+        if not self._allow_local_commits:
+            self._tool.scm().ensure_no_local_commits(self._options.force_clean)
+        self._tool.scm().ensure_clean_working_directory(force_clean=self._options.force_clean)
diff --git a/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectory_unittest.py b/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectory_unittest.py
new file mode 100644
index 0000000..15a8850
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectory_unittest.py
@@ -0,0 +1,52 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.thirdparty.mock import Mock
+from webkitpy.tool.mocktool import MockOptions, MockTool
+from webkitpy.tool.steps.cleanworkingdirectory import CleanWorkingDirectory
+
+
+class CleanWorkingDirectoryTest(unittest.TestCase):
+    def test_run(self):
+        tool = MockTool()
+        tool._scm = Mock()
+        tool._scm.checkout_root = '/mock-checkout'
+        step = CleanWorkingDirectory(tool, MockOptions(clean=True, force_clean=False))
+        step.run({})
+        self.assertEqual(tool._scm.ensure_no_local_commits.call_count, 1)
+        self.assertEqual(tool._scm.ensure_clean_working_directory.call_count, 1)
+
+    def test_no_clean(self):
+        tool = MockTool()
+        tool._scm = Mock()
+        step = CleanWorkingDirectory(tool, MockOptions(clean=False))
+        step.run({})
+        self.assertEqual(tool._scm.ensure_no_local_commits.call_count, 0)
+        self.assertEqual(tool._scm.ensure_clean_working_directory.call_count, 0)
diff --git a/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectorywithlocalcommits.py b/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectorywithlocalcommits.py
new file mode 100644
index 0000000..f06f94e
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectorywithlocalcommits.py
@@ -0,0 +1,34 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.steps.cleanworkingdirectory import CleanWorkingDirectory
+
+class CleanWorkingDirectoryWithLocalCommits(CleanWorkingDirectory):
+    def __init__(self, tool, options):
+        # FIXME: This is a bit of a hack.  Consider doing this more cleanly.
+        CleanWorkingDirectory.__init__(self, tool, options, allow_local_commits=True)
diff --git a/Tools/Scripts/webkitpy/tool/steps/closebug.py b/Tools/Scripts/webkitpy/tool/steps/closebug.py
new file mode 100644
index 0000000..b33e373
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/closebug.py
@@ -0,0 +1,53 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.tool.steps.options import Options
+from webkitpy.common.system.deprecated_logging import log
+
+
+class CloseBug(AbstractStep):
+    @classmethod
+    def options(cls):
+        return AbstractStep.options() + [
+            Options.close_bug,
+        ]
+
+    def run(self, state):
+        if not self._options.close_bug:
+            return
+        # Check to make sure there are no r? or r+ patches on the bug before closing.
+        # Assume that r- patches are just previous patches someone forgot to obsolete.
+        # FIXME: Should this use self.cached_lookup('bug')?  It's unclear if
+        # state["patch"].bug_id() always equals state['bug_id'].
+        patches = self._tool.bugs.fetch_bug(state["patch"].bug_id()).patches()
+        for patch in patches:
+            if patch.review() == "?" or patch.review() == "+":
+                log("Not closing bug %s as attachment %s has review=%s.  Assuming there are more patches to land from this bug." % (patch.bug_id(), patch.id(), patch.review()))
+                return
+        self._tool.bugs.close_bug_as_fixed(state["patch"].bug_id(), "All reviewed patches have been landed.  Closing bug.")
diff --git a/Tools/Scripts/webkitpy/tool/steps/closebugforlanddiff.py b/Tools/Scripts/webkitpy/tool/steps/closebugforlanddiff.py
new file mode 100644
index 0000000..e5a68db
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/closebugforlanddiff.py
@@ -0,0 +1,58 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.comments import bug_comment_from_commit_text
+from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.tool.steps.options import Options
+from webkitpy.common.system.deprecated_logging import log
+
+
+class CloseBugForLandDiff(AbstractStep):
+    @classmethod
+    def options(cls):
+        return AbstractStep.options() + [
+            Options.close_bug,
+        ]
+
+    def run(self, state):
+        comment_text = bug_comment_from_commit_text(self._tool.scm(), state["commit_text"])
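+        # Prefer an explicit bug id from the state; fall back to the bug the
+        # patch is attached to.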
+        bug_id = state.get("bug_id")
+        if not bug_id and state.get("patch"):
+            bug_id = state.get("patch").bug_id()
+
+        if bug_id:
+            log("Updating bug %s" % bug_id)
+            if self._options.close_bug:
+                self._tool.bugs.close_bug_as_fixed(bug_id, comment_text)
+            else:
+                # FIXME: We should find a smart way to figure out if the patch is attached
+                # to the bug, and if so obsolete it.
+                self._tool.bugs.post_comment_to_bug(bug_id, comment_text)
+        else:
+            log(comment_text)
+            log("No bug id provided.")
diff --git a/Tools/Scripts/webkitpy/tool/steps/closebugforlanddiff_unittest.py b/Tools/Scripts/webkitpy/tool/steps/closebugforlanddiff_unittest.py
new file mode 100644
index 0000000..0a56564
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/closebugforlanddiff_unittest.py
@@ -0,0 +1,40 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.tool.mocktool import MockOptions, MockTool
+from webkitpy.tool.steps.closebugforlanddiff import CloseBugForLandDiff
+
+class CloseBugForLandDiffTest(unittest.TestCase):
+    def test_empty_state(self):
+        capture = OutputCapture()
+        step = CloseBugForLandDiff(MockTool(), MockOptions())
+        expected_stderr = "Committed r49824: <http://trac.webkit.org/changeset/49824>\nNo bug id provided.\n"
+        capture.assert_outputs(self, step.run, [{"commit_text" : "Mock commit text"}], expected_stderr=expected_stderr)
diff --git a/Tools/Scripts/webkitpy/tool/steps/closepatch.py b/Tools/Scripts/webkitpy/tool/steps/closepatch.py
new file mode 100644
index 0000000..ff94df8
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/closepatch.py
@@ -0,0 +1,36 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.comments import bug_comment_from_commit_text
+from webkitpy.tool.steps.abstractstep import AbstractStep
+
+
+class ClosePatch(AbstractStep):
+    def run(self, state):
+        comment_text = bug_comment_from_commit_text(self._tool.scm(), state["commit_text"])
+        self._tool.bugs.clear_attachment_flags(state["patch"].id(), comment_text)
diff --git a/Tools/Scripts/webkitpy/tool/steps/commit.py b/Tools/Scripts/webkitpy/tool/steps/commit.py
new file mode 100644
index 0000000..0e5ca91
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/commit.py
@@ -0,0 +1,110 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+
+from webkitpy.common.checkout.scm import AuthenticationError, AmbiguousCommitError
+from webkitpy.common.config import urls
+from webkitpy.common.system.deprecated_logging import log
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.common.system.user import User
+from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.tool.steps.options import Options
+
+
+class Commit(AbstractStep):
+    # FIXME: This option exists only to make sure we don't break scripts which include --ignore-builders.
+    # You can safely delete this option any time after 11/01/11.
+    @classmethod
+    def options(cls):
+        return AbstractStep.options() + [
+            Options.check_builders,
+            Options.non_interactive,
+        ]
+
+    def _commit_warning(self, error):
+        working_directory_message = "" if error.working_directory_is_clean else " and working copy changes"
+        return ('There are %s local commits%s. Everything will be committed as a single commit. '
+                'To avoid this prompt, set "git config webkit-patch.commit-should-always-squash true".' % (
+                error.num_local_commits, working_directory_message))
+
+    def _check_test_expectations(self, changed_files):
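+        # Run check-webkit-style over any changed TestExpectations files and,
+        # unless running non-interactively, let the user decide whether a
+        # style failure should abort the commit.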
+        test_expectations_files = [filename for filename in changed_files if filename.endswith('TestExpectations')]
+        if not test_expectations_files:
+            return
+
+        args = ["--diff-files"]
+        args.extend(test_expectations_files)
+        try:
+            self._tool.executive.run_and_throw_if_fail(self._tool.port().check_webkit_style_command() + args, cwd=self._tool.scm().checkout_root)
+        except ScriptError, e:
+            if self._options.non_interactive:
+                raise
+            if not self._tool.user.confirm("Are you sure you want to continue?", default="n"):
+                self._exit(1)
+
+    def run(self, state):
+        self._commit_message = self._tool.checkout().commit_message_for_this_commit(self._options.git_commit).message()
+        if len(self._commit_message) < 10:
+            raise Exception("Attempted to commit with a commit message shorter than 10 characters.  Either your patch is missing a ChangeLog or webkit-patch may have a bug.")
+
+        self._check_test_expectations(self._changed_files(state))
+
+        self._state = state
+
+        username = None
+        password = None
+        force_squash = False
+
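+        # Try the commit up to three times.  An AmbiguousCommitError means
+        # there are multiple local commits to squash (ask the user, or squash
+        # silently when non-interactive); an AuthenticationError means the
+        # server wants credentials, so prompt for them and retry.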
+        num_tries = 0
+        while num_tries < 3:
+            num_tries += 1
+
+            try:
+                scm = self._tool.scm()
+                commit_text = scm.commit_with_message(self._commit_message, git_commit=self._options.git_commit, username=username, password=password, force_squash=force_squash, changed_files=self._changed_files(state))
+                svn_revision = scm.svn_revision_from_commit_text(commit_text)
+                log("Committed r%s: <%s>" % (svn_revision, urls.view_revision_url(svn_revision)))
+                self._state["commit_text"] = commit_text
+                break
+            except AmbiguousCommitError, e:
+                if self._options.non_interactive or self._tool.user.confirm(self._commit_warning(e)):
+                    force_squash = True
+                else:
+                    # This will correctly interrupt the rest of the commit process.
+                    raise ScriptError(message="Did not commit")
+            except AuthenticationError, e:
+                if self._options.non_interactive:
+                    raise ScriptError(message="Authentication required")
+                username = self._tool.user.prompt("%s login: " % e.server_host, repeat=5)
+                if not username:
+                    raise ScriptError("You need to specify the username on %s to perform the commit as." % e.server_host)
+                if e.prompt_for_password:
+                    password = self._tool.user.prompt_password("%s password for %s: " % (e.server_host, username), repeat=5)
+                    if not password:
+                        raise ScriptError("You need to specify the password for %s on %s to perform the commit." % (username, e.server_host))
diff --git a/Tools/Scripts/webkitpy/tool/steps/commit_unittest.py b/Tools/Scripts/webkitpy/tool/steps/commit_unittest.py
new file mode 100644
index 0000000..25d9b61
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/commit_unittest.py
@@ -0,0 +1,65 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.common.system.executive_mock import MockExecutive
+from webkitpy.tool.mocktool import MockOptions, MockTool
+from webkitpy.tool.steps.commit import Commit
+
+
+class CommitTest(unittest.TestCase):
+    def _test_check_test_expectations(self, filename):
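+        # Exercise Commit._check_test_expectations three ways: a filename that
+        # does not end in TestExpectations (style checker not invoked), one
+        # that does (style checker invoked and passes), and one whose style
+        # check fails (the ScriptError propagates in non-interactive mode).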
+        capture = OutputCapture()
+        options = MockOptions()
+        options.git_commit = ""
+        options.non_interactive = True
+
+        tool = MockTool()
+        tool.user = None  # Will cause any access of tool.user to raise an exception.
+        step = Commit(tool, options)
+        state = {
+            "changed_files": [filename + "XXX"],
+        }
+
+        tool.executive = MockExecutive(should_log=True, should_throw_when_run=False)
+        capture.assert_outputs(self, step.run, [state], expected_stderr="Committed r49824: <http://trac.webkit.org/changeset/49824>\n")
+
+        state = {
+            "changed_files": ["platform/chromium/" + filename],
+        }
+        capture.assert_outputs(self, step.run, [state], expected_stderr="MOCK run_and_throw_if_fail: ['mock-check-webkit-style', '--diff-files', 'platform/chromium/"
+            + filename + "'], cwd=/mock-checkout\nCommitted r49824: <http://trac.webkit.org/changeset/49824>\n")
+
+        tool.executive = MockExecutive(should_log=True, should_throw_when_run=set(["platform/chromium/" + filename]))
+        self.assertRaises(ScriptError, capture.assert_outputs, self, step.run, [state])
+
+    def test_check_test_expectations(self):
+        self._test_check_test_expectations('TestExpectations')
diff --git a/Tools/Scripts/webkitpy/tool/steps/confirmdiff.py b/Tools/Scripts/webkitpy/tool/steps/confirmdiff.py
new file mode 100644
index 0000000..86c8a2c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/confirmdiff.py
@@ -0,0 +1,77 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import urllib
+
+from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.tool.steps.options import Options
+from webkitpy.common.prettypatch import PrettyPatch
+from webkitpy.common.system import logutils
+from webkitpy.common.system.executive import ScriptError
+
+
+_log = logutils.get_logger(__file__)
+
+
+class ConfirmDiff(AbstractStep):
+    @classmethod
+    def options(cls):
+        return AbstractStep.options() + [
+            Options.confirm,
+        ]
+
+    def _show_pretty_diff(self, diff):
+        if not self._tool.user.can_open_url():
+            return None
+
+        try:
+            pretty_patch = PrettyPatch(self._tool.executive,
+                                       self._tool.scm().checkout_root)
+            pretty_diff_file = pretty_patch.pretty_diff_file(diff)
+            url = "file://%s" % urllib.quote(pretty_diff_file.name)
+            self._tool.user.open_url(url)
+            # We return the pretty_diff_file here because we need to keep the
+            # file alive until the user has had a chance to confirm the diff.
+            return pretty_diff_file
+        except ScriptError, e:
+            _log.warning("PrettyPatch failed.  :(")
+        except OSError, e:
+            _log.warning("PrettyPatch unavailable.")
+
+    def run(self, state):
+        if not self._options.confirm:
+            return
+        diff = self.cached_lookup(state, "diff")
+        pretty_diff_file = self._show_pretty_diff(diff)
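+        # Fall back to paging the raw diff if the pretty diff could not be shown.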
+        if not pretty_diff_file:
+            self._tool.user.page(diff)
+        diff_correct = self._tool.user.confirm("Was that diff correct?")
+        if pretty_diff_file:
+            pretty_diff_file.close()
+        if not diff_correct:
+            self._exit(1)
diff --git a/Tools/Scripts/webkitpy/tool/steps/createbug.py b/Tools/Scripts/webkitpy/tool/steps/createbug.py
new file mode 100644
index 0000000..7e4a835
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/createbug.py
@@ -0,0 +1,56 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.tool.steps.options import Options
+
+
+class CreateBug(AbstractStep):
+    @classmethod
+    def options(cls):
+        return AbstractStep.options() + [
+            Options.cc,
+            Options.component,
+            Options.blocks,
+        ]
+
+    def run(self, state):
+        # No need to create a bug if we already have one.
+        if state.get("bug_id"):
+            return
+        cc = self._options.cc
+        if not cc:
+            cc = state.get("bug_cc")
+        blocks = self._options.blocks
+        if not blocks:
+            blocks = state.get("bug_blocked")
+        state["bug_id"] = self._tool.bugs.create_bug(state["bug_title"], state["bug_description"], blocked=blocks, component=self._options.component, cc=cc)
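+        # If the new bug blocks a bug that is already RESOLVED, reopen the
+        # blocker so the dependency is visible.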
+        if blocks:
+            status = self._tool.bugs.fetch_bug(blocks).status()
+            if status == 'RESOLVED':
+                self._tool.bugs.reopen_bug(blocks, "Re-opened since this is blocked by bug %s" % state["bug_id"])
diff --git a/Tools/Scripts/webkitpy/tool/steps/editchangelog.py b/Tools/Scripts/webkitpy/tool/steps/editchangelog.py
new file mode 100644
index 0000000..35cd504
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/editchangelog.py
@@ -0,0 +1,36 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.steps.abstractstep import AbstractStep
+
+
+class EditChangeLog(AbstractStep):
+    def run(self, state):
+        absolute_paths = map(self._tool.scm().absolute_path, self.cached_lookup(state, "changelogs"))
+        self._tool.user.edit_changelog(absolute_paths)
+        self.did_modify_checkout(state)
diff --git a/Tools/Scripts/webkitpy/tool/steps/ensurebugisopenandassigned.py b/Tools/Scripts/webkitpy/tool/steps/ensurebugisopenandassigned.py
new file mode 100644
index 0000000..54f90b6
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/ensurebugisopenandassigned.py
@@ -0,0 +1,41 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.steps.abstractstep import AbstractStep
+
+
+class EnsureBugIsOpenAndAssigned(AbstractStep):
+    def run(self, state):
+        bug = self.cached_lookup(state, "bug")
+        if bug.is_unassigned():
+            self._tool.bugs.reassign_bug(bug.id())
+
+        if bug.is_closed():
+            # FIXME: We should probably pass this message in somehow?
+            # Right now this step is only used before PostDiff steps, so this is OK.
+            self._tool.bugs.reopen_bug(bug.id(), "Reopening to attach new patch.")
diff --git a/Tools/Scripts/webkitpy/tool/steps/ensurelocalcommitifneeded.py b/Tools/Scripts/webkitpy/tool/steps/ensurelocalcommitifneeded.py
new file mode 100644
index 0000000..2167351
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/ensurelocalcommitifneeded.py
@@ -0,0 +1,43 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.tool.steps.options import Options
+from webkitpy.common.system.deprecated_logging import error
+
+
+class EnsureLocalCommitIfNeeded(AbstractStep):
+    @classmethod
+    def options(cls):
+        return AbstractStep.options() + [
+            Options.local_commit,
+        ]
+
+    def run(self, state):
+        if self._options.local_commit and not self._tool.scm().supports_local_commits():
+            error("--local-commit passed, but %s does not support local commits" % self._tool.scm().display_name())
diff --git a/Tools/Scripts/webkitpy/tool/steps/metastep.py b/Tools/Scripts/webkitpy/tool/steps/metastep.py
new file mode 100644
index 0000000..7cbd1c5
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/metastep.py
@@ -0,0 +1,54 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.steps.abstractstep import AbstractStep
+
+
+# FIXME: Unify with StepSequence?  I'm not sure yet which is the better design.
+class MetaStep(AbstractStep):
+    substeps = [] # Override in subclasses
+    def __init__(self, tool, options):
+        AbstractStep.__init__(self, tool, options)
+        self._step_instances = []
+        for step_class in self.substeps:
+            self._step_instances.append(step_class(tool, options))
+
+    @staticmethod
+    def _collect_options_from_steps(steps):
+        collected_options = []
+        for step in steps:
+            collected_options = collected_options + step.options()
+        return collected_options
+
+    @classmethod
+    def options(cls):
+        return cls._collect_options_from_steps(cls.substeps)
+
+    def run(self, state):
+        for step in self._step_instances:
+            step.run(state)
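+
+# A minimal usage sketch (hypothetical subclass, not one defined in this
+# patch): a MetaStep only needs to list its substeps; option collection and
+# sequential execution are inherited:
+#
+#     class CleanAndCommit(MetaStep):
+#         substeps = [CleanWorkingDirectory, Commit]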
diff --git a/Tools/Scripts/webkitpy/tool/steps/obsoletepatches.py b/Tools/Scripts/webkitpy/tool/steps/obsoletepatches.py
new file mode 100644
index 0000000..de508c6
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/obsoletepatches.py
@@ -0,0 +1,51 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.grammar import pluralize
+from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.tool.steps.options import Options
+from webkitpy.common.system.deprecated_logging import log
+
+
+class ObsoletePatches(AbstractStep):
+    @classmethod
+    def options(cls):
+        return AbstractStep.options() + [
+            Options.obsolete_patches,
+        ]
+
+    def run(self, state):
+        if not self._options.obsolete_patches:
+            return
+        bug_id = state["bug_id"]
+        patches = self._tool.bugs.fetch_bug(bug_id).patches()
+        if not patches:
+            return
+        log("Obsoleting %s on bug %s" % (pluralize("old patch", len(patches)), bug_id))
+        for patch in patches:
+            self._tool.bugs.obsolete_attachment(patch.id())
diff --git a/Tools/Scripts/webkitpy/tool/steps/options.py b/Tools/Scripts/webkitpy/tool/steps/options.py
new file mode 100644
index 0000000..c29e59d
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/options.py
@@ -0,0 +1,60 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from optparse import make_option
+
+class Options(object):
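+    # Shared optparse option definitions.  Individual steps select the ones
+    # they understand by returning a list of these from their options()
+    # classmethod (see the steps elsewhere in this patch).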
+    blocks = make_option("--blocks", action="store", type="string", dest="blocks", default=None, help="Bug number which the created bug blocks.")
+    build = make_option("--build", action="store_true", dest="build", default=False, help="Build and run run-webkit-tests before committing.")
+    build_style = make_option("--build-style", action="store", dest="build_style", default=None, help="Whether to build debug, release, or both.")
+    cc = make_option("--cc", action="store", type="string", dest="cc", help="Comma-separated list of email addresses to carbon-copy.")
+    check_builders = make_option("--ignore-builders", action="store_false", dest="check_builders", default=True, help="DEPRECATED: Will be removed any time after 11/01/11.")
+    check_style = make_option("--ignore-style", action="store_false", dest="check_style", default=True, help="Don't check to see if the patch has proper style before uploading.")
+    check_style_filter = make_option("--check-style-filter", action="store", type="string", dest="check_style_filter", default=None, help="Filter style-checker rules (see check-webkit-style --help).")
+    clean = make_option("--no-clean", action="store_false", dest="clean", default=True, help="Don't check if the working directory is clean before applying patches")
+    close_bug = make_option("--no-close", action="store_false", dest="close_bug", default=True, help="Leave bug open after landing.")
+    comment = make_option("--comment", action="store", type="string", dest="comment", help="Comment to post to bug.")
+    component = make_option("--component", action="store", type="string", dest="component", help="Component for the new bug.")
+    confirm = make_option("--no-confirm", action="store_false", dest="confirm", default=True, help="Skip confirmation steps.")
+    description = make_option("-m", "--description", action="store", type="string", dest="description", help="Description string for the attachment")
+    email = make_option("--email", action="store", type="string", dest="email", help="Email address to use in ChangeLogs.")
+    force_clean = make_option("--force-clean", action="store_true", dest="force_clean", default=False, help="Clean working directory before applying patches (removes local changes and commits)")
+    git_commit = make_option("-g", "--git-commit", action="store", dest="git_commit", help="Operate on a local commit. If a range, the commits are squashed into one. <ref>.... includes the working copy changes. UPSTREAM can be used for the upstream/tracking branch.")
+    local_commit = make_option("--local-commit", action="store_true", dest="local_commit", default=False, help="Make a local commit for each applied patch")
+    non_interactive = make_option("--non-interactive", action="store_true", dest="non_interactive", default=False, help="Never prompt the user, fail as fast as possible.")
+    obsolete_patches = make_option("--no-obsolete", action="store_false", dest="obsolete_patches", default=True, help="Do not obsolete old patches before posting this one.")
+    open_bug = make_option("--open-bug", action="store_true", dest="open_bug", default=False, help="Opens the associated bug in a browser.")
+    parent_command = make_option("--parent-command", action="store", dest="parent_command", default=None, help="(Internal) The command that spawned this instance.")
+    quiet = make_option("--quiet", action="store_true", dest="quiet", default=False, help="Produce less console output.")
+    request_commit = make_option("--request-commit", action="store_true", dest="request_commit", default=False, help="Mark the patch as needing auto-commit after review.")
+    review = make_option("--no-review", action="store_false", dest="review", default=True, help="Do not mark the patch for review.")
+    reviewer = make_option("-r", "--reviewer", action="store", type="string", dest="reviewer", help="Update ChangeLogs to say Reviewed by REVIEWER.")
+    suggest_reviewers = make_option("--suggest-reviewers", action="store_true", default=False, help="Offer to CC appropriate reviewers.")
+    test = make_option("--test", action="store_true", dest="test", default=False, help="Run run-webkit-tests before committing.")
+    update = make_option("--no-update", action="store_false", dest="update", default=True, help="Don't update the working directory.")
+    changelog_count = make_option("--changelog-count", action="store", type="int", dest="changelog_count", help="Number of changelogs to parse.")
diff --git a/Tools/Scripts/webkitpy/tool/steps/postdiff.py b/Tools/Scripts/webkitpy/tool/steps/postdiff.py
new file mode 100644
index 0000000..6913cab
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/postdiff.py
@@ -0,0 +1,52 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.tool.steps.options import Options
+
+
+class PostDiff(AbstractStep):
+    @classmethod
+    def options(cls):
+        return AbstractStep.options() + [
+            Options.description,
+            Options.comment,
+            Options.review,
+            Options.request_commit,
+            Options.open_bug,
+        ]
+
+    def run(self, state):
+        diff = self.cached_lookup(state, "diff")
+        description = self._options.description or "Patch"
+        comment_text = self._options.comment
+        bug_id = state["bug_id"]
+
+        self._tool.bugs.add_patch_to_bug(bug_id, diff, description, comment_text=comment_text, mark_for_review=self._options.review, mark_for_commit_queue=self._options.request_commit)
+        if self._options.open_bug:
+            self._tool.user.open_url(self._tool.bugs.bug_url_for_bug_id(bug_id))
diff --git a/Tools/Scripts/webkitpy/tool/steps/postdiffforcommit.py b/Tools/Scripts/webkitpy/tool/steps/postdiffforcommit.py
new file mode 100644
index 0000000..13bc00c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/postdiffforcommit.py
@@ -0,0 +1,39 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.steps.abstractstep import AbstractStep
+
+
+class PostDiffForCommit(AbstractStep):
+    def run(self, state):
+        self._tool.bugs.add_patch_to_bug(
+            state["bug_id"],
+            self.cached_lookup(state, "diff"),
+            "Patch for landing",
+            mark_for_review=False,
+            mark_for_landing=True)
diff --git a/Tools/Scripts/webkitpy/tool/steps/postdiffforrevert.py b/Tools/Scripts/webkitpy/tool/steps/postdiffforrevert.py
new file mode 100644
index 0000000..2900eb3
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/postdiffforrevert.py
@@ -0,0 +1,49 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.net.bugzilla import Attachment
+from webkitpy.tool.steps.abstractstep import AbstractStep
+
+
+class PostDiffForRevert(AbstractStep):
+    def run(self, state):
+        comment_text = "Any committer can land this patch automatically by \
+marking it commit-queue+.  The commit-queue will build and test \
+the patch before landing to ensure that the rollout will be \
+successful.  This process takes approximately 15 minutes.\n\n\
+If you would like to land the rollout faster, you can use the \
+following command:\n\n\
+  webkit-patch land-attachment ATTACHMENT_ID\n\n\
+where ATTACHMENT_ID is the ID of this attachment."
+        self._tool.bugs.add_patch_to_bug(
+            state["bug_id"],
+            self.cached_lookup(state, "diff"),
+            "%s%s" % (Attachment.rollout_preamble, state["revision"]),
+            comment_text=comment_text,
+            mark_for_review=False,
+            mark_for_commit_queue=True)
diff --git a/Tools/Scripts/webkitpy/tool/steps/preparechangelog.py b/Tools/Scripts/webkitpy/tool/steps/preparechangelog.py
new file mode 100644
index 0000000..19caace
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/preparechangelog.py
@@ -0,0 +1,77 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.checkout.changelog import ChangeLog
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.tool.steps.options import Options
+from webkitpy.common.system.deprecated_logging import error
+
+
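+# Runs the port's prepare-ChangeLog command to create ChangeLog entries for
+# the changed files.  If the patch already contains ChangeLogs, it only
+# fills in the bug title and URL for entries that are missing one.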
+class PrepareChangeLog(AbstractStep):
+    @classmethod
+    def options(cls):
+        return AbstractStep.options() + [
+            Options.quiet,
+            Options.email,
+            Options.git_commit,
+        ]
+
+    def _ensure_bug_url(self, state):
+        if not state.get("bug_id"):
+            return
+        bug_id = state.get("bug_id")
+        changelogs = self.cached_lookup(state, "changelogs")
+        for changelog_path in changelogs:
+            changelog = ChangeLog(changelog_path)
+            if not changelog.latest_entry().bug_id():
+                changelog.set_short_description_and_bug_url(
+                    self.cached_lookup(state, "bug_title"),
+                    self._tool.bugs.bug_url_for_bug_id(bug_id))
+
+    def run(self, state):
+        if self.cached_lookup(state, "changelogs"):
+            self._ensure_bug_url(state)
+            return
+        args = self._tool.port().prepare_changelog_command()
+        if state.get("bug_id"):
+            args.append("--bug=%s" % state["bug_id"])
+            args.append("--description=%s" % self.cached_lookup(state, 'bug_title'))
+        if self._options.email:
+            args.append("--email=%s" % self._options.email)
+
+        if self._tool.scm().supports_local_commits():
+            args.append("--merge-base=%s" % self._tool.scm().merge_base(self._options.git_commit))
+
+        args.extend(self._changed_files(state))
+
+        try:
+            self._tool.executive.run_and_throw_if_fail(args, self._options.quiet, cwd=self._tool.scm().checkout_root)
+        except ScriptError, e:
+            error("Unable to prepare ChangeLogs.")
+        self.did_modify_checkout(state)
diff --git a/Tools/Scripts/webkitpy/tool/steps/preparechangelog_unittest.py b/Tools/Scripts/webkitpy/tool/steps/preparechangelog_unittest.py
new file mode 100644
index 0000000..847dc2f
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/preparechangelog_unittest.py
@@ -0,0 +1,57 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import unittest
+
+# Do not import changelog_unittest.ChangeLogTest directly as that will cause it to be run again.
+from webkitpy.common.checkout import changelog_unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.tool.mocktool import MockOptions, MockTool
+from webkitpy.tool.steps.preparechangelog import PrepareChangeLog
+
+
+class PrepareChangeLogTest(changelog_unittest.ChangeLogTest):
+    def test_ensure_bug_url(self):
+        # FIXME: This should use a MockFileSystem instead of a real FileSystem.
+        capture = OutputCapture()
+        step = PrepareChangeLog(MockTool(), MockOptions())
+        changelog_contents = u"%s\n%s" % (self._new_entry_boilerplate, self._example_changelog)
+        changelog_path = self._write_tmp_file_with_contents(changelog_contents.encode("utf-8"))
+        state = {
+            "bug_title": "Example title",
+            "bug_id": 1234,
+            "changelogs": [changelog_path],
+        }
+        capture.assert_outputs(self, step.run, [state])
+        actual_contents = self._read_file_contents(changelog_path, "utf-8")
+        expected_message = "Example title\n        http://example.com/1234"
+        expected_contents = changelog_contents.replace("Need a short description (OOPS!).\n        Need the bug URL (OOPS!).", expected_message)
+        os.remove(changelog_path)
+        self.assertEquals(actual_contents.splitlines(), expected_contents.splitlines())
diff --git a/Tools/Scripts/webkitpy/tool/steps/preparechangelogfordepsroll.py b/Tools/Scripts/webkitpy/tool/steps/preparechangelogfordepsroll.py
new file mode 100644
index 0000000..4bbd383
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/preparechangelogfordepsroll.py
@@ -0,0 +1,38 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.checkout.changelog import ChangeLog
+from webkitpy.tool.steps.abstractstep import AbstractStep
+
+
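+# Runs prepare-ChangeLog and stamps each modified ChangeLog with an
+# "Unreviewed.  Rolled DEPS." message.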
+class PrepareChangeLogForDEPSRoll(AbstractStep):
+    def run(self, state):
+        self._tool.executive.run_and_throw_if_fail(self._tool.port().prepare_changelog_command())
+        changelog_paths = self._tool.checkout().modified_changelogs(git_commit=None)
+        for changelog_path in changelog_paths:
+            ChangeLog(changelog_path).update_with_unreviewed_message("Unreviewed.  Rolled DEPS.\n\n")
diff --git a/Tools/Scripts/webkitpy/tool/steps/preparechangelogforrevert.py b/Tools/Scripts/webkitpy/tool/steps/preparechangelogforrevert.py
new file mode 100644
index 0000000..95a99c3
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/preparechangelogforrevert.py
@@ -0,0 +1,58 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.checkout.changelog import ChangeLog
+from webkitpy.common.config import urls
+from webkitpy.tool.grammar import join_with_separators
+from webkitpy.tool.steps.abstractstep import AbstractStep
+
+
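+# Runs prepare-ChangeLog and updates each modified ChangeLog with an
+# "Unreviewed, rolling out ..." message listing the reverted revisions,
+# their changeset URLs, the bug URL (if any), and the rollout reason.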
+class PrepareChangeLogForRevert(AbstractStep):
+    @classmethod
+    def _message_for_revert(cls, revision_list, reason, bug_url=None):
+        message = "Unreviewed, rolling out %s.\n" % join_with_separators(['r' + str(revision) for revision in revision_list])
+        for revision in revision_list:
+            message += "%s\n" % urls.view_revision_url(revision)
+        if bug_url:
+            message += "%s\n" % bug_url
+        # Add an extra new line after the rollout links, before any reason.
+        message += "\n"
+        if reason:
+            message += "%s\n\n" % reason
+        return message
+
+    def run(self, state):
+        # This could move to prepare-ChangeLog by adding a --revert= option.
+        self._tool.executive.run_and_throw_if_fail(self._tool.port().prepare_changelog_command(), cwd=self._tool.scm().checkout_root)
+        changelog_paths = self._tool.checkout().modified_changelogs(git_commit=None)
+        bug_url = self._tool.bugs.bug_url_for_bug_id(state["bug_id"]) if state["bug_id"] else None
+        message = self._message_for_revert(state["revision_list"], state["reason"], bug_url)
+        for changelog_path in changelog_paths:
+            # FIXME: It seems we should prepare the message outside of changelogs.py and then just pass
+            # in the text we want to use to replace the "Reviewed by" line.
+            ChangeLog(changelog_path).update_with_unreviewed_message(message)
diff --git a/Tools/Scripts/webkitpy/tool/steps/preparechangelogforrevert_unittest.py b/Tools/Scripts/webkitpy/tool/steps/preparechangelogforrevert_unittest.py
new file mode 100644
index 0000000..076e602
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/preparechangelogforrevert_unittest.py
@@ -0,0 +1,130 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import codecs
+import os
+import tempfile
+import unittest
+
+# Do not import changelog_unittest.ChangeLogTest directly as that will cause it to be run again.
+from webkitpy.common.checkout import changelog_unittest
+
+from webkitpy.common.checkout.changelog import ChangeLog
+from webkitpy.tool.steps.preparechangelogforrevert import *
+
+
+class UpdateChangeLogsForRevertTest(unittest.TestCase):
+    @staticmethod
+    def _write_tmp_file_with_contents(byte_array):
+        assert(isinstance(byte_array, str))
+        (file_descriptor, file_path) = tempfile.mkstemp()  # NamedTemporaryFile always deletes the file on close in python < 2.6
+        with os.fdopen(file_descriptor, "w") as file:
+            file.write(byte_array)
+        return file_path
+
+    _revert_entry_with_bug_url = '''2009-08-19  Eric Seidel  <eric@webkit.org>
+
+        Unreviewed, rolling out r12345.
+        http://trac.webkit.org/changeset/12345
+        http://example.com/123
+
+        Reason
+
+        * Scripts/bugzilla-tool:
+'''
+
+    _revert_entry_without_bug_url = '''2009-08-19  Eric Seidel  <eric@webkit.org>
+
+        Unreviewed, rolling out r12345.
+        http://trac.webkit.org/changeset/12345
+
+        Reason
+
+        * Scripts/bugzilla-tool:
+'''
+
+    _multiple_revert_entry_with_bug_url = '''2009-08-19  Eric Seidel  <eric@webkit.org>
+
+        Unreviewed, rolling out r12345, r12346, and r12347.
+        http://trac.webkit.org/changeset/12345
+        http://trac.webkit.org/changeset/12346
+        http://trac.webkit.org/changeset/12347
+        http://example.com/123
+
+        Reason
+
+        * Scripts/bugzilla-tool:
+'''
+
+    _multiple_revert_entry_without_bug_url = '''2009-08-19  Eric Seidel  <eric@webkit.org>
+
+        Unreviewed, rolling out r12345, r12346, and r12347.
+        http://trac.webkit.org/changeset/12345
+        http://trac.webkit.org/changeset/12346
+        http://trac.webkit.org/changeset/12347
+
+        Reason
+
+        * Scripts/bugzilla-tool:
+'''
+
+    _revert_with_log_reason = """2009-08-19  Eric Seidel  <eric@webkit.org>
+
+        Unreviewed, rolling out r12345.
+        http://trac.webkit.org/changeset/12345
+        http://example.com/123
+
+        This is a very long reason which should be long enough so that
+        _message_for_revert will need to wrap it.  We'll also include
+        a
+        https://veryveryveryveryverylongbugurl.com/reallylongbugthingy.cgi?bug_id=12354
+        link so that we can make sure we wrap that right too.
+
+        * Scripts/bugzilla-tool:
+"""
+
+    def _assert_message_for_revert_output(self, args, expected_entry):
+        changelog_contents = u"%s\n%s" % (changelog_unittest.ChangeLogTest._new_entry_boilerplate, changelog_unittest.ChangeLogTest._example_changelog)
+        changelog_path = self._write_tmp_file_with_contents(changelog_contents.encode("utf-8"))
+        changelog = ChangeLog(changelog_path)
+        changelog.update_with_unreviewed_message(PrepareChangeLogForRevert._message_for_revert(*args))
+        actual_entry = changelog.latest_entry()
+        os.remove(changelog_path)
+        self.assertEquals(actual_entry.contents(), expected_entry)
+        self.assertEquals(actual_entry.reviewer_text(), None)
+        # These checks could be removed to allow this to work on other entries:
+        self.assertEquals(actual_entry.author_name(), "Eric Seidel")
+        self.assertEquals(actual_entry.author_email(), "eric@webkit.org")
+
+    def test_message_for_revert(self):
+        self._assert_message_for_revert_output([[12345], "Reason"], self._revert_entry_without_bug_url)
+        self._assert_message_for_revert_output([[12345], "Reason", "http://example.com/123"], self._revert_entry_with_bug_url)
+        self._assert_message_for_revert_output([[12345, 12346, 12347], "Reason"], self._multiple_revert_entry_without_bug_url)
+        self._assert_message_for_revert_output([[12345, 12346, 12347], "Reason", "http://example.com/123"], self._multiple_revert_entry_with_bug_url)
+        long_reason = "This is a very long reason which should be long enough so that _message_for_revert will need to wrap it.  We'll also include a https://veryveryveryveryverylongbugurl.com/reallylongbugthingy.cgi?bug_id=12354 link so that we can make sure we wrap that right too."
+        self._assert_message_for_revert_output([[12345], long_reason, "http://example.com/123"], self._revert_with_log_reason)
diff --git a/Tools/Scripts/webkitpy/tool/steps/promptforbugortitle.py b/Tools/Scripts/webkitpy/tool/steps/promptforbugortitle.py
new file mode 100644
index 0000000..31c913c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/promptforbugortitle.py
@@ -0,0 +1,45 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.steps.abstractstep import AbstractStep
+
+
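+# Ensures the state has either a bug_id or a bug_title: if no bug_id is
+# known, prompts the user and treats a numeric response as a bug number
+# and anything else as the title (and description) for a new bug.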
+class PromptForBugOrTitle(AbstractStep):
+    def run(self, state):
+        # No need to prompt if we already have the bug_id.
+        if state.get("bug_id"):
+            return
+        user_response = self._tool.user.prompt("Please enter a bug number or a title for a new bug:\n")
+        # If the user responds with a number, we assume it's a bug number.
+        # Otherwise we assume it's a bug title.
+        try:
+            state["bug_id"] = int(user_response)
+        except (ValueError, TypeError):
+            state["bug_title"] = user_response
+            # FIXME: This is kind of a lame description.
+            state["bug_description"] = user_response
diff --git a/Tools/Scripts/webkitpy/tool/steps/reopenbugafterrollout.py b/Tools/Scripts/webkitpy/tool/steps/reopenbugafterrollout.py
new file mode 100644
index 0000000..f369ca9
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/reopenbugafterrollout.py
@@ -0,0 +1,44 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.comments import bug_comment_from_commit_text
+from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.common.system.deprecated_logging import log
+
+
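+# After a rollout lands, reopens the original bug with a comment giving the
+# reverted revision, the rollout reason, and the commit text.  If no bug is
+# associated, the comment is only logged.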
+class ReopenBugAfterRollout(AbstractStep):
+    def run(self, state):
+        commit_comment = bug_comment_from_commit_text(self._tool.scm(), state["commit_text"])
+        comment_text = "Reverted r%s for reason:\n\n%s\n\n%s" % (state["revision"], state["reason"], commit_comment)
+
+        bug_id = state["bug_id"]
+        if not bug_id:
+            log(comment_text)
+            log("No bugs were updated.")
+            return
+        self._tool.bugs.reopen_bug(bug_id, comment_text)
diff --git a/Tools/Scripts/webkitpy/tool/steps/revertrevision.py b/Tools/Scripts/webkitpy/tool/steps/revertrevision.py
new file mode 100644
index 0000000..8016be5
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/revertrevision.py
@@ -0,0 +1,35 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.steps.abstractstep import AbstractStep
+
+
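+# Applies the reverse diffs for the revisions being rolled out and marks
+# the checkout as modified.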
+class RevertRevision(AbstractStep):
+    def run(self, state):
+        self._tool.checkout().apply_reverse_diffs(state["revision_list"])
+        self.did_modify_checkout(state)
diff --git a/Tools/Scripts/webkitpy/tool/steps/runtests.py b/Tools/Scripts/webkitpy/tool/steps/runtests.py
new file mode 100644
index 0000000..aa87291
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/runtests.py
@@ -0,0 +1,93 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.tool.steps.options import Options
+from webkitpy.common.system.deprecated_logging import log
+from webkitpy.common.system.executive import ScriptError
+
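+# Runs the test suites before landing: the Python, Perl, and JavaScriptCore
+# tests when run interactively, then the WebKit unit tests, and finally
+# run-webkit-tests (with extra restrictions when driven non-interactively
+# by the commit-queue/EWS).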
+class RunTests(AbstractStep):
+    # FIXME: This knowledge really belongs in the commit-queue.
+    NON_INTERACTIVE_FAILURE_LIMIT_COUNT = 30
+
+    @classmethod
+    def options(cls):
+        return AbstractStep.options() + [
+            Options.test,
+            Options.non_interactive,
+            Options.quiet,
+        ]
+
+    def run(self, state):
+        if not self._options.test:
+            return
+
+        if not self._options.non_interactive:
+            # FIXME: We should teach the commit-queue and the EWS how to run these tests.
+
+            python_unittests_command = self._tool.port().run_python_unittests_command()
+            if python_unittests_command:
+                log("Running Python unit tests")
+                self._tool.executive.run_and_throw_if_fail(python_unittests_command, cwd=self._tool.scm().checkout_root)
+
+            perl_unittests_command = self._tool.port().run_perl_unittests_command()
+            if perl_unittests_command:
+                log("Running Perl unit tests")
+                self._tool.executive.run_and_throw_if_fail(perl_unittests_command, cwd=self._tool.scm().checkout_root)
+
+            javascriptcore_tests_command = self._tool.port().run_javascriptcore_tests_command()
+            if javascriptcore_tests_command:
+                log("Running JavaScriptCore tests")
+                self._tool.executive.run_and_throw_if_fail(javascriptcore_tests_command, quiet=True, cwd=self._tool.scm().checkout_root)
+
+        webkit_unit_tests_command = self._tool.port().run_webkit_unit_tests_command()
+        if webkit_unit_tests_command:
+            log("Running WebKit unit tests")
+            args = webkit_unit_tests_command
+            if self._options.non_interactive:
+                args.append("--gtest_output=xml:%s/webkit_unit_tests_output.xml" % self._tool.port().results_directory)
+            try:
+                self._tool.executive.run_and_throw_if_fail(args, cwd=self._tool.scm().checkout_root)
+            except ScriptError, e:
+                log("Error running webkit_unit_tests: %s" % e.message_with_output())
+
+        log("Running run-webkit-tests")
+        args = self._tool.port().run_webkit_tests_command()
+        if self._options.non_interactive:
+            args.extend([
+                "--no-new-test-results",
+                "--no-launch-safari",
+                "--skip-failing-tests",
+                "--exit-after-n-failures=%s" % self.NON_INTERACTIVE_FAILURE_LIMIT_COUNT,
+                "--results-directory=%s" % self._tool.port().results_directory,
+                "--quiet",
+            ])
+
+        if self._options.quiet:
+            args.append("--quiet")
+        self._tool.executive.run_and_throw_if_fail(args, cwd=self._tool.scm().checkout_root)
diff --git a/Tools/Scripts/webkitpy/tool/steps/runtests_unittest.py b/Tools/Scripts/webkitpy/tool/steps/runtests_unittest.py
new file mode 100644
index 0000000..bf888e5
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/runtests_unittest.py
@@ -0,0 +1,46 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.tool.mocktool import MockOptions, MockTool
+from webkitpy.tool.steps.runtests import RunTests
+
+class RunTestsTest(unittest.TestCase):
+    def test_webkit_run_unit_tests(self):
+        tool = MockTool(log_executive=True)
+        tool._deprecated_port.run_python_unittests_command = lambda: None
+        tool._deprecated_port.run_perl_unittests_command = lambda: None
+        step = RunTests(tool, MockOptions(test=True, non_interactive=True, quiet=False))
+        expected_stderr = """Running WebKit unit tests
+MOCK run_and_throw_if_fail: ['mock-run-webkit-unit-tests', '--gtest_output=xml:/mock-results/webkit_unit_tests_output.xml'], cwd=/mock-checkout
+Running run-webkit-tests
+MOCK run_and_throw_if_fail: ['mock-run-webkit-tests', '--no-new-test-results', '--no-launch-safari', '--skip-failing-tests', '--exit-after-n-failures=30', '--results-directory=/mock-results', '--quiet'], cwd=/mock-checkout
+"""
+        OutputCapture().assert_outputs(self, step.run, [{}], expected_stderr=expected_stderr)
diff --git a/Tools/Scripts/webkitpy/tool/steps/steps_unittest.py b/Tools/Scripts/webkitpy/tool/steps/steps_unittest.py
new file mode 100644
index 0000000..99f1749
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/steps_unittest.py
@@ -0,0 +1,116 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.config.ports import DeprecatedPort
+from webkitpy.tool.mocktool import MockOptions, MockTool
+
+from webkitpy.tool import steps
+
+class StepsTest(unittest.TestCase):
+    def _step_options(self):
+        options = MockOptions()
+        options.non_interactive = True
+        options.port = 'MOCK port'
+        options.quiet = True
+        options.test = True
+        return options
+
+    def _run_step(self, step, tool=None, options=None, state=None):
+        if not tool:
+            tool = MockTool()
+        if not options:
+            options = self._step_options()
+        if not state:
+            state = {}
+        step(tool, options).run(state)
+
+    def test_update_step(self):
+        tool = MockTool()
+        options = self._step_options()
+        options.update = True
+        expected_stderr = "Updating working directory\n"
+        OutputCapture().assert_outputs(self, self._run_step, [steps.Update, tool, options], expected_stderr=expected_stderr)
+
+    def test_prompt_for_bug_or_title_step(self):
+        tool = MockTool()
+        tool.user.prompt = lambda message: 50000
+        self._run_step(steps.PromptForBugOrTitle, tool=tool)
+
+    def _post_diff_options(self):
+        options = self._step_options()
+        options.git_commit = None
+        options.description = None
+        options.comment = None
+        options.review = True
+        options.request_commit = False
+        options.open_bug = True
+        return options
+
+    def _assert_step_output_with_bug(self, step, bug_id, expected_stderr, options=None):
+        state = {'bug_id': bug_id}
+        OutputCapture().assert_outputs(self, self._run_step, [step, MockTool(), options, state], expected_stderr=expected_stderr)
+
+    def _assert_post_diff_output_for_bug(self, step, bug_id, expected_stderr):
+        self._assert_step_output_with_bug(step, bug_id, expected_stderr, self._post_diff_options())
+
+    def test_post_diff(self):
+        expected_stderr = "MOCK add_patch_to_bug: bug_id=78, description=Patch, mark_for_review=True, mark_for_commit_queue=False, mark_for_landing=False\nMOCK: user.open_url: http://example.com/78\n"
+        self._assert_post_diff_output_for_bug(steps.PostDiff, 78, expected_stderr)
+
+    def test_post_diff_for_commit(self):
+        expected_stderr = "MOCK add_patch_to_bug: bug_id=78, description=Patch for landing, mark_for_review=False, mark_for_commit_queue=False, mark_for_landing=True\n"
+        self._assert_post_diff_output_for_bug(steps.PostDiffForCommit, 78, expected_stderr)
+
+    def test_ensure_bug_is_open_and_assigned(self):
+        expected_stderr = "MOCK reopen_bug 50004 with comment 'Reopening to attach new patch.'\n"
+        self._assert_step_output_with_bug(steps.EnsureBugIsOpenAndAssigned, 50004, expected_stderr)
+        expected_stderr = "MOCK reassign_bug: bug_id=50002, assignee=None\n"
+        self._assert_step_output_with_bug(steps.EnsureBugIsOpenAndAssigned, 50002, expected_stderr)
+
+    def test_runtests_args(self):
+        mock_options = self._step_options()
+        mock_options.non_interactive = False
+        step = steps.RunTests(MockTool(log_executive=True), mock_options)
+        # FIXME: We shouldn't use a real port-object here, but there is too much to mock at the moment.
+        mock_port = DeprecatedPort()
+        tool = MockTool(log_executive=True)
+        tool.port = lambda: mock_port
+        step = steps.RunTests(tool, mock_options)
+        expected_stderr = """Running Python unit tests
+MOCK run_and_throw_if_fail: ['Tools/Scripts/test-webkitpy'], cwd=/mock-checkout
+Running Perl unit tests
+MOCK run_and_throw_if_fail: ['Tools/Scripts/test-webkitperl'], cwd=/mock-checkout
+Running JavaScriptCore tests
+MOCK run_and_throw_if_fail: ['Tools/Scripts/run-javascriptcore-tests'], cwd=/mock-checkout
+Running run-webkit-tests
+MOCK run_and_throw_if_fail: ['Tools/Scripts/run-webkit-tests', '--quiet'], cwd=/mock-checkout
+"""
+        OutputCapture().assert_outputs(self, step.run, [{}], expected_stderr=expected_stderr)
diff --git a/Tools/Scripts/webkitpy/tool/steps/suggestreviewers.py b/Tools/Scripts/webkitpy/tool/steps/suggestreviewers.py
new file mode 100644
index 0000000..76bef35
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/suggestreviewers.py
@@ -0,0 +1,51 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.tool.steps.options import Options
+
+
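+# When --suggest-reviewers is passed, lists reviewers who have recently
+# modified the files touched by the patch and offers to CC them on the bug.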
+class SuggestReviewers(AbstractStep):
+    @classmethod
+    def options(cls):
+        return AbstractStep.options() + [
+            Options.git_commit,
+            Options.suggest_reviewers,
+        ]
+
+    def run(self, state):
+        if not self._options.suggest_reviewers:
+            return
+
+        reviewers = self._tool.checkout().suggested_reviewers(self._options.git_commit, self._changed_files(state))
+        print "The following reviewers have recently modified files in your patch:"
+        print "\n".join([reviewer.full_name for reviewer in reviewers])
+        if not self._tool.user.confirm("Would you like to CC them?"):
+            return
+        reviewer_emails = [reviewer.bugzilla_email() for reviewer in reviewers]
+        self._tool.bugs.add_cc_to_bug(state['bug_id'], reviewer_emails)
diff --git a/Tools/Scripts/webkitpy/tool/steps/suggestreviewers_unittest.py b/Tools/Scripts/webkitpy/tool/steps/suggestreviewers_unittest.py
new file mode 100644
index 0000000..e995663
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/suggestreviewers_unittest.py
@@ -0,0 +1,46 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.tool.mocktool import MockOptions, MockTool
+from webkitpy.tool.steps.suggestreviewers import SuggestReviewers
+
+
+class SuggestReviewersTest(unittest.TestCase):
+    def test_disabled(self):
+        step = SuggestReviewers(MockTool(), MockOptions(suggest_reviewers=False))
+        OutputCapture().assert_outputs(self, step.run, [{}])
+
+    def test_basic(self):
+        capture = OutputCapture()
+        step = SuggestReviewers(MockTool(), MockOptions(suggest_reviewers=True, git_commit=None))
+        expected_stdout = "The following reviewers have recently modified files in your patch:\nFoo Bar\n"
+        expected_stderr = "Would you like to CC them?\n"
+        capture.assert_outputs(self, step.run, [{"bug_id": "123"}], expected_stdout=expected_stdout, expected_stderr=expected_stderr)
diff --git a/Tools/Scripts/webkitpy/tool/steps/update.py b/Tools/Scripts/webkitpy/tool/steps/update.py
new file mode 100644
index 0000000..cae2bbd
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/update.py
@@ -0,0 +1,51 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.tool.steps.options import Options
+from webkitpy.common.system.deprecated_logging import log
+
+
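+# Updates the working directory via the port's update-webkit command when
+# --update is set.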
+class Update(AbstractStep):
+    @classmethod
+    def options(cls):
+        return AbstractStep.options() + [
+            Options.non_interactive,
+            Options.update,
+            Options.quiet,
+        ]
+
+    def run(self, state):
+        if not self._options.update:
+            return
+        log("Updating working directory")
+        self._tool.executive.run_and_throw_if_fail(self._update_command(), quiet=self._options.quiet, cwd=self._tool.scm().checkout_root)
+
+    def _update_command(self):
+        update_command = self._tool.port().update_webkit_command(self._options.non_interactive)
+        return update_command
diff --git a/Tools/Scripts/webkitpy/tool/steps/update_unittest.py b/Tools/Scripts/webkitpy/tool/steps/update_unittest.py
new file mode 100644
index 0000000..c1a934d
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/update_unittest.py
@@ -0,0 +1,66 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.config.ports import ChromiumPort, ChromiumAndroidPort, ChromiumXVFBPort
+from webkitpy.tool.mocktool import MockOptions, MockTool
+from webkitpy.tool.steps.update import Update
+
+
+class UpdateTest(unittest.TestCase):
+
+    def test_update_command_non_interactive(self):
+        tool = MockTool()
+        options = MockOptions(non_interactive=True)
+        step = Update(tool, options)
+        self.assertEqual(["mock-update-webkit"], step._update_command())
+
+        tool._deprecated_port = ChromiumPort()
+        self.assertEqual(["Tools/Scripts/update-webkit", "--chromium", "--force-update"], step._update_command())
+
+        tool._deprecated_port = ChromiumXVFBPort()
+        self.assertEqual(["Tools/Scripts/update-webkit", "--chromium", "--force-update"], step._update_command())
+
+        tool._deprecated_port = ChromiumAndroidPort()
+        self.assertEqual(["Tools/Scripts/update-webkit", "--chromium", "--force-update", "--chromium-android"], step._update_command())
+
+    def test_update_command_interactive(self):
+        tool = MockTool()
+        options = MockOptions(non_interactive=False)
+        step = Update(tool, options)
+        self.assertEqual(["mock-update-webkit"], step._update_command())
+
+        tool._deprecated_port = ChromiumPort()
+        self.assertEqual(["Tools/Scripts/update-webkit", "--chromium"], step._update_command())
+
+        tool._deprecated_port = ChromiumXVFBPort()
+        self.assertEqual(["Tools/Scripts/update-webkit", "--chromium"], step._update_command())
+
+        tool._deprecated_port = ChromiumAndroidPort()
+        self.assertEqual(["Tools/Scripts/update-webkit", "--chromium", "--chromium-android"], step._update_command())
diff --git a/Tools/Scripts/webkitpy/tool/steps/updatechangelogswithreview_unittest.py b/Tools/Scripts/webkitpy/tool/steps/updatechangelogswithreview_unittest.py
new file mode 100644
index 0000000..8ec8891
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/updatechangelogswithreview_unittest.py
@@ -0,0 +1,54 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.tool.mocktool import MockOptions, MockTool
+from webkitpy.tool.steps.updatechangelogswithreviewer import UpdateChangeLogsWithReviewer
+
+class UpdateChangeLogsWithReviewerTest(unittest.TestCase):
+    def test_guess_reviewer_from_bug(self):
+        capture = OutputCapture()
+        step = UpdateChangeLogsWithReviewer(MockTool(), MockOptions())
+        expected_stderr = "No reviewed patches on bug 50001, cannot infer reviewer.\n"
+        capture.assert_outputs(self, step._guess_reviewer_from_bug, [50001], expected_stderr=expected_stderr)
+
+    def test_guess_reviewer_from_multipatch_bug(self):
+        capture = OutputCapture()
+        step = UpdateChangeLogsWithReviewer(MockTool(), MockOptions())
+        expected_stderr = "Guessing \"Reviewer2\" as reviewer from attachment 10001 on bug 50000.\n"
+        capture.assert_outputs(self, step._guess_reviewer_from_bug, [50000], expected_stderr=expected_stderr)
+
+    def test_empty_state(self):
+        capture = OutputCapture()
+        options = MockOptions()
+        options.reviewer = 'MOCK reviewer'
+        options.git_commit = 'MOCK git commit'
+        step = UpdateChangeLogsWithReviewer(MockTool(), options)
+        capture.assert_outputs(self, step.run, [{}])
diff --git a/Tools/Scripts/webkitpy/tool/steps/updatechangelogswithreviewer.py b/Tools/Scripts/webkitpy/tool/steps/updatechangelogswithreviewer.py
new file mode 100644
index 0000000..cc3e965
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/updatechangelogswithreviewer.py
@@ -0,0 +1,75 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.checkout.changelog import ChangeLog
+from webkitpy.tool.grammar import pluralize
+from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.tool.steps.options import Options
+from webkitpy.common.system.deprecated_logging import log, error
+
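+# Sets the reviewer on each ChangeLog entry, using --reviewer when provided
+# and otherwise guessing the reviewer from the latest reviewed patch on the
+# bug.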
+class UpdateChangeLogsWithReviewer(AbstractStep):
+    @classmethod
+    def options(cls):
+        return AbstractStep.options() + [
+            Options.git_commit,
+            Options.reviewer,
+        ]
+
+    def _guess_reviewer_from_bug(self, bug_id):
+        # FIXME: It's unclear if it would be safe to use self.cached_lookup(state, 'bug')
+        # here as we don't currently have a way to invalidate a bug after making changes (like ObsoletePatches does).
+        patches = self._tool.bugs.fetch_bug(bug_id).reviewed_patches()
+        if not patches:
+            log("%s on bug %s, cannot infer reviewer." % ("No reviewed patches", bug_id))
+            return None
+        patch = patches[-1]
+        log("Guessing \"%s\" as reviewer from attachment %s on bug %s." % (patch.reviewer().full_name, patch.id(), bug_id))
+        return patch.reviewer().full_name
+
+    def run(self, state):
+        bug_id = state.get("bug_id")
+        if not bug_id and state.get("patch"):
+            bug_id = state.get("patch").bug_id()
+
+        reviewer = self._options.reviewer
+        if not reviewer:
+            if not bug_id:
+                log("No bug id provided and --reviewer= not provided.  Not updating ChangeLogs with reviewer.")
+                return
+            reviewer = self._guess_reviewer_from_bug(bug_id)
+
+        if not reviewer:
+            log("Failed to guess reviewer from bug %s and --reviewer= not provided.  Not updating ChangeLogs with reviewer." % bug_id)
+            return
+
+        # cached_lookup("changelogs") is always absolute paths.
+        for changelog_path in self.cached_lookup(state, "changelogs"):
+            ChangeLog(changelog_path).set_reviewer(reviewer)
+
+        # Tell the world that we just changed something on disk so that the cached diff is invalidated.
+        self.did_modify_checkout(state)
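A minimal usage sketch (illustrative only, not part of the patch), mirroring test_empty_state above; MockTool and MockOptions are the test doubles from webkitpy.tool.mocktool used by the unit tests in this change:

    from webkitpy.tool.mocktool import MockOptions, MockTool
    from webkitpy.tool.steps.updatechangelogswithreviewer import UpdateChangeLogsWithReviewer

    options = MockOptions()
    options.reviewer = 'MOCK reviewer'      # explicit reviewer; skips _guess_reviewer_from_bug()
    options.git_commit = 'MOCK git commit'
    step = UpdateChangeLogsWithReviewer(MockTool(), options)
    step.run({})  # writes the reviewer into every ChangeLog returned by cached_lookup(state, "changelogs")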
diff --git a/Tools/Scripts/webkitpy/tool/steps/updatechromiumdeps.py b/Tools/Scripts/webkitpy/tool/steps/updatechromiumdeps.py
new file mode 100644
index 0000000..c9fc631
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/updatechromiumdeps.py
@@ -0,0 +1,73 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import urllib2
+
+from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.tool.steps.options import Options
+from webkitpy.common.config import urls
+from webkitpy.common.system.deprecated_logging import log, error
+
+
+class UpdateChromiumDEPS(AbstractStep):
+    @classmethod
+    def options(cls):
+        return AbstractStep.options() + [
+            Options.non_interactive,
+        ]
+
+    # Notice that this method throws lots of exciting exceptions: urllib2 errors from urlopen and ValueError if the response is not an integer.
+    def _fetch_last_known_good_revision(self):
+        return int(urllib2.urlopen(urls.chromium_lkgr_url).read())
+
+    def _validate_revisions(self, current_chromium_revision, new_chromium_revision):
+        if new_chromium_revision < current_chromium_revision:
+            message = "Current Chromium DEPS revision %s is newer than %s." % (current_chromium_revision, new_chromium_revision)
+            if self._options.non_interactive:
+                error(message)  # Causes the process to terminate.
+            log(message)
+            new_chromium_revision = self._tool.user.prompt("Enter new chromium revision (enter nothing to cancel):\n")
+            try:
+                new_chromium_revision = int(new_chromium_revision)
+            except (ValueError, TypeError):  # non-numeric or empty input cancels the update
+                new_chromium_revision = None
+            if not new_chromium_revision:
+                error("Unable to update Chromium DEPS")
+
+
+    def run(self, state):
+        # Note that state["chromium_revision"] must be defined, but can be None.
+        new_chromium_revision = state["chromium_revision"]
+        if not new_chromium_revision:
+            new_chromium_revision = self._fetch_last_known_good_revision()
+
+        deps = self._tool.checkout().chromium_deps()
+        current_chromium_revision = deps.read_variable("chromium_rev")
+        self._validate_revisions(current_chromium_revision, new_chromium_revision)
+        log("Updating Chromium DEPS to %s" % new_chromium_revision)
+        deps.write_variable("chromium_rev", new_chromium_revision)
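A small isolated sketch of the prompt-parsing fallback used in _validate_revisions above (illustrative only, not part of the patch): a non-numeric or empty reply, or no reply at all, maps to None, which the step then treats as a cancellation.

    # Illustrative only: the int()-with-fallback pattern from _validate_revisions.
    def parse_revision(reply):
        try:
            return int(reply)
        except (ValueError, TypeError):  # non-numeric string, empty string, or None
            return None

    assert parse_revision("167172") == 167172
    assert parse_revision("") is None
    assert parse_revision(None) is None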
diff --git a/Tools/Scripts/webkitpy/tool/steps/validatechangelogs.py b/Tools/Scripts/webkitpy/tool/steps/validatechangelogs.py
new file mode 100644
index 0000000..b6b33c0
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/validatechangelogs.py
@@ -0,0 +1,76 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.tool.steps.options import Options
+from webkitpy.common.checkout.diff_parser import DiffParser
+from webkitpy.common.system.deprecated_logging import error, log
+
+
+# This is closely related to the ValidateReviewer step and the CommitterValidator class.
+# We may want to unify more of this code in one place.
+class ValidateChangeLogs(AbstractStep):
+    @classmethod
+    def options(cls):
+        return AbstractStep.options() + [
+            Options.non_interactive,
+        ]
+
+    def _check_changelog_diff(self, diff_file):
+        if not self._tool.checkout().is_path_to_changelog(diff_file.filename):
+            return True
+        # Each line is a tuple; the first value is the deleted line number.
+        # Date, reviewer, bug title, bug url, and empty lines could all be
+        # identical in the most recent entries.  If the diff starts any
+        # later than that, assume that the entry is wrong.
+        if diff_file.lines[0][0] < 8:
+            return True
+        if self._options.non_interactive:
+            return False
+
+        log("The diff to %s looks wrong.  Are you sure your ChangeLog entry is at the top of the file?" % (diff_file.filename))
+        # FIXME: Do we need to make the file path absolute?
+        self._tool.scm().diff_for_file(diff_file.filename)
+        if self._tool.user.confirm("OK to continue?", default='n'):
+            return True
+        return False
+
+    def run(self, state):
+        changed_files = self.cached_lookup(state, "changed_files")
+        for filename in changed_files:
+            if not self._tool.checkout().is_path_to_changelog(filename):
+                continue
+            # Diff ChangeLogs directly because svn-create-patch will move
+            # ChangeLog entries to the top automatically, defeating our
+            # validation here.
+            # FIXME: Should we diff all the ChangeLogs at once?
+            diff = self._tool.scm().diff_for_file(filename)
+            parsed_diff = DiffParser(diff.splitlines())
+            for diff_file in parsed_diff.files.values():
+                if not self._check_changelog_diff(diff_file):
+                    error("ChangeLog entry in %s is not at the top of the file." % diff_file.filename)
diff --git a/Tools/Scripts/webkitpy/tool/steps/validatechangelogs_unittest.py b/Tools/Scripts/webkitpy/tool/steps/validatechangelogs_unittest.py
new file mode 100644
index 0000000..96bae9f
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/validatechangelogs_unittest.py
@@ -0,0 +1,58 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.thirdparty.mock import Mock
+from webkitpy.tool.mocktool import MockOptions, MockTool
+from webkitpy.tool.steps.validatechangelogs import ValidateChangeLogs
+
+
+class ValidateChangeLogsTest(unittest.TestCase):
+
+    def _assert_start_line_produces_output(self, start_line, should_fail=False, non_interactive=False):
+        tool = MockTool()
+        tool._checkout.is_path_to_changelog = lambda path: True
+        step = ValidateChangeLogs(tool, MockOptions(git_commit=None, non_interactive=non_interactive))
+        diff_file = Mock()
+        diff_file.filename = "mock/ChangeLog"
+        diff_file.lines = [(start_line, start_line, "foo")]
+        expected_stderr = ""
+        if should_fail and not non_interactive:
+            expected_stderr = "The diff to mock/ChangeLog looks wrong.  Are you sure your ChangeLog entry is at the top of the file?\nOK to continue?\n"
+        result = OutputCapture().assert_outputs(self, step._check_changelog_diff, [diff_file], expected_stderr=expected_stderr)
+        self.assertEqual(not result, should_fail)
+
+    def test_check_changelog_diff(self):
+        self._assert_start_line_produces_output(1)
+        self._assert_start_line_produces_output(7)
+        self._assert_start_line_produces_output(8, should_fail=True)
+
+        self._assert_start_line_produces_output(1, non_interactive=False)
+        self._assert_start_line_produces_output(8, non_interactive=True, should_fail=True)
diff --git a/Tools/Scripts/webkitpy/tool/steps/validatereviewer.py b/Tools/Scripts/webkitpy/tool/steps/validatereviewer.py
new file mode 100644
index 0000000..5e93821
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/validatereviewer.py
@@ -0,0 +1,55 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.checkout.changelog import ChangeLog
+from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.tool.steps.options import Options
+from webkitpy.common.system.deprecated_logging import error, log
+
+
+# FIXME: Some of this logic should probably be unified with CommitterValidator?
+class ValidateReviewer(AbstractStep):
+    @classmethod
+    def options(cls):
+        return AbstractStep.options() + [
+            Options.non_interactive,
+        ]
+
+    def run(self, state):
+        # FIXME: For now we disable this check when a user is driving the script;
+        # it is too draconian (and too poorly tested) to foist upon users.
+        if not self._options.non_interactive:
+            return
+        for changelog_path in self.cached_lookup(state, "changelogs"):
+            changelog_entry = ChangeLog(changelog_path).latest_entry()
+            if changelog_entry.has_valid_reviewer():
+                continue
+            reviewer_text = changelog_entry.reviewer_text()
+            if reviewer_text:
+                log("%s found in %s does not appear to be a valid reviewer according to committers.py." % (reviewer_text, changelog_path))
+            error('%s neither lists a valid reviewer nor contains the string "Unreviewed" or "Rubber stamp" (case insensitive).' % changelog_path)
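A minimal sketch (illustrative only, not part of the patch) of the interactive short-circuit above: when non_interactive is false, run() returns before reading any ChangeLogs, so users are never blocked by this check. MockTool and MockOptions are the test doubles from webkitpy.tool.mocktool.

    from webkitpy.tool.mocktool import MockOptions, MockTool
    from webkitpy.tool.steps.validatereviewer import ValidateReviewer

    step = ValidateReviewer(MockTool(), MockOptions(non_interactive=False))
    step.run({})  # no-op: the reviewer check only runs for non-interactive (bot) invocations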
diff --git a/Tools/Scripts/webkitpy/webkitpy.pyproj b/Tools/Scripts/webkitpy/webkitpy.pyproj
new file mode 100644
index 0000000..0bff5fc
--- /dev/null
+++ b/Tools/Scripts/webkitpy/webkitpy.pyproj
@@ -0,0 +1,540 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <PropertyGroup>
+    <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
+    <SchemaVersion>2.0</SchemaVersion>
+    <ProjectGuid>{59b0a791-93fe-40f8-a52b-ba19b73e8fa6}</ProjectGuid>
+    <ProjectHome>.</ProjectHome>
+    <StartupFile>layout_tests\run_webkit_tests.py</StartupFile>
+    <SearchPath>
+    </SearchPath>
+    <WorkingDirectory>../</WorkingDirectory>
+    <OutputPath>.</OutputPath>
+    <Name>webkitpy</Name>
+    <RootNamespace>webkitpy</RootNamespace>
+    <IsWindowsApplication>False</IsWindowsApplication>
+    <LaunchProvider>Standard Python launcher</LaunchProvider>
+    <CommandLineArguments>--platform=mock --no-pixel-tests --no-retry-failures</CommandLineArguments>
+    <InterpreterPath />
+    <InterpreterArguments />
+  </PropertyGroup>
+  <PropertyGroup Condition=" '$(Configuration)' == 'Debug' ">
+    <DebugSymbols>true</DebugSymbols>
+    <EnableUnmanagedDebugging>false</EnableUnmanagedDebugging>
+  </PropertyGroup>
+  <PropertyGroup Condition=" '$(Configuration)' == 'Release' ">
+    <DebugSymbols>true</DebugSymbols>
+    <EnableUnmanagedDebugging>false</EnableUnmanagedDebugging>
+  </PropertyGroup>
+  <ItemGroup>
+    <Compile Include="bindings\main.py" />
+    <Compile Include="bindings\__init__.py" />
+    <Compile Include="common\checkout\baselineoptimizer.py" />
+    <Compile Include="common\checkout\baselineoptimizer_unittest.py" />
+    <Compile Include="common\checkout\changelog.py" />
+    <Compile Include="common\checkout\changelog_unittest.py" />
+    <Compile Include="common\checkout\checkout.py" />
+    <Compile Include="common\checkout\checkout_mock.py" />
+    <Compile Include="common\checkout\checkout_unittest.py" />
+    <Compile Include="common\checkout\commitinfo.py" />
+    <Compile Include="common\checkout\commitinfo_unittest.py" />
+    <Compile Include="common\checkout\deps.py" />
+    <Compile Include="common\checkout\deps_mock.py" />
+    <Compile Include="common\checkout\diff_parser.py" />
+    <Compile Include="common\checkout\diff_parser_unittest.py" />
+    <Compile Include="common\checkout\diff_test_data.py" />
+    <Compile Include="common\checkout\scm\commitmessage.py" />
+    <Compile Include="common\checkout\scm\detection.py" />
+    <Compile Include="common\checkout\scm\detection_unittest.py" />
+    <Compile Include="common\checkout\scm\git.py" />
+    <Compile Include="common\checkout\scm\scm.py" />
+    <Compile Include="common\checkout\scm\scm_mock.py" />
+    <Compile Include="common\checkout\scm\scm_unittest.py" />
+    <Compile Include="common\checkout\scm\svn.py" />
+    <Compile Include="common\checkout\scm\__init__.py" />
+    <Compile Include="common\checkout\__init__.py" />
+    <Compile Include="common\checksvnconfigfile.py" />
+    <Compile Include="common\config\build.py" />
+    <Compile Include="common\config\build_unittest.py" />
+    <Compile Include="common\config\committers.py" />
+    <Compile Include="common\config\committers_unittest.py" />
+    <Compile Include="common\config\committervalidator.py" />
+    <Compile Include="common\config\committervalidator_unittest.py" />
+    <Compile Include="common\config\contributionareas.py" />
+    <Compile Include="common\config\contributionareas_unittest.py" />
+    <Compile Include="common\config\irc.py" />
+    <Compile Include="common\config\ports.py" />
+    <Compile Include="common\config\ports_mock.py" />
+    <Compile Include="common\config\ports_unittest.py" />
+    <Compile Include="common\config\urls.py" />
+    <Compile Include="common\config\urls_unittest.py" />
+    <Compile Include="common\config\__init__.py" />
+    <Compile Include="common\editdistance.py" />
+    <Compile Include="common\editdistance_unittest.py" />
+    <Compile Include="common\find_files.py" />
+    <Compile Include="common\find_files_unittest.py" />
+    <Compile Include="common\host.py" />
+    <Compile Include="common\host_mock.py" />
+    <Compile Include="common\lru_cache.py" />
+    <Compile Include="common\lru_cache_unittest.py" />
+    <Compile Include="common\memoized.py" />
+    <Compile Include="common\memoized_unittest.py" />
+    <Compile Include="common\message_pool.py" />
+    <Compile Include="common\net\bugzilla\attachment.py" />
+    <Compile Include="common\net\bugzilla\bug.py" />
+    <Compile Include="common\net\bugzilla\bugzilla.py" />
+    <Compile Include="common\net\bugzilla\bugzilla_mock.py" />
+    <Compile Include="common\net\bugzilla\bugzilla_unittest.py" />
+    <Compile Include="common\net\bugzilla\bug_unittest.py" />
+    <Compile Include="common\net\bugzilla\__init__.py" />
+    <Compile Include="common\net\buildbot\buildbot.py" />
+    <Compile Include="common\net\buildbot\buildbot_mock.py" />
+    <Compile Include="common\net\buildbot\buildbot_unittest.py" />
+    <Compile Include="common\net\buildbot\chromiumbuildbot.py" />
+    <Compile Include="common\net\buildbot\__init__.py" />
+    <Compile Include="common\net\credentials.py" />
+    <Compile Include="common\net\credentials_unittest.py" />
+    <Compile Include="common\net\failuremap.py" />
+    <Compile Include="common\net\failuremap_unittest.py" />
+    <Compile Include="common\net\file_uploader.py" />
+    <Compile Include="common\net\htdigestparser.py" />
+    <Compile Include="common\net\htdigestparser_unittest.py" />
+    <Compile Include="common\net\irc\ircbot.py" />
+    <Compile Include="common\net\irc\ircproxy.py" />
+    <Compile Include="common\net\irc\ircproxy_unittest.py" />
+    <Compile Include="common\net\irc\irc_mock.py" />
+    <Compile Include="common\net\irc\__init__.py" />
+    <Compile Include="common\net\layouttestresults.py" />
+    <Compile Include="common\net\layouttestresults_unittest.py" />
+    <Compile Include="common\net\networktransaction.py" />
+    <Compile Include="common\net\networktransaction_unittest.py" />
+    <Compile Include="common\net\omahaproxy.py" />
+    <Compile Include="common\net\omahaproxy_unittest.py" />
+    <Compile Include="common\net\regressionwindow.py" />
+    <Compile Include="common\net\resultsjsonparser.py" />
+    <Compile Include="common\net\resultsjsonparser_unittest.py" />
+    <Compile Include="common\net\statusserver.py" />
+    <Compile Include="common\net\statusserver_mock.py" />
+    <Compile Include="common\net\statusserver_unittest.py" />
+    <Compile Include="common\net\unittestresults.py" />
+    <Compile Include="common\net\unittestresults_unittest.py" />
+    <Compile Include="common\net\web.py" />
+    <Compile Include="common\net\web_mock.py" />
+    <Compile Include="common\net\__init__.py" />
+    <Compile Include="common\newstringio.py" />
+    <Compile Include="common\newstringio_unittest.py" />
+    <Compile Include="common\prettypatch.py" />
+    <Compile Include="common\prettypatch_unittest.py" />
+    <Compile Include="common\read_checksum_from_png.py" />
+    <Compile Include="common\read_checksum_from_png_unittest.py" />
+    <Compile Include="common\system\autoinstall.py" />
+    <Compile Include="common\system\crashlogs.py" />
+    <Compile Include="common\system\crashlogs_unittest.py" />
+    <Compile Include="common\system\deprecated_logging.py" />
+    <Compile Include="common\system\deprecated_logging_unittest.py" />
+    <Compile Include="common\system\environment.py" />
+    <Compile Include="common\system\environment_unittest.py" />
+    <Compile Include="common\system\executive.py" />
+    <Compile Include="common\system\executive_mock.py" />
+    <Compile Include="common\system\executive_unittest.py" />
+    <Compile Include="common\system\fileset.py" />
+    <Compile Include="common\system\filesystem.py" />
+    <Compile Include="common\system\filesystem_mock.py" />
+    <Compile Include="common\system\filesystem_mock_unittest.py" />
+    <Compile Include="common\system\filesystem_unittest.py" />
+    <Compile Include="common\system\file_lock.py" />
+    <Compile Include="common\system\file_lock_integrationtest.py" />
+    <Compile Include="common\system\logtesting.py" />
+    <Compile Include="common\system\logutils.py" />
+    <Compile Include="common\system\logutils_unittest.py" />
+    <Compile Include="common\system\outputcapture.py" />
+    <Compile Include="common\system\outputcapture_unittest.py" />
+    <Compile Include="common\system\path.py" />
+    <Compile Include="common\system\path_unittest.py" />
+    <Compile Include="common\system\platforminfo.py" />
+    <Compile Include="common\system\platforminfo_mock.py" />
+    <Compile Include="common\system\platforminfo_unittest.py" />
+    <Compile Include="common\system\stack_utils.py" />
+    <Compile Include="common\system\stack_utils_unittest.py" />
+    <Compile Include="common\system\systemhost.py" />
+    <Compile Include="common\system\systemhost_mock.py" />
+    <Compile Include="common\system\urlfetcher.py" />
+    <Compile Include="common\system\urlfetcher_mock.py" />
+    <Compile Include="common\system\user.py" />
+    <Compile Include="common\system\user_mock.py" />
+    <Compile Include="common\system\user_unittest.py" />
+    <Compile Include="common\system\workspace.py" />
+    <Compile Include="common\system\workspace_mock.py" />
+    <Compile Include="common\system\workspace_unittest.py" />
+    <Compile Include="common\system\zipfileset.py" />
+    <Compile Include="common\system\zipfileset_mock.py" />
+    <Compile Include="common\system\zipfileset_unittest.py" />
+    <Compile Include="common\system\zip_mock.py" />
+    <Compile Include="common\system\__init__.py" />
+    <Compile Include="common\thread\messagepump.py" />
+    <Compile Include="common\thread\messagepump_unittest.py" />
+    <Compile Include="common\thread\threadedmessagequeue.py" />
+    <Compile Include="common\thread\threadedmessagequeue_unittest.py" />
+    <Compile Include="common\thread\__init__.py" />
+    <Compile Include="common\version_check.py" />
+    <Compile Include="common\watchlist\amountchangedpattern.py" />
+    <Compile Include="common\watchlist\amountchangedpattern_unittest.py" />
+    <Compile Include="common\watchlist\changedlinepattern.py" />
+    <Compile Include="common\watchlist\changedlinepattern_unittest.py" />
+    <Compile Include="common\watchlist\filenamepattern.py" />
+    <Compile Include="common\watchlist\filenamepattern_unittest.py" />
+    <Compile Include="common\watchlist\watchlist.py" />
+    <Compile Include="common\watchlist\watchlistloader.py" />
+    <Compile Include="common\watchlist\watchlistloader_unittest.py" />
+    <Compile Include="common\watchlist\watchlistparser.py" />
+    <Compile Include="common\watchlist\watchlistparser_unittest.py" />
+    <Compile Include="common\watchlist\watchlistrule.py" />
+    <Compile Include="common\watchlist\watchlistrule_unittest.py" />
+    <Compile Include="common\watchlist\watchlist_mock.py" />
+    <Compile Include="common\watchlist\watchlist_unittest.py" />
+    <Compile Include="common\watchlist\__init__.py" />
+    <Compile Include="common\webkitunittest.py" />
+    <Compile Include="common\__init__.py" />
+    <Compile Include="layout_tests\controllers\manager.py" />
+    <Compile Include="layout_tests\controllers\manager_unittest.py" />
+    <Compile Include="layout_tests\controllers\single_test_runner.py" />
+    <Compile Include="layout_tests\controllers\test_expectations_editor.py" />
+    <Compile Include="layout_tests\controllers\test_expectations_editor_unittest.py" />
+    <Compile Include="layout_tests\controllers\test_result_writer.py" />
+    <Compile Include="layout_tests\controllers\test_result_writer_unittest.py" />
+    <Compile Include="layout_tests\controllers\worker.py" />
+    <Compile Include="layout_tests\controllers\__init__.py" />
+    <Compile Include="layout_tests\layout_package\json_layout_results_generator.py" />
+    <Compile Include="layout_tests\layout_package\json_results_generator.py" />
+    <Compile Include="layout_tests\layout_package\json_results_generator_unittest.py" />
+    <Compile Include="layout_tests\layout_package\__init__.py" />
+    <Compile Include="layout_tests\models\result_summary.py" />
+    <Compile Include="layout_tests\models\test_configuration.py" />
+    <Compile Include="layout_tests\models\test_configuration_unittest.py" />
+    <Compile Include="layout_tests\models\test_expectations.py" />
+    <Compile Include="layout_tests\models\test_expectations_unittest.py" />
+    <Compile Include="layout_tests\models\test_failures.py" />
+    <Compile Include="layout_tests\models\test_failures_unittest.py" />
+    <Compile Include="layout_tests\models\test_input.py" />
+    <Compile Include="layout_tests\models\test_results.py" />
+    <Compile Include="layout_tests\models\test_results_unittest.py" />
+    <Compile Include="layout_tests\models\__init__.py" />
+    <Compile Include="layout_tests\port\apple.py" />
+    <Compile Include="layout_tests\port\base.py" />
+    <Compile Include="layout_tests\port\base_unittest.py" />
+    <Compile Include="layout_tests\port\builders.py" />
+    <Compile Include="layout_tests\port\builders_unittest.py" />
+    <Compile Include="layout_tests\port\chromium.py" />
+    <Compile Include="layout_tests\port\chromium_android.py" />
+    <Compile Include="layout_tests\port\chromium_android_unittest.py" />
+    <Compile Include="layout_tests\port\chromium_linux.py" />
+    <Compile Include="layout_tests\port\chromium_linux_unittest.py" />
+    <Compile Include="layout_tests\port\chromium_mac.py" />
+    <Compile Include="layout_tests\port\chromium_mac_unittest.py" />
+    <Compile Include="layout_tests\port\chromium_port_testcase.py" />
+    <Compile Include="layout_tests\port\chromium_unittest.py" />
+    <Compile Include="layout_tests\port\chromium_win.py" />
+    <Compile Include="layout_tests\port\chromium_win_unittest.py" />
+    <Compile Include="layout_tests\port\config.py" />
+    <Compile Include="layout_tests\port\config_mock.py" />
+    <Compile Include="layout_tests\port\config_standalone.py" />
+    <Compile Include="layout_tests\port\config_unittest.py" />
+    <Compile Include="layout_tests\port\driver.py" />
+    <Compile Include="layout_tests\port\driver_unittest.py" />
+    <Compile Include="layout_tests\port\efl.py" />
+    <Compile Include="layout_tests\port\efl_unittest.py" />
+    <Compile Include="layout_tests\port\factory.py" />
+    <Compile Include="layout_tests\port\factory_unittest.py" />
+    <Compile Include="layout_tests\port\gtk.py" />
+    <Compile Include="layout_tests\port\gtk_unittest.py" />
+    <Compile Include="layout_tests\port\http_lock.py" />
+    <Compile Include="layout_tests\port\http_lock_unittest.py" />
+    <Compile Include="layout_tests\port\leakdetector.py" />
+    <Compile Include="layout_tests\port\leakdetector_unittest.py" />
+    <Compile Include="layout_tests\port\mac.py" />
+    <Compile Include="layout_tests\port\mac_unittest.py" />
+    <Compile Include="layout_tests\port\mock_drt.py" />
+    <Compile Include="layout_tests\port\mock_drt_unittest.py" />
+    <Compile Include="layout_tests\port\port_testcase.py" />
+    <Compile Include="layout_tests\port\pulseaudio_sanitizer.py" />
+    <Compile Include="layout_tests\port\qt.py" />
+    <Compile Include="layout_tests\port\qt_unittest.py" />
+    <Compile Include="layout_tests\port\server_process.py" />
+    <Compile Include="layout_tests\port\server_process_unittest.py" />
+    <Compile Include="layout_tests\port\test.py" />
+    <Compile Include="layout_tests\port\webkit.py" />
+    <Compile Include="layout_tests\port\webkit_unittest.py" />
+    <Compile Include="layout_tests\port\win.py" />
+    <Compile Include="layout_tests\port\win_unittest.py" />
+    <Compile Include="layout_tests\port\xvfbdriver.py" />
+    <Compile Include="layout_tests\port\__init__.py" />
+    <Compile Include="layout_tests\reftests\extract_reference_link.py" />
+    <Compile Include="layout_tests\reftests\extract_reference_link_unittest.py" />
+    <Compile Include="layout_tests\reftests\__init__.py" />
+    <Compile Include="layout_tests\run_webkit_tests.py" />
+    <Compile Include="layout_tests\run_webkit_tests_integrationtest.py" />
+    <Compile Include="layout_tests\servers\apache_http_server.py" />
+    <Compile Include="layout_tests\servers\apache_http_server_unittest.py" />
+    <Compile Include="layout_tests\servers\http_server.py" />
+    <Compile Include="layout_tests\servers\http_server_base.py" />
+    <Compile Include="layout_tests\servers\http_server_integrationtest.py" />
+    <Compile Include="layout_tests\servers\http_server_unittest.py" />
+    <Compile Include="layout_tests\servers\websocket_server.py" />
+    <Compile Include="layout_tests\servers\__init__.py" />
+    <Compile Include="layout_tests\views\metered_stream.py" />
+    <Compile Include="layout_tests\views\metered_stream_unittest.py" />
+    <Compile Include="layout_tests\views\printing.py" />
+    <Compile Include="layout_tests\views\printing_unittest.py" />
+    <Compile Include="layout_tests\views\__init__.py" />
+    <Compile Include="layout_tests\__init__.py" />
+    <Compile Include="performance_tests\perftest.py" />
+    <Compile Include="performance_tests\perftestsrunner.py" />
+    <Compile Include="performance_tests\perftestsrunner_unittest.py" />
+    <Compile Include="performance_tests\perftest_unittest.py" />
+    <Compile Include="performance_tests\__init__.py" />
+    <Compile Include="style\checker.py" />
+    <Compile Include="style\checkers\changelog.py" />
+    <Compile Include="style\checkers\changelog_unittest.py" />
+    <Compile Include="style\checkers\common.py" />
+    <Compile Include="style\checkers\common_unittest.py" />
+    <Compile Include="style\checkers\cpp.py" />
+    <Compile Include="style\checkers\cpp_unittest.py" />
+    <Compile Include="style\checkers\jsonchecker.py" />
+    <Compile Include="style\checkers\jsonchecker_unittest.py" />
+    <Compile Include="style\checkers\png.py" />
+    <Compile Include="style\checkers\png_unittest.py" />
+    <Compile Include="style\checkers\python.py" />
+    <Compile Include="style\checkers\python_unittest.py" />
+    <Compile Include="style\checkers\python_unittest_input.py" />
+    <Compile Include="style\checkers\test_expectations.py" />
+    <Compile Include="style\checkers\test_expectations_unittest.py" />
+    <Compile Include="style\checkers\text.py" />
+    <Compile Include="style\checkers\text_unittest.py" />
+    <Compile Include="style\checkers\watchlist.py" />
+    <Compile Include="style\checkers\watchlist_unittest.py" />
+    <Compile Include="style\checkers\xcodeproj.py" />
+    <Compile Include="style\checkers\xcodeproj_unittest.py" />
+    <Compile Include="style\checkers\xml.py" />
+    <Compile Include="style\checkers\xml_unittest.py" />
+    <Compile Include="style\checkers\__init__.py" />
+    <Compile Include="style\checker_unittest.py" />
+    <Compile Include="style\error_handlers.py" />
+    <Compile Include="style\error_handlers_unittest.py" />
+    <Compile Include="style\filereader.py" />
+    <Compile Include="style\filereader_unittest.py" />
+    <Compile Include="style\filter.py" />
+    <Compile Include="style\filter_unittest.py" />
+    <Compile Include="style\main.py" />
+    <Compile Include="style\main_unittest.py" />
+    <Compile Include="style\optparser.py" />
+    <Compile Include="style\optparser_unittest.py" />
+    <Compile Include="style\patchreader.py" />
+    <Compile Include="style\patchreader_unittest.py" />
+    <Compile Include="style\__init__.py" />
+    <Compile Include="test\finder.py" />
+    <Compile Include="test\finder_unittest.py" />
+    <Compile Include="test\main.py" />
+    <Compile Include="test\main_unittest.py" />
+    <Compile Include="test\printer.py" />
+    <Compile Include="test\runner.py" />
+    <Compile Include="test\runner_unittest.py" />
+    <Compile Include="test\skip.py" />
+    <Compile Include="test\skip_unittest.py" />
+    <Compile Include="test\__init__.py" />
+    <Compile Include="thirdparty\BeautifulSoup.py" />
+    <Compile Include="thirdparty\mock.py" />
+    <Compile Include="thirdparty\mod_pywebsocket\common.py" />
+    <Compile Include="thirdparty\mod_pywebsocket\dispatch.py" />
+    <Compile Include="thirdparty\mod_pywebsocket\extensions.py" />
+    <Compile Include="thirdparty\mod_pywebsocket\handshake\draft75.py" />
+    <Compile Include="thirdparty\mod_pywebsocket\handshake\hybi.py" />
+    <Compile Include="thirdparty\mod_pywebsocket\handshake\hybi00.py" />
+    <Compile Include="thirdparty\mod_pywebsocket\handshake\_base.py" />
+    <Compile Include="thirdparty\mod_pywebsocket\handshake\__init__.py" />
+    <Compile Include="thirdparty\mod_pywebsocket\headerparserhandler.py" />
+    <Compile Include="thirdparty\mod_pywebsocket\http_header_util.py" />
+    <Compile Include="thirdparty\mod_pywebsocket\memorizingfile.py" />
+    <Compile Include="thirdparty\mod_pywebsocket\msgutil.py" />
+    <Compile Include="thirdparty\mod_pywebsocket\standalone.py" />
+    <Compile Include="thirdparty\mod_pywebsocket\stream.py" />
+    <Compile Include="thirdparty\mod_pywebsocket\util.py" />
+    <Compile Include="thirdparty\mod_pywebsocket\_stream_base.py" />
+    <Compile Include="thirdparty\mod_pywebsocket\_stream_hixie75.py" />
+    <Compile Include="thirdparty\mod_pywebsocket\_stream_hybi.py" />
+    <Compile Include="thirdparty\mod_pywebsocket\__init__.py" />
+    <Compile Include="thirdparty\ordered_dict.py" />
+    <Compile Include="thirdparty\__init__.py" />
+    <Compile Include="thirdparty\__init___unittest.py" />
+    <Compile Include="tool\bot\botinfo.py" />
+    <Compile Include="tool\bot\botinfo_unittest.py" />
+    <Compile Include="tool\bot\commitqueuetask.py" />
+    <Compile Include="tool\bot\commitqueuetask_unittest.py" />
+    <Compile Include="tool\bot\earlywarningsystemtask.py" />
+    <Compile Include="tool\bot\expectedfailures.py" />
+    <Compile Include="tool\bot\expectedfailures_unittest.py" />
+    <Compile Include="tool\bot\feeders.py" />
+    <Compile Include="tool\bot\feeders_unittest.py" />
+    <Compile Include="tool\bot\flakytestreporter.py" />
+    <Compile Include="tool\bot\flakytestreporter_unittest.py" />
+    <Compile Include="tool\bot\irc_command.py" />
+    <Compile Include="tool\bot\irc_command_unittest.py" />
+    <Compile Include="tool\bot\ircbot.py" />
+    <Compile Include="tool\bot\ircbot_unittest.py" />
+    <Compile Include="tool\bot\layouttestresultsreader.py" />
+    <Compile Include="tool\bot\layouttestresultsreader_unittest.py" />
+    <Compile Include="tool\bot\patchanalysistask.py" />
+    <Compile Include="tool\bot\queueengine.py" />
+    <Compile Include="tool\bot\queueengine_unittest.py" />
+    <Compile Include="tool\bot\sheriff.py" />
+    <Compile Include="tool\bot\sheriff_unittest.py" />
+    <Compile Include="tool\bot\stylequeuetask.py" />
+    <Compile Include="tool\bot\__init__.py" />
+    <Compile Include="tool\commands\abstractlocalservercommand.py" />
+    <Compile Include="tool\commands\abstractsequencedcommand.py" />
+    <Compile Include="tool\commands\adduserstogroups.py" />
+    <Compile Include="tool\commands\analyzechangelog.py" />
+    <Compile Include="tool\commands\analyzechangelog_unittest.py" />
+    <Compile Include="tool\commands\applywatchlistlocal.py" />
+    <Compile Include="tool\commands\applywatchlistlocal_unittest.py" />
+    <Compile Include="tool\commands\bugfortest.py" />
+    <Compile Include="tool\commands\bugsearch.py" />
+    <Compile Include="tool\commands\chromechannels.py" />
+    <Compile Include="tool\commands\chromechannels_unittest.py" />
+    <Compile Include="tool\commands\commandtest.py" />
+    <Compile Include="tool\commands\download.py" />
+    <Compile Include="tool\commands\download_unittest.py" />
+    <Compile Include="tool\commands\earlywarningsystem.py" />
+    <Compile Include="tool\commands\earlywarningsystem_unittest.py" />
+    <Compile Include="tool\commands\expectations.py" />
+    <Compile Include="tool\commands\findusers.py" />
+    <Compile Include="tool\commands\gardenomatic.py" />
+    <Compile Include="tool\commands\openbugs.py" />
+    <Compile Include="tool\commands\openbugs_unittest.py" />
+    <Compile Include="tool\commands\prettydiff.py" />
+    <Compile Include="tool\commands\queries.py" />
+    <Compile Include="tool\commands\queries_unittest.py" />
+    <Compile Include="tool\commands\queues.py" />
+    <Compile Include="tool\commands\queuestest.py" />
+    <Compile Include="tool\commands\queues_unittest.py" />
+    <Compile Include="tool\commands\rebaseline.py" />
+    <Compile Include="tool\commands\rebaselineserver.py" />
+    <Compile Include="tool\commands\rebaseline_unittest.py" />
+    <Compile Include="tool\commands\roll.py" />
+    <Compile Include="tool\commands\roll_unittest.py" />
+    <Compile Include="tool\commands\sheriffbot.py" />
+    <Compile Include="tool\commands\sheriffbot_unittest.py" />
+    <Compile Include="tool\commands\stepsequence.py" />
+    <Compile Include="tool\commands\suggestnominations.py" />
+    <Compile Include="tool\commands\suggestnominations_unittest.py" />
+    <Compile Include="tool\commands\upload.py" />
+    <Compile Include="tool\commands\upload_unittest.py" />
+    <Compile Include="tool\commands\__init__.py" />
+    <Compile Include="tool\comments.py" />
+    <Compile Include="tool\grammar.py" />
+    <Compile Include="tool\grammar_unittest.py" />
+    <Compile Include="tool\main.py" />
+    <Compile Include="tool\mocktool.py" />
+    <Compile Include="tool\mocktool_unittest.py" />
+    <Compile Include="tool\multicommandtool.py" />
+    <Compile Include="tool\multicommandtool_unittest.py" />
+    <Compile Include="tool\servers\gardeningserver.py" />
+    <Compile Include="tool\servers\gardeningserver_unittest.py" />
+    <Compile Include="tool\servers\rebaselineserver.py" />
+    <Compile Include="tool\servers\rebaselineserver_unittest.py" />
+    <Compile Include="tool\servers\reflectionhandler.py" />
+    <Compile Include="tool\servers\__init__.py" />
+    <Compile Include="tool\steps\abstractstep.py" />
+    <Compile Include="tool\steps\addsvnmimetypeforpng.py" />
+    <Compile Include="tool\steps\addsvnmimetypeforpng_unittest.py" />
+    <Compile Include="tool\steps\applypatch.py" />
+    <Compile Include="tool\steps\applypatchwithlocalcommit.py" />
+    <Compile Include="tool\steps\applywatchlist.py" />
+    <Compile Include="tool\steps\applywatchlist_unittest.py" />
+    <Compile Include="tool\steps\attachtobug.py" />
+    <Compile Include="tool\steps\build.py" />
+    <Compile Include="tool\steps\checkstyle.py" />
+    <Compile Include="tool\steps\cleanworkingdirectory.py" />
+    <Compile Include="tool\steps\cleanworkingdirectorywithlocalcommits.py" />
+    <Compile Include="tool\steps\cleanworkingdirectory_unittest.py" />
+    <Compile Include="tool\steps\closebug.py" />
+    <Compile Include="tool\steps\closebugforlanddiff.py" />
+    <Compile Include="tool\steps\closebugforlanddiff_unittest.py" />
+    <Compile Include="tool\steps\closepatch.py" />
+    <Compile Include="tool\steps\commit.py" />
+    <Compile Include="tool\steps\commit_unittest.py" />
+    <Compile Include="tool\steps\confirmdiff.py" />
+    <Compile Include="tool\steps\createbug.py" />
+    <Compile Include="tool\steps\editchangelog.py" />
+    <Compile Include="tool\steps\ensurebugisopenandassigned.py" />
+    <Compile Include="tool\steps\ensurelocalcommitifneeded.py" />
+    <Compile Include="tool\steps\metastep.py" />
+    <Compile Include="tool\steps\obsoletepatches.py" />
+    <Compile Include="tool\steps\options.py" />
+    <Compile Include="tool\steps\postdiff.py" />
+    <Compile Include="tool\steps\postdiffforcommit.py" />
+    <Compile Include="tool\steps\postdiffforrevert.py" />
+    <Compile Include="tool\steps\preparechangelog.py" />
+    <Compile Include="tool\steps\preparechangelogfordepsroll.py" />
+    <Compile Include="tool\steps\preparechangelogforrevert.py" />
+    <Compile Include="tool\steps\preparechangelogforrevert_unittest.py" />
+    <Compile Include="tool\steps\preparechangelog_unittest.py" />
+    <Compile Include="tool\steps\promptforbugortitle.py" />
+    <Compile Include="tool\steps\reopenbugafterrollout.py" />
+    <Compile Include="tool\steps\revertrevision.py" />
+    <Compile Include="tool\steps\runtests.py" />
+    <Compile Include="tool\steps\runtests_unittest.py" />
+    <Compile Include="tool\steps\steps_unittest.py" />
+    <Compile Include="tool\steps\suggestreviewers.py" />
+    <Compile Include="tool\steps\suggestreviewers_unittest.py" />
+    <Compile Include="tool\steps\update.py" />
+    <Compile Include="tool\steps\updatechangelogswithreviewer.py" />
+    <Compile Include="tool\steps\updatechangelogswithreview_unittest.py" />
+    <Compile Include="tool\steps\updatechromiumdeps.py" />
+    <Compile Include="tool\steps\update_unittest.py" />
+    <Compile Include="tool\steps\validatechangelogs.py" />
+    <Compile Include="tool\steps\validatechangelogs_unittest.py" />
+    <Compile Include="tool\steps\validatereviewer.py" />
+    <Compile Include="tool\steps\__init__.py" />
+    <Compile Include="tool\__init__.py" />
+    <Compile Include="to_be_moved\update_webgl_conformance_tests.py" />
+    <Compile Include="to_be_moved\update_webgl_conformance_tests_unittest.py" />
+    <Compile Include="to_be_moved\__init__.py" />
+    <Compile Include="__init__.py" />
+  </ItemGroup>
+  <ItemGroup>
+    <Folder Include="bindings\" />
+    <Folder Include="common\" />
+    <Folder Include="common\checkout\" />
+    <Folder Include="common\checkout\scm\" />
+    <Folder Include="common\config\" />
+    <Folder Include="common\net\" />
+    <Folder Include="common\net\bugzilla\" />
+    <Folder Include="common\net\buildbot\" />
+    <Folder Include="common\net\irc\" />
+    <Folder Include="common\system\" />
+    <Folder Include="common\thread\" />
+    <Folder Include="common\watchlist\" />
+    <Folder Include="layout_tests\" />
+    <Folder Include="layout_tests\controllers\" />
+    <Folder Include="layout_tests\layout_package\" />
+    <Folder Include="layout_tests\models\" />
+    <Folder Include="layout_tests\port\" />
+    <Folder Include="layout_tests\reftests\" />
+    <Folder Include="layout_tests\servers\" />
+    <Folder Include="layout_tests\views\" />
+    <Folder Include="performance_tests\" />
+    <Folder Include="style\" />
+    <Folder Include="style\checkers\" />
+    <Folder Include="test\" />
+    <Folder Include="thirdparty\" />
+    <Folder Include="thirdparty\mod_pywebsocket\" />
+    <Folder Include="thirdparty\mod_pywebsocket\handshake\" />
+    <Folder Include="tool\" />
+    <Folder Include="tool\bot\" />
+    <Folder Include="tool\commands\" />
+    <Folder Include="tool\servers\" />
+    <Folder Include="tool\steps\" />
+    <Folder Include="to_be_moved\" />
+  </ItemGroup>
+  <Import Project="$(MSBuildToolsPath)\Microsoft.Common.targets" />
+</Project>
\ No newline at end of file
diff --git a/Tools/Scripts/webkitpy/webkitpy.sln b/Tools/Scripts/webkitpy/webkitpy.sln
new file mode 100644
index 0000000..7648387
--- /dev/null
+++ b/Tools/Scripts/webkitpy/webkitpy.sln
@@ -0,0 +1,18 @@
+
+Microsoft Visual Studio Solution File, Format Version 11.00
+# Visual Studio 2010
+Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "webkitpy", "webkitpy.pyproj", "{59B0A791-93FE-40F8-A52B-BA19B73E8FA6}"
+EndProject
+Global
+	GlobalSection(SolutionConfigurationPlatforms) = preSolution
+		Debug|Any CPU = Debug|Any CPU
+		Release|Any CPU = Release|Any CPU
+	EndGlobalSection
+	GlobalSection(ProjectConfigurationPlatforms) = postSolution
+		{59B0A791-93FE-40F8-A52B-BA19B73E8FA6}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+		{59B0A791-93FE-40F8-A52B-BA19B73E8FA6}.Release|Any CPU.ActiveCfg = Release|Any CPU
+	EndGlobalSection
+	GlobalSection(SolutionProperties) = preSolution
+		HideSolutionNode = FALSE
+	EndGlobalSection
+EndGlobal