Merge from Chromium at DEPS revision r212014

This commit was generated by merge_to_master.py.

Change-Id: Ib07d03553a1e81485ac4ffc92b91e977c4875409
diff --git a/Tools/DumpRenderTree/DumpRenderTree.gyp/DumpRenderTree.gyp b/Tools/DumpRenderTree/DumpRenderTree.gyp/DumpRenderTree.gyp
index 962c12f..5c82d46 100644
--- a/Tools/DumpRenderTree/DumpRenderTree.gyp/DumpRenderTree.gyp
+++ b/Tools/DumpRenderTree/DumpRenderTree.gyp/DumpRenderTree.gyp
@@ -151,8 +151,8 @@
             'dependencies': [
                 '<(DEPTH)/net/net.gyp:net_resources',
                 '<(DEPTH)/ui/ui.gyp:ui_resources',
-                '<(DEPTH)/webkit/support/webkit_support.gyp:webkit_resources',
-                '<(DEPTH)/webkit/support/webkit_support.gyp:webkit_strings',
+                '<(DEPTH)/webkit/webkit_resources.gyp:webkit_resources',
+                '<(DEPTH)/webkit/webkit_resources.gyp:webkit_strings',
             ],
             'actions': [{
                 'action_name': 'repack_local',
diff --git a/Tools/DumpRenderTree/DumpRenderTree.gypi b/Tools/DumpRenderTree/DumpRenderTree.gypi
index fae5d1e..79f3ade 100644
--- a/Tools/DumpRenderTree/DumpRenderTree.gypi
+++ b/Tools/DumpRenderTree/DumpRenderTree.gypi
@@ -23,6 +23,8 @@
             'chromium/TestRunner/src/MockGrammarCheck.h',
             'chromium/TestRunner/src/MockSpellCheck.cpp',
             'chromium/TestRunner/src/MockSpellCheck.h',
+            'chromium/TestRunner/src/MockWebAudioDevice.cpp',
+            'chromium/TestRunner/src/MockWebAudioDevice.h',
             'chromium/TestRunner/src/MockWebMediaStreamCenter.cpp',
             'chromium/TestRunner/src/MockWebMediaStreamCenter.h',
             'chromium/TestRunner/src/MockWebMIDIAccessor.cpp',
diff --git a/Tools/DumpRenderTree/TestNetscapePlugIn/TestObject.cpp b/Tools/DumpRenderTree/TestNetscapePlugIn/TestObject.cpp
index ac2df7a..c731494 100644
--- a/Tools/DumpRenderTree/TestNetscapePlugIn/TestObject.cpp
+++ b/Tools/DumpRenderTree/TestNetscapePlugIn/TestObject.cpp
@@ -69,7 +69,6 @@
 typedef struct {
     NPObject header;
     NPObject* testObject;
-    NPP owner;
 } TestObject;
 
 static bool identifiersInitialized = false;
@@ -112,7 +111,6 @@
 {
     TestObject* newInstance = static_cast<TestObject*>(malloc(sizeof(TestObject)));
     newInstance->testObject = 0;
-    newInstance->owner = npp;
     ++testObjectCount;
 
     if (!identifiersInitialized) {
@@ -178,7 +176,7 @@
     if (name == testIdentifiers[ID_PROPERTY_TEST_OBJECT]) {
         TestObject* testObject = reinterpret_cast<TestObject*>(npobj);
         if (!testObject->testObject)
-            testObject->testObject = browser->createobject(testObject->owner, &testClass);
+            testObject->testObject = browser->createobject(0, &testClass);
         browser->retainobject(testObject->testObject);
         OBJECT_TO_NPVARIANT(testObject->testObject, *result);
         return true;
diff --git a/Tools/DumpRenderTree/TestNetscapePlugIn/main.cpp b/Tools/DumpRenderTree/TestNetscapePlugIn/main.cpp
index 730b842..8b9e5bf 100644
--- a/Tools/DumpRenderTree/TestNetscapePlugIn/main.cpp
+++ b/Tools/DumpRenderTree/TestNetscapePlugIn/main.cpp
@@ -46,8 +46,9 @@
     ((void(*)())0)(); /* More reliable, but doesn't say BBADBEEF */ \
 } while(false)
 
-static bool getEntryPointsWasCalled;
-static bool initializeWasCalled;
+static bool getEntryPointsWasCalled = false;
+static bool initializeWasCalled = false;
+static NPClass* pluginObjectClass = 0;
 
 #if defined(XP_WIN)
 #define STDCALL __stdcall
@@ -73,6 +74,9 @@
 #endif
                               )
 {
+    // Create a copy of the PluginObject NPClass that we can trash on shutdown.
+    pluginObjectClass = createPluginClass();
+
     initializeWasCalled = true;
 
 #if defined(XP_WIN)
@@ -125,6 +129,13 @@
 extern "C"
 void STDCALL NP_Shutdown(void)
 {
+    // Trash the PluginObject NPClass so that the process will deterministically
+    // crash if Blink tries to call into the plugin's NPObjects after unloading
+    // it, rather than relying on OS-specific DLL unload behaviour.
+    // Note that we deliberately leak the NPClass copy so that it remains a
+    // guard for the lifetime of the process.
+    memset(pluginObjectClass, 0xf00dbeef, sizeof(NPClass));
+
     PluginTest::NP_Shutdown();
 }
 
@@ -176,7 +187,7 @@
      browser->setvalue(instance, NPPVpluginEventModel, (void *)eventModel);
 #endif // XP_MACOSX
 
-    PluginObject* obj = (PluginObject*)browser->createobject(instance, createPluginClass());
+    PluginObject* obj = (PluginObject*)browser->createobject(instance, pluginObjectClass);
     instance->pdata = obj;
 
 #ifdef XP_MACOSX
@@ -337,15 +348,7 @@
         if (obj->pluginTest)
             obj->pluginTest->NPP_Destroy(save);
 
-        // Save the object's class, release the object, then trash the class.
-        NPClass* scriptClass = obj->header._class;
         browser->releaseobject(&obj->header);
-
-        // FIXME: This breaks
-        // plugins/npruntime/delete-plugin-within-invoke.html on Mac, since the
-        // plugin object is released only after NPP_Destroy completes. Verify
-        // that that behaviour is sane, and move this to NPP_Shutdown if so.
-        // memset(scriptClass, 0xf00dbeef, sizeof(NPClass));
     }
     return NPERR_NO_ERROR;
 }
diff --git a/Tools/DumpRenderTree/chromium/TestRunner/src/AccessibilityControllerChromium.cpp b/Tools/DumpRenderTree/chromium/TestRunner/src/AccessibilityControllerChromium.cpp
index 5ec6147..4100457 100644
--- a/Tools/DumpRenderTree/chromium/TestRunner/src/AccessibilityControllerChromium.cpp
+++ b/Tools/DumpRenderTree/chromium/TestRunner/src/AccessibilityControllerChromium.cpp
@@ -143,7 +143,7 @@
         arguments[0].set(*element->getAsCppVariant());
         arguments[1].set(notificationName);
         CppVariant invokeResult;
-        m_notificationCallbacks[i].invokeDefault(npp(), arguments, 2, invokeResult);
+        m_notificationCallbacks[i].invokeDefault(arguments, 2, invokeResult);
     }
 }
 
diff --git a/Tools/DumpRenderTree/chromium/TestRunner/src/AccessibilityUIElementChromium.cpp b/Tools/DumpRenderTree/chromium/TestRunner/src/AccessibilityUIElementChromium.cpp
index 7baef64..1b34a2c 100644
--- a/Tools/DumpRenderTree/chromium/TestRunner/src/AccessibilityUIElementChromium.cpp
+++ b/Tools/DumpRenderTree/chromium/TestRunner/src/AccessibilityUIElementChromium.cpp
@@ -474,7 +474,7 @@
         CppVariant notificationNameArgument;
         notificationNameArgument.set(notificationName);
         CppVariant invokeResult;
-        m_notificationCallbacks[i].invokeDefault(npp(), &notificationNameArgument, 1, invokeResult);
+        m_notificationCallbacks[i].invokeDefault(&notificationNameArgument, 1, invokeResult);
     }
 }
 
diff --git a/Tools/DumpRenderTree/chromium/TestRunner/src/CppBoundClass.cpp b/Tools/DumpRenderTree/chromium/TestRunner/src/CppBoundClass.cpp
index 91fe8d6..37ca948 100644
--- a/Tools/DumpRenderTree/chromium/TestRunner/src/CppBoundClass.cpp
+++ b/Tools/DumpRenderTree/chromium/TestRunner/src/CppBoundClass.cpp
@@ -205,11 +205,6 @@
     return obj->boundClass->setProperty(ident, value);
 }
 
-CppBoundClass::CppBoundClass() : m_npp(new NPP_t)
-{
-    WebBindings::registerObjectOwner(npp());
-}
-
 CppBoundClass::~CppBoundClass()
 {
     for (MethodList::iterator i = m_methods.begin(); i != m_methods.end(); ++i)
@@ -218,7 +213,9 @@
     for (PropertyList::iterator i = m_properties.begin(); i != m_properties.end(); ++i)
         delete i->second;
 
-    WebBindings::unregisterObjectOwner(npp());
+    // Unregister ourselves if we were bound to a frame.
+    if (m_boundToFrame)
+        WebBindings::unregisterObject(NPVARIANT_TO_OBJECT(m_selfVariant));
 }
 
 bool CppBoundClass::hasMethod(NPIdentifier ident) const
@@ -337,8 +334,10 @@
 CppVariant* CppBoundClass::getAsCppVariant()
 {
     if (!m_selfVariant.isObject()) {
-        // Create an NPObject using our static NPClass.
-        NPObject* npObj = WebBindings::createObject(npp(), &CppNPObject::npClass);
+        // Create an NPObject using our static NPClass. The first argument (a
+        // plugin's instance handle) is passed straight through to the allocate
+        // function; we don't use it, so passing 0 is fine.
+        NPObject* npObj = WebBindings::createObject(0, &CppNPObject::npClass);
         CppNPObject* obj = reinterpret_cast<CppNPObject*>(npObj);
         obj->boundClass = this;
         m_selfVariant.set(npObj);
@@ -352,9 +351,9 @@
 {
     // BindToWindowObject will take its own reference to the NPObject, and clean
     // up after itself. It will also (indirectly) register the object with V8,
-    // against an owner pointer we supply, so we must register as an owner,
-    // and unregister when we teardown.
-    frame->bindToWindowObject(classname, NPVARIANT_TO_OBJECT(*getAsCppVariant()));
+    // so we must remember that we are bound and unregister the object when we're destroyed.
+    frame->bindToWindowObject(classname, NPVARIANT_TO_OBJECT(*getAsCppVariant()), 0);
+    m_boundToFrame = true;
 }
 
 }
diff --git a/Tools/DumpRenderTree/chromium/TestRunner/src/CppBoundClass.h b/Tools/DumpRenderTree/chromium/TestRunner/src/CppBoundClass.h
index 94188d1..952a41c 100644
--- a/Tools/DumpRenderTree/chromium/TestRunner/src/CppBoundClass.h
+++ b/Tools/DumpRenderTree/chromium/TestRunner/src/CppBoundClass.h
@@ -83,7 +83,7 @@
     // The constructor should call BindMethod, BindProperty, and
     // SetFallbackMethod as needed to set up the methods, properties, and
     // fallback method.
-    CppBoundClass();
+    CppBoundClass() : m_boundToFrame(false) { }
     virtual ~CppBoundClass();
 
     // Return a CppVariant representing this class, for use with BindProperty().
@@ -208,9 +208,6 @@
             bindFallbackCallback(std::auto_ptr<Callback>());
     }
 
-    // Returns the NPP value to pass to WebBindings calls.
-    NPP npp() { return m_npp.get(); }
-
     // Some fields are protected because some tests depend on accessing them,
     // but otherwise they should be considered private.
 
@@ -238,8 +235,9 @@
     // reference to this object, and it is released on deletion.
     CppVariant m_selfVariant;
 
-    // Dummy NPP to use to register as owner for NPObjects.
-    std::auto_ptr<struct _NPP> m_npp;
+    // True if our NPObject has been bound to a WebFrame, in which case it must
+    // be unregistered with V8 when we are destroyed.
+    bool m_boundToFrame;
 
 private:
     CppBoundClass(CppBoundClass&);
diff --git a/Tools/DumpRenderTree/chromium/TestRunner/src/CppVariant.cpp b/Tools/DumpRenderTree/chromium/TestRunner/src/CppVariant.cpp
index ac8cad7..09545d3 100644
--- a/Tools/DumpRenderTree/chromium/TestRunner/src/CppVariant.cpp
+++ b/Tools/DumpRenderTree/chromium/TestRunner/src/CppVariant.cpp
@@ -295,27 +295,27 @@
     return stringVector;
 }
 
-bool CppVariant::invoke(NPP npp, const string& method, const CppVariant* arguments,
+bool CppVariant::invoke(const string& method, const CppVariant* arguments,
                         uint32_t argumentCount, CppVariant& result) const
 {
     WEBKIT_ASSERT(isObject());
     NPIdentifier methodName = WebBindings::getStringIdentifier(method.c_str());
     NPObject* npObject = value.objectValue;
-    if (!WebBindings::hasMethod(npp, npObject, methodName))
+    if (!WebBindings::hasMethod(0, npObject, methodName))
         return false;
     NPVariant r;
-    bool status = WebBindings::invoke(npp, npObject, methodName, arguments, argumentCount, &r);
+    bool status = WebBindings::invoke(0, npObject, methodName, arguments, argumentCount, &r);
     result.set(r);
     return status;
 }
 
-bool CppVariant::invokeDefault(NPP npp, const CppVariant* arguments, uint32_t argumentCount,
+bool CppVariant::invokeDefault(const CppVariant* arguments, uint32_t argumentCount,
                                CppVariant& result) const
 {
     WEBKIT_ASSERT(isObject());
     NPObject* npObject = value.objectValue;
     NPVariant r;
-    bool status = WebBindings::invokeDefault(npp, npObject, arguments, argumentCount, &r);
+    bool status = WebBindings::invokeDefault(0, npObject, arguments, argumentCount, &r);
     result.set(r);
     return status;
 }
diff --git a/Tools/DumpRenderTree/chromium/TestRunner/src/CppVariant.h b/Tools/DumpRenderTree/chromium/TestRunner/src/CppVariant.h
index 2240ff5..937e7fb 100644
--- a/Tools/DumpRenderTree/chromium/TestRunner/src/CppVariant.h
+++ b/Tools/DumpRenderTree/chromium/TestRunner/src/CppVariant.h
@@ -127,7 +127,7 @@
     // invoked. Returns whether the method was successfully invoked. If the
     // method was invoked successfully, any return value is stored in the
     // CppVariant specified by result.
-    bool invoke(NPP, const std::string&, const CppVariant* arguments,
+    bool invoke(const std::string&, const CppVariant* arguments,
                 uint32_t argumentCount, CppVariant& result) const;
 
     // Invoke an object's default method with the supplied arguments.
@@ -135,7 +135,7 @@
     // invoked. Returns whether the method was successfully invoked. If the
     // method was invoked successfully, any return value is stored in the
     // CppVariant specified by result.
-    bool invokeDefault(NPP, const CppVariant* arguments,
+    bool invokeDefault(const CppVariant* arguments,
                        uint32_t argumentCount, CppVariant& result) const;
 };
 
diff --git a/Tools/DumpRenderTree/chromium/TestRunner/src/MockWebAudioDevice.cpp b/Tools/DumpRenderTree/chromium/TestRunner/src/MockWebAudioDevice.cpp
new file mode 100644
index 0000000..8633e02
--- /dev/null
+++ b/Tools/DumpRenderTree/chromium/TestRunner/src/MockWebAudioDevice.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2013 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "MockWebAudioDevice.h"
+
+using namespace WebKit;
+
+namespace WebTestRunner {
+
+MockWebAudioDevice::MockWebAudioDevice(double sampleRate)
+    : m_sampleRate(sampleRate)
+{
+}
+
+MockWebAudioDevice::~MockWebAudioDevice()
+{
+}
+
+void MockWebAudioDevice::start()
+{
+}
+
+void MockWebAudioDevice::stop()
+{
+}
+
+double MockWebAudioDevice::sampleRate()
+{
+    return m_sampleRate;
+}
+
+} // namespace WebTestRunner
diff --git a/Tools/DumpRenderTree/chromium/TestRunner/src/MockWebAudioDevice.h b/Tools/DumpRenderTree/chromium/TestRunner/src/MockWebAudioDevice.h
new file mode 100644
index 0000000..7801d48
--- /dev/null
+++ b/Tools/DumpRenderTree/chromium/TestRunner/src/MockWebAudioDevice.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2013 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MockWebAudioDevice_h
+#define MockWebAudioDevice_h
+
+#include "TestCommon.h"
+#include "public/platform/WebAudioDevice.h"
+
+namespace WebTestRunner {
+
+class MockWebAudioDevice : public WebKit::WebAudioDevice {
+public:
+    explicit MockWebAudioDevice(double sampleRate);
+    virtual ~MockWebAudioDevice();
+
+    virtual void start();
+    virtual void stop();
+    virtual double sampleRate();
+
+private:
+    double m_sampleRate;
+};
+
+} // namespace WebTestRunner
+
+#endif // MockWebAudioDevice_h
diff --git a/Tools/DumpRenderTree/chromium/TestRunner/src/TestPlugin.cpp b/Tools/DumpRenderTree/chromium/TestRunner/src/TestPlugin.cpp
index 09c8a1c..e16edfd 100644
--- a/Tools/DumpRenderTree/chromium/TestRunner/src/TestPlugin.cpp
+++ b/Tools/DumpRenderTree/chromium/TestRunner/src/TestPlugin.cpp
@@ -153,6 +153,9 @@
     , m_delegate(delegate)
     , m_container(0)
     , m_context(0)
+    , m_colorTexture(0)
+    , m_mailboxChanged(false)
+    , m_framebuffer(0)
     , m_touchEventRequest(WebPluginContainer::TouchEventRequestTypeNone)
     , m_reRequestTouchEvents(false)
     , m_printEventDetails(false)
@@ -227,6 +230,8 @@
 
 void TestPlugin::destroy()
 {
+    if (m_layer.get())
+        m_layer->clearTexture();
     if (m_container)
         m_container->setWebLayer(0);
     m_layer.reset();
@@ -263,8 +268,25 @@
 
     drawScene();
 
+    m_context->genMailboxCHROMIUM(m_mailbox.name);
+    m_context->produceTextureCHROMIUM(GL_TEXTURE_2D, m_mailbox.name);
+
     m_context->flush();
     m_layer->layer()->invalidate();
+    m_mailboxChanged = true;
+}
+
+bool TestPlugin::prepareMailbox(WebKit::WebExternalTextureMailbox* mailbox, WebKit::WebExternalBitmap*)
+{
+    if (!m_mailboxChanged)
+        return false;
+    *mailbox = m_mailbox;
+    m_mailboxChanged = false;
+    return true;
+}
+
+void TestPlugin::mailboxReleased(const WebKit::WebExternalTextureMailbox&)
+{
 }
 
 TestPlugin::Primitive TestPlugin::parsePrimitive(const WebString& string)
diff --git a/Tools/DumpRenderTree/chromium/TestRunner/src/TestPlugin.h b/Tools/DumpRenderTree/chromium/TestRunner/src/TestPlugin.h
index b9131b7..a71350a 100644
--- a/Tools/DumpRenderTree/chromium/TestRunner/src/TestPlugin.h
+++ b/Tools/DumpRenderTree/chromium/TestRunner/src/TestPlugin.h
@@ -28,6 +28,7 @@
 
 #include "public/platform/WebExternalTextureLayer.h"
 #include "public/platform/WebExternalTextureLayerClient.h"
+#include "public/platform/WebExternalTextureMailbox.h"
 #include "public/web/WebPlugin.h"
 #include "public/web/WebPluginContainer.h"
 #include <memory>
@@ -60,7 +61,6 @@
     virtual bool initialize(WebKit::WebPluginContainer*);
     virtual void destroy();
     virtual NPObject* scriptableObject() { return 0; }
-    virtual struct _NPP* pluginNPP() { return 0; }
     virtual bool canProcessDrag() const { return m_canProcessDrag; }
     virtual void paint(WebKit::WebCanvas*, const WebKit::WebRect&) { }
     virtual void updateGeometry(const WebKit::WebRect& frameRect, const WebKit::WebRect& clipRect, const WebKit::WebVector<WebKit::WebRect>& cutOutsRects, bool isVisible);
@@ -78,10 +78,9 @@
     virtual bool isPlaceholder() { return false; }
 
     // WebExternalTextureLayerClient methods:
-    virtual unsigned prepareTexture(WebKit::WebTextureUpdater&) { return m_colorTexture; }
-    virtual WebKit::WebGraphicsContext3D* context() { return m_context; }
-    virtual bool prepareMailbox(WebKit::WebExternalTextureMailbox*) { return false; };
-    virtual void mailboxReleased(const WebKit::WebExternalTextureMailbox&) { }
+    virtual WebKit::WebGraphicsContext3D* context() { return 0; }
+    virtual bool prepareMailbox(WebKit::WebExternalTextureMailbox*, WebKit::WebExternalBitmap*);
+    virtual void mailboxReleased(const WebKit::WebExternalTextureMailbox&);
 
 private:
     TestPlugin(WebKit::WebFrame*, const WebKit::WebPluginParams&, WebTestDelegate*);
@@ -138,6 +137,8 @@
     WebKit::WebRect m_rect;
     WebKit::WebGraphicsContext3D* m_context;
     unsigned m_colorTexture;
+    WebKit::WebExternalTextureMailbox m_mailbox;
+    bool m_mailboxChanged;
     unsigned m_framebuffer;
     Scene m_scene;
     std::auto_ptr<WebKit::WebExternalTextureLayer> m_layer;
diff --git a/Tools/DumpRenderTree/chromium/TestRunner/src/TestRunner.cpp b/Tools/DumpRenderTree/chromium/TestRunner/src/TestRunner.cpp
index 808d099..78a1c77 100644
--- a/Tools/DumpRenderTree/chromium/TestRunner/src/TestRunner.cpp
+++ b/Tools/DumpRenderTree/chromium/TestRunner/src/TestRunner.cpp
@@ -77,9 +77,8 @@
 
 class InvokeCallbackTask : public WebMethodTask<TestRunner> {
 public:
-    InvokeCallbackTask(TestRunner* object, NPP npp, auto_ptr<CppVariant> callbackArguments)
+    InvokeCallbackTask(TestRunner* object, auto_ptr<CppVariant> callbackArguments)
         : WebMethodTask<TestRunner>(object)
-        , m_npp(npp)
         , m_callbackArguments(callbackArguments)
     {
     }
@@ -87,11 +86,10 @@
     virtual void runIfValid()
     {
         CppVariant invokeResult;
-        m_callbackArguments->invokeDefault(m_npp, m_callbackArguments.get(), 1, invokeResult);
+        m_callbackArguments->invokeDefault(m_callbackArguments.get(), 1, invokeResult);
     }
 
 private:
-    NPP m_npp;
     auto_ptr<CppVariant> m_callbackArguments;
 };
 
@@ -201,7 +199,6 @@
     bindMethod("setFixedLayoutSize", &TestRunner::setFixedLayoutSize);
     bindMethod("selectionAsMarkup", &TestRunner::selectionAsMarkup);
     bindMethod("setTextSubpixelPositioning", &TestRunner::setTextSubpixelPositioning);
-    bindMethod("resetPageVisibility", &TestRunner::resetPageVisibility);
     bindMethod("setPageVisibility", &TestRunner::setPageVisibility);
     bindMethod("setTextDirection", &TestRunner::setTextDirection);
     bindMethod("textSurroundingNode", &TestRunner::textSurroundingNode);
@@ -296,6 +293,7 @@
     bindProperty("titleTextDirection", &m_titleTextDirection);
     bindProperty("platformName", &m_platformName);
     bindProperty("tooltipText", &m_tooltipText);
+    bindProperty("disableNotifyDone", &m_disableNotifyDone);
 
     // webHistoryItemCount is used by tests in LayoutTests\http\tests\history
     bindProperty("webHistoryItemCount", &m_webHistoryItemCount);
@@ -360,7 +358,9 @@
         m_webView->setSelectionColors(0xff1e90ff, 0xff000000, 0xffc8c8c8, 0xff323232);
 #endif
         m_webView->removeAllUserContent();
+        m_webView->setVisibilityState(WebPageVisibilityStateVisible, true);
     }
+
     m_topLoadingFrame = 0;
     m_waitUntilDone = false;
     m_policyDelegateEnabled = false;
@@ -417,6 +417,7 @@
     m_interceptPostMessage.set(false);
     m_platformName.set("chromium");
     m_tooltipText.set("");
+    m_disableNotifyDone.set(false);
 
     m_userStyleSheetLocation = WebURL();
 
@@ -753,6 +754,9 @@
 
 void TestRunner::notifyDone(const CppArgumentList&, CppVariant* result)
 {
+    if (m_disableNotifyDone.toBoolean())
+        return;
+
     // Test didn't timeout. Kill the timeout timer.
     taskList()->revokeAll();
 
@@ -1373,11 +1377,6 @@
     result->setNull();
 }
 
-void TestRunner::resetPageVisibility(const CppArgumentList& arguments, CppVariant* result)
-{
-    m_webView->setVisibilityState(WebPageVisibilityStateVisible, true);
-}
-
 void TestRunner::setPageVisibility(const CppArgumentList& arguments, CppVariant* result)
 {
     if (arguments.size() > 0 && arguments[0].isString()) {
@@ -1710,7 +1709,7 @@
     auto_ptr<CppVariant> callbackArguments(new CppVariant());
     callbackArguments->set(arguments[1]);
     result->setNull();
-    m_delegate->postTask(new InvokeCallbackTask(this, npp(), callbackArguments));
+    m_delegate->postTask(new InvokeCallbackTask(this, callbackArguments));
 }
 
 void TestRunner::setPOSIXLocale(const CppArgumentList& arguments, CppVariant* result)
diff --git a/Tools/DumpRenderTree/chromium/TestRunner/src/TestRunner.h b/Tools/DumpRenderTree/chromium/TestRunner/src/TestRunner.h
index ee3e9e4..5e21add 100644
--- a/Tools/DumpRenderTree/chromium/TestRunner/src/TestRunner.h
+++ b/Tools/DumpRenderTree/chromium/TestRunner/src/TestRunner.h
@@ -257,7 +257,6 @@
 
     // Switch the visibility of the page.
     void setPageVisibility(const CppArgumentList&, CppVariant*);
-    void resetPageVisibility(const CppArgumentList&, CppVariant*);
 
     // Changes the direction of the focused element.
     void setTextDirection(const CppArgumentList&, CppVariant*);
@@ -564,6 +563,12 @@
     // Bound variable to store the last tooltip text
     CppVariant m_tooltipText;
 
+    // Bound variable to disable notifyDone calls. This is used in GC leak
+    // tests, where existing LayoutTests are loaded within an iframe. The GC
+    // test harness will set this flag to ignore the notifyDone calls from the
+    // target LayoutTest.
+    CppVariant m_disableNotifyDone;
+
     // If true, the test_shell will write a descriptive line for each editing
     // command.
     bool m_dumpEditingCallbacks;
diff --git a/Tools/DumpRenderTree/chromium/TestRunner/src/WebTestInterfaces.cpp b/Tools/DumpRenderTree/chromium/TestRunner/src/WebTestInterfaces.cpp
index 1ad8824..5f34563 100644
--- a/Tools/DumpRenderTree/chromium/TestRunner/src/WebTestInterfaces.cpp
+++ b/Tools/DumpRenderTree/chromium/TestRunner/src/WebTestInterfaces.cpp
@@ -30,6 +30,7 @@
 
 #include "public/testing/WebTestInterfaces.h"
 
+#include "MockWebAudioDevice.h"
 #include "MockWebMIDIAccessor.h"
 #include "MockWebMediaStreamCenter.h"
 #include "MockWebRTCPeerConnectionHandler.h"
@@ -109,4 +110,9 @@
     return new MockWebMIDIAccessor(client);
 }
 
+WebAudioDevice* WebTestInterfaces::createAudioDevice(double sampleRate)
+{
+    return new MockWebAudioDevice(sampleRate);
+}
+
 }
diff --git a/Tools/DumpRenderTree/chromium/TestRunner/src/WebTestProxy.cpp b/Tools/DumpRenderTree/chromium/TestRunner/src/WebTestProxy.cpp
index 486cf83..0610585 100644
--- a/Tools/DumpRenderTree/chromium/TestRunner/src/WebTestProxy.cpp
+++ b/Tools/DumpRenderTree/chromium/TestRunner/src/WebTestProxy.cpp
@@ -1151,25 +1151,6 @@
     return 0 < m_chooserCount;
 }
 
-void WebTestProxyBase::willPerformClientRedirect(WebFrame* frame, const WebURL&, const WebURL& to, double, double)
-{
-    if (m_testInterfaces->testRunner()->shouldDumpFrameLoadCallbacks()) {
-        printFrameDescription(m_delegate, frame);
-        m_delegate->printMessage(string(" - willPerformClientRedirectToURL: ") + to.spec().data() + " \n");
-    }
-
-    if (m_testInterfaces->testRunner()->shouldDumpUserGestureInFrameLoadCallbacks())
-        printFrameUserGestureStatus(m_delegate, frame, " - in willPerformClientRedirect\n");
-}
-
-void WebTestProxyBase::didCancelClientRedirect(WebFrame* frame)
-{
-    if (m_testInterfaces->testRunner()->shouldDumpFrameLoadCallbacks()) {
-        printFrameDescription(m_delegate, frame);
-        m_delegate->printMessage(" - didCancelClientRedirectForFrame\n");
-    }
-}
-
 void WebTestProxyBase::didStartProvisionalLoad(WebFrame* frame)
 {
     if (!m_testInterfaces->testRunner()->topLoadingFrame())
diff --git a/Tools/GardeningServer/scripts/base.js b/Tools/GardeningServer/scripts/base.js
index fb05c76..d96c3b8 100644
--- a/Tools/GardeningServer/scripts/base.js
+++ b/Tools/GardeningServer/scripts/base.js
@@ -27,34 +27,6 @@
 
 (function(){
 
-// Safari 5.1 lacks Function.prototype.bind.
-if (!('bind' in Function.prototype)) {
-    Function.prototype.bind = function(thisObject) {
-        var method = this;
-        var boundArguments = [];
-        for (var i = 1; i < arguments.length; ++i) {
-            boundArguments.push(arguments[i]);
-        }
-        return function() {
-            var actualParameters = [];
-            for (var i = 0; i < boundArguments.length; ++i) {
-                actualParameters.push(boundArguments[i]);
-            }
-            for (var i = 0; i < arguments.length; ++i) {
-                actualParameters.push(arguments[i]);
-            }
-            return method.apply(thisObject, actualParameters);
-        }
-    }
-}
-
-base.asInteger = function(stringOrInteger)
-{
-    if (typeof stringOrInteger == 'string')
-        return parseInt(stringOrInteger);
-    return stringOrInteger;
-};
-
 base.endsWith = function(string, suffix)
 {
     if (suffix.length > string.length)
@@ -63,11 +35,6 @@
     return string.lastIndexOf(suffix) == expectedIndex;
 };
 
-base.repeatString = function(string, count)
-{
-    return new Array(count + 1).join(string);
-};
-
 base.joinPath = function(parent, child)
 {
     if (parent.length == 0)
@@ -221,58 +188,6 @@
     });
 };
 
-base.callInSequence = function(func, argumentList, callback)
-{
-    var nextIndex = 0;
-
-    function callNext()
-    {
-        if (nextIndex >= argumentList.length) {
-            callback();
-            return;
-        }
-
-        func(argumentList[nextIndex++], callNext);
-    }
-
-    callNext();
-};
-
-base.CallbackIterator = function(callback, listOfArgumentArrays)
-{
-    this._callback = callback;
-    this._nextIndex = 0;
-    this._listOfArgumentArrays = listOfArgumentArrays;
-};
-
-base.CallbackIterator.prototype.hasNext = function()
-{
-    return this._nextIndex < this._listOfArgumentArrays.length;
-};
-
-base.CallbackIterator.prototype.hasPrevious = function()
-{
-    return this._nextIndex - 2 >= 0;
-};
-
-base.CallbackIterator.prototype.callNext = function()
-{
-    if (!this.hasNext())
-        return;
-    var args = this._listOfArgumentArrays[this._nextIndex];
-    this._nextIndex++;
-    this._callback.apply(null, args);
-};
-
-base.CallbackIterator.prototype.callPrevious = function()
-{
-    if (!this.hasPrevious())
-        return;
-    var args = this._listOfArgumentArrays[this._nextIndex - 2];
-    this._nextIndex--;
-    this._callback.apply(null, args);
-};
-
 base.AsynchronousCache = function(fetch)
 {
     this._fetch = fetch;
diff --git a/Tools/GardeningServer/scripts/base_unittests.js b/Tools/GardeningServer/scripts/base_unittests.js
index d5d182c..3a69d0a 100644
--- a/Tools/GardeningServer/scripts/base_unittests.js
+++ b/Tools/GardeningServer/scripts/base_unittests.js
@@ -27,22 +27,6 @@
 
 module("base");
 
-test("bind", 3, function() {
-    function func(a, b) {
-        equals(this.prop, 5);
-        equals(a, "banana");
-        deepEqual(b, [2, 3, 4]);
-    }
-
-    var thisObject = {
-        "prop": 5
-    };
-
-    var bound = func.bind(thisObject, "banana");
-    bound([2, 3, 4]);
-});
-
-
 test("joinPath", 1, function() {
     var value = base.joinPath("path/to", "test.html");
     equals(value, "path/to/test.html");
@@ -122,20 +106,6 @@
     })
 });
 
-test("callInSequence", 7, function() {
-    var expectedArg = 42;
-    var expectCompletionCallback = true;
-
-    base.callInSequence(function(arg, callback) {
-        ok(arg < 45);
-        equals(arg, expectedArg++);
-        callback();
-    }, [42, 43, 44], function() {
-        ok(expectCompletionCallback);
-        expectCompletionCallback = false;
-    })
-});
-
 test("RequestTracker", 5, function() {
     var ready = false;
     var tracker = new base.RequestTracker(1, function() {
@@ -165,38 +135,6 @@
     ok(true);
 });
 
-test("CallbackIterator", 22, function() {
-    var expected = 0;
-    var iterator = new base.CallbackIterator(function(a, b) {
-        equals(a, 'ArgA' + expected);
-        equals(b, 'ArgB' + expected);
-        ++expected;
-    }, [
-        ['ArgA0', 'ArgB0'],
-        ['ArgA1', 'ArgB1'],
-        ['ArgA2', 'ArgB2'],
-    ]);
-    ok(iterator.hasNext())
-    ok(!iterator.hasPrevious())
-    iterator.callNext();
-    ok(iterator.hasNext())
-    ok(!iterator.hasPrevious())
-    iterator.callNext();
-    ok(iterator.hasNext())
-    ok(iterator.hasPrevious())
-    iterator.callNext();
-    ok(!iterator.hasNext())
-    ok(iterator.hasPrevious())
-    expected = 1;
-    iterator.callPrevious();
-    ok(iterator.hasNext())
-    ok(iterator.hasPrevious())
-    expected = 0;
-    iterator.callPrevious();
-    ok(iterator.hasNext())
-    ok(!iterator.hasPrevious())
-});
-
 test("filterDictionary", 3, function() {
     var dictionary = {
         'foo': 43,
diff --git a/Tools/GardeningServer/scripts/builders_unittests.js b/Tools/GardeningServer/scripts/builders_unittests.js
index c80a584..ff4bf9a 100644
--- a/Tools/GardeningServer/scripts/builders_unittests.js
+++ b/Tools/GardeningServer/scripts/builders_unittests.js
@@ -90,7 +90,7 @@
 var kExampleBuildInfoJSON = {
     "blame": ["abarth@webkit.org"],
     "builderName": "WebKit Linux",
-    "changes": ["Files:\n Tools/BuildSlaveSupport/build.webkit.org-config/public_html/TestFailures/main.js\n Tools/ChangeLog\nAt: Thu 04 Aug 2011 00:50:38\nChanged By: abarth@webkit.org\nComments: Fix types.  Sadly, main.js has no test coverage.  (I need to think\nabout how to test this part of the code.)\n\n* BuildSlaveSupport/build.webkit.org-config/public_html/TestFailures/main.js:Properties: \n\n\n", "Files:\n LayoutTests/ChangeLog\n LayoutTests/platform/chromium-mac/fast/box-shadow/inset-box-shadows-expected.png\n LayoutTests/platform/chromium-mac/fast/repaint/shadow-multiple-horizontal-expected.png\n LayoutTests/platform/chromium-mac/fast/repaint/shadow-multiple-strict-horizontal-expected.png\n LayoutTests/platform/chromium-mac/fast/repaint/shadow-multiple-strict-vertical-expected.png\n LayoutTests/platform/chromium-mac/fast/repaint/shadow-multiple-vertical-expected.png\nAt: Thu 04 Aug 2011 00:50:38\nChanged By: abarth@webkit.org\nComments: Update baselines after <http://trac.webkit.org/changeset/92340>.\n\n* platform/chromium-mac/fast/box-shadow/inset-box-shadows-expected.png:\n* platform/chromium-mac/fast/repaint/shadow-multiple-horizontal-expected.png:\n* platform/chromium-mac/fast/repaint/shadow-multiple-strict-horizontal-expected.png:\n* platform/chromium-mac/fast/repaint/shadow-multiple-strict-vertical-expected.png:\n* platform/chromium-mac/fast/repaint/shadow-multiple-vertical-expected.png:Properties: \n\n\n"],
+    "changes": ["Files:\n Tools/BuildSlaveSupport/build.webkit.org-config/public_html/TestFailures/main.js\n Tools/ChangeLog\nAt: Thu 04 Aug 2011 00:50:38\nChanged By: abarth@webkit.org\nComments: Fix types.  Sadly, main.js has no test coverage.  (I need to think\nabout how to test this part of the code.)\n\n* BuildSlaveSupport/build.webkit.org-config/public_html/TestFailures/main.js:Properties: \n\n\n", "Files:\n LayoutTests/ChangeLog\n LayoutTests/platform/mac/fast/box-shadow/inset-box-shadows-expected.png\n LayoutTests/platform/mac/fast/repaint/shadow-multiple-horizontal-expected.png\n LayoutTests/platform/mac/fast/repaint/shadow-multiple-strict-horizontal-expected.png\n LayoutTests/platform/mac/fast/repaint/shadow-multiple-strict-vertical-expected.png\n LayoutTests/platform/mac/fast/repaint/shadow-multiple-vertical-expected.png\nAt: Thu 04 Aug 2011 00:50:38\nChanged By: abarth@webkit.org\nComments: Update baselines after <http://trac.webkit.org/changeset/92340>.\n\n* platform/mac/fast/box-shadow/inset-box-shadows-expected.png:\n* platform/mac/fast/repaint/shadow-multiple-horizontal-expected.png:\n* platform/mac/fast/repaint/shadow-multiple-strict-horizontal-expected.png:\n* platform/mac/fast/repaint/shadow-multiple-strict-vertical-expected.png:\n* platform/mac/fast/repaint/shadow-multiple-vertical-expected.png:Properties: \n\n\n"],
     "currentStep": null,
     "eta": null,
     "logs": [
@@ -139,8 +139,8 @@
             }, {
                 "branch": "trunk",
                 "category": null,
-                "comments": "Update baselines after <http://trac.webkit.org/changeset/92340>.\n\n* platform/chromium-mac/fast/box-shadow/inset-box-shadows-expected.png:\n* platform/chromium-mac/fast/repaint/shadow-multiple-horizontal-expected.png:\n* platform/chromium-mac/fast/repaint/shadow-multiple-strict-horizontal-expected.png:\n* platform/chromium-mac/fast/repaint/shadow-multiple-strict-vertical-expected.png:\n* platform/chromium-mac/fast/repaint/shadow-multiple-vertical-expected.png:",
-                "files": ["LayoutTests/ChangeLog", "LayoutTests/platform/chromium-mac/fast/box-shadow/inset-box-shadows-expected.png", "LayoutTests/platform/chromium-mac/fast/repaint/shadow-multiple-horizontal-expected.png", "LayoutTests/platform/chromium-mac/fast/repaint/shadow-multiple-strict-horizontal-expected.png", "LayoutTests/platform/chromium-mac/fast/repaint/shadow-multiple-strict-vertical-expected.png", "LayoutTests/platform/chromium-mac/fast/repaint/shadow-multiple-vertical-expected.png"],
+                "comments": "Update baselines after <http://trac.webkit.org/changeset/92340>.\n\n* platform/mac/fast/box-shadow/inset-box-shadows-expected.png:\n* platform/mac/fast/repaint/shadow-multiple-horizontal-expected.png:\n* platform/mac/fast/repaint/shadow-multiple-strict-horizontal-expected.png:\n* platform/mac/fast/repaint/shadow-multiple-strict-vertical-expected.png:\n* platform/mac/fast/repaint/shadow-multiple-vertical-expected.png:",
+                "files": ["LayoutTests/ChangeLog", "LayoutTests/platform/mac/fast/box-shadow/inset-box-shadows-expected.png", "LayoutTests/platform/mac/fast/repaint/shadow-multiple-horizontal-expected.png", "LayoutTests/platform/mac/fast/repaint/shadow-multiple-strict-horizontal-expected.png", "LayoutTests/platform/mac/fast/repaint/shadow-multiple-strict-vertical-expected.png", "LayoutTests/platform/mac/fast/repaint/shadow-multiple-vertical-expected.png"],
                 "number": 43708,
                 "properties": [],
                 "repository": "",
@@ -173,8 +173,8 @@
         }, {
             "branch": "trunk",
             "category": null,
-            "comments": "Update baselines after <http://trac.webkit.org/changeset/92340>.\n\n* platform/chromium-mac/fast/box-shadow/inset-box-shadows-expected.png:\n* platform/chromium-mac/fast/repaint/shadow-multiple-horizontal-expected.png:\n* platform/chromium-mac/fast/repaint/shadow-multiple-strict-horizontal-expected.png:\n* platform/chromium-mac/fast/repaint/shadow-multiple-strict-vertical-expected.png:\n* platform/chromium-mac/fast/repaint/shadow-multiple-vertical-expected.png:",
-            "files": ["LayoutTests/ChangeLog", "LayoutTests/platform/chromium-mac/fast/box-shadow/inset-box-shadows-expected.png", "LayoutTests/platform/chromium-mac/fast/repaint/shadow-multiple-horizontal-expected.png", "LayoutTests/platform/chromium-mac/fast/repaint/shadow-multiple-strict-horizontal-expected.png", "LayoutTests/platform/chromium-mac/fast/repaint/shadow-multiple-strict-vertical-expected.png", "LayoutTests/platform/chromium-mac/fast/repaint/shadow-multiple-vertical-expected.png"],
+            "comments": "Update baselines after <http://trac.webkit.org/changeset/92340>.\n\n* platform/mac/fast/box-shadow/inset-box-shadows-expected.png:\n* platform/mac/fast/repaint/shadow-multiple-horizontal-expected.png:\n* platform/mac/fast/repaint/shadow-multiple-strict-horizontal-expected.png:\n* platform/mac/fast/repaint/shadow-multiple-strict-vertical-expected.png:\n* platform/mac/fast/repaint/shadow-multiple-vertical-expected.png:",
+            "files": ["LayoutTests/ChangeLog", "LayoutTests/platform/mac/fast/box-shadow/inset-box-shadows-expected.png", "LayoutTests/platform/mac/fast/repaint/shadow-multiple-horizontal-expected.png", "LayoutTests/platform/mac/fast/repaint/shadow-multiple-strict-horizontal-expected.png", "LayoutTests/platform/mac/fast/repaint/shadow-multiple-strict-vertical-expected.png", "LayoutTests/platform/mac/fast/repaint/shadow-multiple-vertical-expected.png"],
             "number": 43708,
             "properties": [],
             "repository": "",
@@ -715,7 +715,7 @@
 var kExamplePerfBuildInfoJSON = {
     "blame": ["abarth@webkit.org"],
     "builderName": "Mac10.6 Perf",
-    "changes": ["Files:\n Tools/BuildSlaveSupport/build.webkit.org-config/public_html/TestFailures/main.js\n Tools/ChangeLog\nAt: Thu 04 Aug 2011 00:50:38\nChanged By: abarth@webkit.org\nComments: Fix types.  Sadly, main.js has no test coverage.  (I need to think\nabout how to test this part of the code.)\n\n* BuildSlaveSupport/build.webkit.org-config/public_html/TestFailures/main.js:Properties: \n\n\n", "Files:\n LayoutTests/ChangeLog\n LayoutTests/platform/chromium-mac/fast/box-shadow/inset-box-shadows-expected.png\n LayoutTests/platform/chromium-mac/fast/repaint/shadow-multiple-horizontal-expected.png\n LayoutTests/platform/chromium-mac/fast/repaint/shadow-multiple-strict-horizontal-expected.png\n LayoutTests/platform/chromium-mac/fast/repaint/shadow-multiple-strict-vertical-expected.png\n LayoutTests/platform/chromium-mac/fast/repaint/shadow-multiple-vertical-expected.png\nAt: Thu 04 Aug 2011 00:50:38\nChanged By: abarth@webkit.org\nComments: Update baselines after <http://trac.webkit.org/changeset/92340>.\n\n* platform/chromium-mac/fast/box-shadow/inset-box-shadows-expected.png:\n* platform/chromium-mac/fast/repaint/shadow-multiple-horizontal-expected.png:\n* platform/chromium-mac/fast/repaint/shadow-multiple-strict-horizontal-expected.png:\n* platform/chromium-mac/fast/repaint/shadow-multiple-strict-vertical-expected.png:\n* platform/chromium-mac/fast/repaint/shadow-multiple-vertical-expected.png:Properties: \n\n\n"],
+    "changes": ["Files:\n Tools/BuildSlaveSupport/build.webkit.org-config/public_html/TestFailures/main.js\n Tools/ChangeLog\nAt: Thu 04 Aug 2011 00:50:38\nChanged By: abarth@webkit.org\nComments: Fix types.  Sadly, main.js has no test coverage.  (I need to think\nabout how to test this part of the code.)\n\n* BuildSlaveSupport/build.webkit.org-config/public_html/TestFailures/main.js:Properties: \n\n\n", "Files:\n LayoutTests/ChangeLog\n LayoutTests/platform/mac/fast/box-shadow/inset-box-shadows-expected.png\n LayoutTests/platform/mac/fast/repaint/shadow-multiple-horizontal-expected.png\n LayoutTests/platform/mac/fast/repaint/shadow-multiple-strict-horizontal-expected.png\n LayoutTests/platform/mac/fast/repaint/shadow-multiple-strict-vertical-expected.png\n LayoutTests/platform/mac/fast/repaint/shadow-multiple-vertical-expected.png\nAt: Thu 04 Aug 2011 00:50:38\nChanged By: abarth@webkit.org\nComments: Update baselines after <http://trac.webkit.org/changeset/92340>.\n\n* platform/mac/fast/box-shadow/inset-box-shadows-expected.png:\n* platform/mac/fast/repaint/shadow-multiple-horizontal-expected.png:\n* platform/mac/fast/repaint/shadow-multiple-strict-horizontal-expected.png:\n* platform/mac/fast/repaint/shadow-multiple-strict-vertical-expected.png:\n* platform/mac/fast/repaint/shadow-multiple-vertical-expected.png:Properties: \n\n\n"],
     "currentStep": null,
     "eta": null,
     "logs": [
@@ -764,8 +764,8 @@
             }, {
                 "branch": "trunk",
                 "category": null,
-                "comments": "Update baselines after <http://trac.webkit.org/changeset/92340>.\n\n* platform/chromium-mac/fast/box-shadow/inset-box-shadows-expected.png:\n* platform/chromium-mac/fast/repaint/shadow-multiple-horizontal-expected.png:\n* platform/chromium-mac/fast/repaint/shadow-multiple-strict-horizontal-expected.png:\n* platform/chromium-mac/fast/repaint/shadow-multiple-strict-vertical-expected.png:\n* platform/chromium-mac/fast/repaint/shadow-multiple-vertical-expected.png:",
-                "files": ["LayoutTests/ChangeLog", "LayoutTests/platform/chromium-mac/fast/box-shadow/inset-box-shadows-expected.png", "LayoutTests/platform/chromium-mac/fast/repaint/shadow-multiple-horizontal-expected.png", "LayoutTests/platform/chromium-mac/fast/repaint/shadow-multiple-strict-horizontal-expected.png", "LayoutTests/platform/chromium-mac/fast/repaint/shadow-multiple-strict-vertical-expected.png", "LayoutTests/platform/chromium-mac/fast/repaint/shadow-multiple-vertical-expected.png"],
+                "comments": "Update baselines after <http://trac.webkit.org/changeset/92340>.\n\n* platform/mac/fast/box-shadow/inset-box-shadows-expected.png:\n* platform/mac/fast/repaint/shadow-multiple-horizontal-expected.png:\n* platform/mac/fast/repaint/shadow-multiple-strict-horizontal-expected.png:\n* platform/mac/fast/repaint/shadow-multiple-strict-vertical-expected.png:\n* platform/mac/fast/repaint/shadow-multiple-vertical-expected.png:",
+                "files": ["LayoutTests/ChangeLog", "LayoutTests/platform/mac/fast/box-shadow/inset-box-shadows-expected.png", "LayoutTests/platform/mac/fast/repaint/shadow-multiple-horizontal-expected.png", "LayoutTests/platform/mac/fast/repaint/shadow-multiple-strict-horizontal-expected.png", "LayoutTests/platform/mac/fast/repaint/shadow-multiple-strict-vertical-expected.png", "LayoutTests/platform/mac/fast/repaint/shadow-multiple-vertical-expected.png"],
                 "number": 43708,
                 "properties": [],
                 "repository": "",
@@ -798,8 +798,8 @@
         }, {
             "branch": "trunk",
             "category": null,
-            "comments": "Update baselines after <http://trac.webkit.org/changeset/92340>.\n\n* platform/chromium-mac/fast/box-shadow/inset-box-shadows-expected.png:\n* platform/chromium-mac/fast/repaint/shadow-multiple-horizontal-expected.png:\n* platform/chromium-mac/fast/repaint/shadow-multiple-strict-horizontal-expected.png:\n* platform/chromium-mac/fast/repaint/shadow-multiple-strict-vertical-expected.png:\n* platform/chromium-mac/fast/repaint/shadow-multiple-vertical-expected.png:",
-            "files": ["LayoutTests/ChangeLog", "LayoutTests/platform/chromium-mac/fast/box-shadow/inset-box-shadows-expected.png", "LayoutTests/platform/chromium-mac/fast/repaint/shadow-multiple-horizontal-expected.png", "LayoutTests/platform/chromium-mac/fast/repaint/shadow-multiple-strict-horizontal-expected.png", "LayoutTests/platform/chromium-mac/fast/repaint/shadow-multiple-strict-vertical-expected.png", "LayoutTests/platform/chromium-mac/fast/repaint/shadow-multiple-vertical-expected.png"],
+            "comments": "Update baselines after <http://trac.webkit.org/changeset/92340>.\n\n* platform/mac/fast/box-shadow/inset-box-shadows-expected.png:\n* platform/mac/fast/repaint/shadow-multiple-horizontal-expected.png:\n* platform/mac/fast/repaint/shadow-multiple-strict-horizontal-expected.png:\n* platform/mac/fast/repaint/shadow-multiple-strict-vertical-expected.png:\n* platform/mac/fast/repaint/shadow-multiple-vertical-expected.png:",
+            "files": ["LayoutTests/ChangeLog", "LayoutTests/platform/mac/fast/box-shadow/inset-box-shadows-expected.png", "LayoutTests/platform/mac/fast/repaint/shadow-multiple-horizontal-expected.png", "LayoutTests/platform/mac/fast/repaint/shadow-multiple-strict-horizontal-expected.png", "LayoutTests/platform/mac/fast/repaint/shadow-multiple-strict-vertical-expected.png", "LayoutTests/platform/mac/fast/repaint/shadow-multiple-vertical-expected.png"],
             "number": 43708,
             "properties": [],
             "repository": "",
diff --git a/Tools/ImageDiff/efl/ImageDiff.cpp b/Tools/ImageDiff/efl/ImageDiff.cpp
deleted file mode 100644
index 129b6ca..0000000
--- a/Tools/ImageDiff/efl/ImageDiff.cpp
+++ /dev/null
@@ -1,368 +0,0 @@
-/*
- * Copyright (C) 2009 Zan Dobersek <zandobersek@gmail.com>
- * Copyright (C) 2010 Igalia S.L.
- * Copyright (C) 2011 ProFUSION Embedded Systems
- * Copyright (C) 2011 Samsung Electronics
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1.  Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- * 2.  Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
- *     its contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-
-#include <Ecore.h>
-#include <Ecore_Evas.h>
-#include <Evas.h>
-#include <algorithm>
-#include <cmath>
-#include <cstdio>
-#include <cstdlib>
-#include <getopt.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <wtf/OwnArrayPtr.h>
-#include <wtf/OwnPtr.h>
-#include <wtf/PassOwnPtr.h>
-#include <wtf/PassRefPtr.h>
-#include <wtf/efl/RefPtrEfl.h>
-
-enum PixelComponent {
-    Red,
-    Green,
-    Blue,
-    Alpha
-};
-
-static OwnPtr<Ecore_Evas> gEcoreEvas;
-static double gTolerance = 0;
-
-static void abortWithErrorMessage(const char* errorMessage);
-
-static unsigned char* pixelFromImageData(unsigned char* imageData, int rowStride, int x, int y)
-{
-    return imageData + (y * rowStride) + (x << 2);
-}
-
-static Evas_Object* differenceImageFromDifferenceBuffer(Evas* evas, unsigned char* buffer, int width, int height)
-{
-    Evas_Object* image = evas_object_image_filled_add(evas);
-    if (!image)
-        abortWithErrorMessage("could not create difference image");
-
-    evas_object_image_size_set(image, width, height);
-    evas_object_image_colorspace_set(image, EVAS_COLORSPACE_ARGB8888);
-
-    unsigned char* diffPixels = static_cast<unsigned char*>(evas_object_image_data_get(image, EINA_TRUE));
-    const int rowStride = evas_object_image_stride_get(image);
-    for (int x = 0; x < width; x++) {
-        for (int y = 0; y < height; y++) {
-            unsigned char* diffPixel = pixelFromImageData(diffPixels, rowStride, x, y);
-            diffPixel[Red] = diffPixel[Green] = diffPixel[Blue] = *buffer++;
-            diffPixel[Alpha] = 0xff;
-        }
-    }
-
-    evas_object_image_data_set(image, diffPixels);
-
-    return image;
-}
-
-static float computeDistanceBetweenPixelComponents(unsigned char actualComponent, unsigned char baseComponent)
-{
-    return (actualComponent - baseComponent) / std::max<float>(255 - baseComponent, baseComponent);
-}
-
-static float computeDistanceBetweenPixelComponents(unsigned char* actualPixel, unsigned char* basePixel, PixelComponent component)
-{
-    return computeDistanceBetweenPixelComponents(actualPixel[component], basePixel[component]);
-}
-
-static float calculatePixelDifference(unsigned char* basePixel, unsigned char* actualPixel)
-{
-    const float red = computeDistanceBetweenPixelComponents(actualPixel, basePixel, Red);
-    const float green = computeDistanceBetweenPixelComponents(actualPixel, basePixel, Green);
-    const float blue = computeDistanceBetweenPixelComponents(actualPixel, basePixel, Blue);
-    const float alpha = computeDistanceBetweenPixelComponents(actualPixel, basePixel, Alpha);
-    return sqrtf(red * red + green * green + blue * blue + alpha * alpha) / 2.0f;
-}
-
-static float calculateDifference(Evas_Object* baselineImage, Evas_Object* actualImage, RefPtr<Evas_Object>& differenceImage)
-{
-    int width, height, baselineWidth, baselineHeight;
-    evas_object_image_size_get(actualImage, &width, &height);
-    evas_object_image_size_get(baselineImage, &baselineWidth, &baselineHeight);
-
-    if (width != baselineWidth || height != baselineHeight) {
-        printf("Error, test and reference image have different sizes.\n");
-        return 100; // Completely different.
-    }
-
-    OwnArrayPtr<unsigned char> diffBuffer = adoptArrayPtr(new unsigned char[width * height]);
-    if (!diffBuffer)
-        abortWithErrorMessage("could not create difference buffer");
-
-    const int actualRowStride = evas_object_image_stride_get(actualImage);
-    const int baseRowStride = evas_object_image_stride_get(baselineImage);
-    unsigned numberOfDifferentPixels = 0;
-    float totalDistance = 0;
-    float maxDistance = 0;
-    unsigned char* actualPixels = static_cast<unsigned char*>(evas_object_image_data_get(actualImage, EINA_FALSE));
-    unsigned char* basePixels = static_cast<unsigned char*>(evas_object_image_data_get(baselineImage, EINA_FALSE));
-    unsigned char* currentDiffPixel = diffBuffer.get();
-
-    for (int x = 0; x < width; x++) {
-        for (int y = 0; y < height; y++) {
-            unsigned char* actualPixel = pixelFromImageData(actualPixels, actualRowStride, x, y);
-            unsigned char* basePixel = pixelFromImageData(basePixels, baseRowStride, x, y);
-
-            const float distance = calculatePixelDifference(basePixel, actualPixel);
-            *currentDiffPixel++ = static_cast<unsigned char>(distance * 255.0f);
-
-            if (distance >= 1.0f / 255.0f) {
-                ++numberOfDifferentPixels;
-                totalDistance += distance;
-                maxDistance = std::max<float>(maxDistance, distance);
-            }
-        }
-    }
-
-    // When using evas_object_image_data_get(), a complementary evas_object_data_set() must be
-    // issued to balance the reference count, even if the image hasn't been changed.
-    evas_object_image_data_set(baselineImage, basePixels);
-    evas_object_image_data_set(actualImage, actualPixels);
-
-    // Compute the difference as a percentage combining both the number of
-    // different pixels and their difference amount i.e. the average distance
-    // over the entire image
-    float difference = 0;
-    if (numberOfDifferentPixels)
-        difference = 100.0f * totalDistance / (height * width);
-    if (difference <= gTolerance)
-        difference = 0;
-    else {
-        difference = roundf(difference * 100.0f) / 100.0f;
-        difference = std::max(difference, 0.01f); // round to 2 decimal places
-
-        differenceImage = adoptRef(differenceImageFromDifferenceBuffer(evas_object_evas_get(baselineImage), diffBuffer.get(), width, height));
-    }
-
-    return difference;
-}
-
-static int getTemporaryFile(char *fileName, size_t fileNameLength)
-{
-    char* tempDirectory = getenv("TMPDIR");
-    if (!tempDirectory)
-        tempDirectory = getenv("TEMP");
-
-    if (tempDirectory)
-        snprintf(fileName, fileNameLength, "%s/ImageDiffXXXXXX.png", tempDirectory);
-    else {
-#if __linux__
-        strcpy(fileName, "/dev/shm/ImageDiffXXXXXX.png");
-        const int fileDescriptor = mkstemps(fileName, sizeof(".png") - 1);
-        if (fileDescriptor >= 0)
-            return fileDescriptor;
-#endif // __linux__
-
-        strcpy(fileName, "ImageDiffXXXXXX.png");
-    }
-
-    return mkstemps(fileName, sizeof(".png") - 1);
-}
-
-static void printImage(Evas_Object* image)
-{
-    char fileName[PATH_MAX];
-
-    const int tempImageFd = getTemporaryFile(fileName, PATH_MAX);
-    if (tempImageFd == -1)
-        abortWithErrorMessage("could not create temporary file");
-
-    evas_render(evas_object_evas_get(image));
-
-    if (evas_object_image_save(image, fileName, 0, 0)) {
-        struct stat fileInfo;
-        if (!stat(fileName, &fileInfo)) {
-            printf("Content-Length: %ld\n", fileInfo.st_size);
-            fflush(stdout);
-
-            unsigned char buffer[2048];
-            ssize_t bytesRead;
-            while ((bytesRead = read(tempImageFd, buffer, sizeof(buffer))) > 0) {
-                ssize_t bytesWritten = 0;
-                ssize_t count;
-                do {
-                    if ((count = write(1, buffer + bytesWritten, bytesRead - bytesWritten)) <= 0)
-                        break;
-                    bytesWritten += count;
-                } while (bytesWritten < bytesRead);
-            }
-        }
-    }
-    close(tempImageFd);
-    unlink(fileName);
-}
-
-static void printImageDifferences(Evas_Object* baselineImage, Evas_Object* actualImage)
-{
-    RefPtr<Evas_Object> differenceImage;
-    const float difference = calculateDifference(baselineImage, actualImage, differenceImage);
-
-    if (difference > 0.0f) {
-        if (differenceImage)
-            printImage(differenceImage.get());
-
-        printf("diff: %01.2f%% failed\n", difference);
-    } else
-        printf("diff: %01.2f%% passed\n", difference);
-}
-
-static void resizeEcoreEvasIfNeeded(Evas_Object* image)
-{
-    int newWidth, newHeight;
-    evas_object_image_size_get(image, &newWidth, &newHeight);
-
-    int currentWidth, currentHeight;
-    ecore_evas_screen_geometry_get(gEcoreEvas.get(), 0, 0, &currentWidth, &currentHeight);
-
-    if (newWidth > currentWidth)
-        currentWidth = newWidth;
-    if (newHeight > currentHeight)
-        currentHeight = newHeight;
-
-    ecore_evas_resize(gEcoreEvas.get(), currentWidth, currentHeight);
-}
-
-static PassRefPtr<Evas_Object> readImageFromStdin(Evas* evas, long imageSize)
-{
-    OwnArrayPtr<unsigned char> imageBuffer = adoptArrayPtr(new unsigned char[imageSize]);
-    if (!imageBuffer)
-        abortWithErrorMessage("cannot allocate image");
-
-    const size_t bytesRead = fread(imageBuffer.get(), 1, imageSize, stdin);
-    if (!bytesRead)
-        return PassRefPtr<Evas_Object>();
-
-    Evas_Object* image = evas_object_image_filled_add(evas);
-    evas_object_image_colorspace_set(image, EVAS_COLORSPACE_ARGB8888);
-    evas_object_image_memfile_set(image, imageBuffer.get(), bytesRead, 0, 0);
-
-    resizeEcoreEvasIfNeeded(image);
-
-    return adoptRef(image);
-}
-
-static bool parseCommandLineOptions(int argc, char** argv)
-{
-    static const option options[] = {
-        { "tolerance", required_argument, 0, 't' },
-        { 0, 0, 0, 0 }
-    };
-    int option;
-
-    while ((option = getopt_long(argc, (char* const*)argv, "t:", options, 0)) != -1) {
-        switch (option) {
-        case 't':
-            gTolerance = atof(optarg);
-            break;
-        case '?':
-        case ':':
-            return false;
-        }
-    }
-
-    return true;
-}
-
-static void shutdownEfl()
-{
-    ecore_evas_shutdown();
-    ecore_shutdown();
-    evas_shutdown();
-}
-
-static void abortWithErrorMessage(const char* errorMessage)
-{
-    shutdownEfl();
-
-    printf("Error, %s.\n", errorMessage);
-    exit(EXIT_FAILURE);
-}
-
-static Evas* initEfl()
-{
-    evas_init();
-    ecore_init();
-    ecore_evas_init();
-
-    gEcoreEvas = adoptPtr(ecore_evas_buffer_new(1, 1));
-    Evas* evas = ecore_evas_get(gEcoreEvas.get());
-    if (!evas)
-        abortWithErrorMessage("could not create Ecore_Evas buffer");
-
-    return evas;
-}
-
-int main(int argc, char* argv[])
-{
-    if (!parseCommandLineOptions(argc, argv))
-        return EXIT_FAILURE;
-
-    Evas* evas = initEfl();
-
-    RefPtr<Evas_Object> actualImage;
-    RefPtr<Evas_Object> baselineImage;
-
-    char buffer[2048];
-    while (fgets(buffer, sizeof(buffer), stdin)) {
-        char* contentLengthStart = strstr(buffer, "Content-Length: ");
-        if (!contentLengthStart)
-            continue;
-        long imageSize;
-        if (sscanf(contentLengthStart, "Content-Length: %ld", &imageSize) == 1) {
-            if (imageSize <= 0)
-                abortWithErrorMessage("image size must be specified");
-
-            if (!actualImage)
-                actualImage = readImageFromStdin(evas, imageSize);
-            else if (!baselineImage) {
-                baselineImage = readImageFromStdin(evas, imageSize);
-
-                printImageDifferences(baselineImage.get(), actualImage.get());
-
-                actualImage.clear();
-                baselineImage.clear();
-            }
-        }
-
-        fflush(stdout);
-    }
-
-    gEcoreEvas.clear(); // Make sure ecore_evas_free is called before the EFL are shut down
-
-    shutdownEfl();
-    return EXIT_SUCCESS;
-}
diff --git a/Tools/ImageDiff/gtk/ImageDiff.cpp b/Tools/ImageDiff/gtk/ImageDiff.cpp
deleted file mode 100644
index 7baa25e..0000000
--- a/Tools/ImageDiff/gtk/ImageDiff.cpp
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * Copyright (C) 2009 Zan Dobersek <zandobersek@gmail.com>
- * Copyright (C) 2010 Igalia S.L.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1.  Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- * 2.  Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
- *     its contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <algorithm>
-#include <cmath>
-#include <cstdio>
-#include <cstring>
-#include <gdk/gdk.h>
-
-using namespace std;
-
-static double tolerance = 0;
-static GOptionEntry commandLineOptionEntries[] = 
-{
-    { "tolerance", 0, 0, G_OPTION_ARG_DOUBLE, &tolerance, "Percentage difference between images before considering them different", "T" },
-    { 0, 0, 0, G_OPTION_ARG_NONE, 0, 0, 0 },
-};
-
-GdkPixbuf* readPixbufFromStdin(long imageSize)
-{
-    unsigned char imageBuffer[2048];
-    GdkPixbufLoader* loader = gdk_pixbuf_loader_new_with_type("png", 0);
-    GError* error = 0;
-
-    while (imageSize > 0) {
-        size_t bytesToRead = min<int>(imageSize, 2048);
-        size_t bytesRead = fread(imageBuffer, 1, bytesToRead, stdin);
-
-        if (!gdk_pixbuf_loader_write(loader, reinterpret_cast<const guchar*>(imageBuffer), bytesRead, &error)) {
-            g_error_free(error);
-            gdk_pixbuf_loader_close(loader, 0);
-            g_object_unref(loader);
-            return 0;
-        }
-
-        imageSize -= static_cast<int>(bytesRead);
-    }
-
-    gdk_pixbuf_loader_close(loader, 0);
-    GdkPixbuf* decodedImage = gdk_pixbuf_loader_get_pixbuf(loader);
-    g_object_ref(decodedImage);
-    return decodedImage;
-}
-
-GdkPixbuf* differenceImageFromDifferenceBuffer(unsigned char* buffer, int width, int height)
-{
-    GdkPixbuf* image = gdk_pixbuf_new(GDK_COLORSPACE_RGB, FALSE, 8, width, height);
-    if (!image)
-        return image;
-
-    int rowStride = gdk_pixbuf_get_rowstride(image);
-    unsigned char* diffPixels = gdk_pixbuf_get_pixels(image);
-    for (int x = 0; x < width; x++) {
-        for (int y = 0; y < height; y++) {
-            unsigned char* diffPixel = diffPixels + (y * rowStride) + (x * 3);
-            diffPixel[0] = diffPixel[1] = diffPixel[2] = *buffer++;
-        }
-    }
-
-    return image;
-}
-
-float calculateDifference(GdkPixbuf* baselineImage, GdkPixbuf* actualImage, GdkPixbuf** differenceImage)
-{
-    int width = gdk_pixbuf_get_width(actualImage);
-    int height = gdk_pixbuf_get_height(actualImage);
-    int numberOfChannels = gdk_pixbuf_get_n_channels(actualImage);
-    if ((width != gdk_pixbuf_get_width(baselineImage))
-        || (height != gdk_pixbuf_get_height(baselineImage))
-        || (numberOfChannels != gdk_pixbuf_get_n_channels(baselineImage))
-        || (gdk_pixbuf_get_has_alpha(actualImage) != gdk_pixbuf_get_has_alpha(baselineImage))) {
-        fprintf(stderr, "Error, test and reference image have different properties.\n");
-        return 100; // Completely different.
-    }
-
-    unsigned char* diffBuffer = static_cast<unsigned char*>(malloc(width * height));
-    float count = 0;
-    float sum = 0;
-    float maxDistance = 0;
-    int actualRowStride = gdk_pixbuf_get_rowstride(actualImage);
-    int baseRowStride = gdk_pixbuf_get_rowstride(baselineImage);
-    unsigned char* actualPixels = gdk_pixbuf_get_pixels(actualImage);
-    unsigned char* basePixels = gdk_pixbuf_get_pixels(baselineImage);
-    unsigned char* currentDiffPixel = diffBuffer;
-    for (int x = 0; x < width; x++) {
-        for (int y = 0; y < height; y++) {
-            unsigned char* actualPixel = actualPixels + (y * actualRowStride) + (x * numberOfChannels);
-            unsigned char* basePixel = basePixels + (y * baseRowStride) + (x * numberOfChannels);
-
-            float red = (actualPixel[0] - basePixel[0]) / max<float>(255 - basePixel[0], basePixel[0]);
-            float green = (actualPixel[1] - basePixel[1]) / max<float>(255 - basePixel[1], basePixel[1]);
-            float blue = (actualPixel[2] - basePixel[2]) / max<float>(255 - basePixel[2], basePixel[2]);
-            float alpha = (actualPixel[3] - basePixel[3]) / max<float>(255 - basePixel[3], basePixel[3]);
-            float distance = sqrtf(red * red + green * green + blue * blue + alpha * alpha) / 2.0f;
-
-            *currentDiffPixel++ = (unsigned char)(distance * 255.0f);
-
-            if (distance >= 1.0f / 255.0f) {
-                count += 1.0f;
-                sum += distance;
-                maxDistance = max<float>(maxDistance, distance);
-            }
-        }
-    }
-
-    // Compute the difference as a percentage combining both the number of
-    // different pixels and their difference amount i.e. the average distance
-    // over the entire image
-    float difference = 0;
-    if (count > 0.0f)
-        difference = 100.0f * sum / (height * width);
-    if (difference <= tolerance)
-        difference = 0;
-    else {
-        difference = roundf(difference * 100.0f) / 100.0f;
-        difference = max(difference, 0.01f); // round to 2 decimal places
-        *differenceImage = differenceImageFromDifferenceBuffer(diffBuffer, width, height);
-    }
-
-    free(diffBuffer);
-    return difference;
-}
-
-void printImage(GdkPixbuf* image)
-{
-    char* buffer;
-    gsize bufferSize;
-    GError* error = 0;
-    if (!gdk_pixbuf_save_to_buffer(image, &buffer, &bufferSize, "png", &error, NULL)) {
-        g_error_free(error);
-        return; // Don't bail out, as we can still use the percentage output.
-    }
-
-    printf("Content-Length: %"G_GSIZE_FORMAT"\n", bufferSize);
-    fwrite(buffer, 1, bufferSize, stdout);
-}
-
-void printImageDifferences(GdkPixbuf* baselineImage, GdkPixbuf* actualImage)
-{
-    GdkPixbuf* differenceImage = 0;
-    float difference = calculateDifference(baselineImage, actualImage, &differenceImage);
-    if (difference > 0.0f) {
-        if (differenceImage) {
-            printImage(differenceImage);
-            g_object_unref(differenceImage);
-        }
-        printf("diff: %01.2f%% failed\n", difference);
-    } else
-        printf("diff: %01.2f%% passed\n", difference);
-}
-
-int main(int argc, char* argv[])
-{
-#if !GLIB_CHECK_VERSION(2, 35, 0)
-    g_type_init();
-#endif
-
-    GError* error = 0;
-    GOptionContext* context = g_option_context_new("- compare two image files, printing their percentage difference and the difference image to stdout");
-    g_option_context_add_main_entries(context, commandLineOptionEntries, 0);
-    if (!g_option_context_parse(context, &argc, &argv, &error)) {
-        printf("Option parsing failed: %s\n", error->message);
-        g_error_free(error);
-        return 1;
-    }
-
-    GdkPixbuf* actualImage = 0;
-    GdkPixbuf* baselineImage = 0;
-    char buffer[2048];
-    while (fgets(buffer, sizeof(buffer), stdin)) {
-        // Convert the first newline into a NUL character so that strtok doesn't produce it.
-        char* newLineCharacter = strchr(buffer, '\n');
-        if (newLineCharacter)
-            *newLineCharacter = '\0';
-
-        if (!strncmp("Content-Length: ", buffer, 16)) {
-            gchar** tokens = g_strsplit(buffer, " ", 0);
-            if (!tokens[1]) {
-                g_strfreev(tokens);
-                printf("Error, image size must be specified..\n");
-                return 1;
-            }
-
-            long imageSize = strtol(tokens[1], 0, 10);
-            g_strfreev(tokens);
-            if (imageSize > 0 && !actualImage) {
-                if (!(actualImage = readPixbufFromStdin(imageSize))) {
-                    printf("Error, could not read actual image.\n");
-                    return 1;
-                }
-            } else if (imageSize > 0 && !baselineImage) {
-                if (!(baselineImage = readPixbufFromStdin(imageSize))) {
-                    printf("Error, could not read baseline image.\n");
-                    return 1;
-                }
-            } else {
-                printf("Error, image size must be specified..\n");
-                return 1;
-            }
-        }
-
-        if (actualImage && baselineImage) {
-            printImageDifferences(baselineImage, actualImage);
-            g_object_unref(actualImage);
-            g_object_unref(baselineImage);
-            actualImage = 0;
-            baselineImage = 0;
-        }
-
-        fflush(stdout);
-    }
-
-    return 0;
-}
diff --git a/Tools/ImageDiff/qt/ImageDiff.cpp b/Tools/ImageDiff/qt/ImageDiff.cpp
deleted file mode 100644
index 816086a..0000000
--- a/Tools/ImageDiff/qt/ImageDiff.cpp
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
-    Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies)
-
-    This library is free software; you can redistribute it and/or
-    modify it under the terms of the GNU Library General Public
-    License as published by the Free Software Foundation; either
-    version 2 of the License, or (at your option) any later version.
-
-    This library is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-    Library General Public License for more details.
-
-    You should have received a copy of the GNU Library General Public License
-    along with this library; see the file COPYING.LIB.  If not, write to
-    the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
-    Boston, MA 02110-1301, USA.
-*/
-
-#include <QtCore/qmath.h>
-
-#include <QApplication>
-#include <QBuffer>
-#include <QByteArray>
-#include <QImage>
-#include <QStringList>
-
-#include <stdio.h>
-
-int main(int argc, char* argv[])
-{
-    QCoreApplication app(argc, argv);
-
-    qreal tolerance = 0; // Tolerated percentage of error pixels.
-
-    QStringList args = app.arguments();
-    for (int i = 0; i < argc; ++i)
-        if (args[i] == "-t" || args[i] == "--tolerance")
-            tolerance = args[i + 1].toDouble();
-
-    char buffer[2048];
-    QImage actualImage;
-    QImage baselineImage;
-
-    while (fgets(buffer, sizeof(buffer), stdin)) {
-        // remove the CR
-        char* newLineCharacter = strchr(buffer, '\n');
-        if (newLineCharacter)
-            *newLineCharacter = '\0';
-
-        if (!strncmp("Content-Length: ", buffer, 16)) {
-            strtok(buffer, " ");
-            int imageSize = strtol(strtok(0, " "), 0, 10);
-
-            if (imageSize <= 0) {
-                fputs("error, image size must be specified.\n", stdout);
-            } else {
-                unsigned char buffer[2048];
-                QBuffer data;
-
-                // Read all the incoming chunks
-                data.open(QBuffer::WriteOnly);
-                while (imageSize > 0) {
-                    size_t bytesToRead = qMin(imageSize, 2048);
-                    size_t bytesRead = fread(buffer, 1, bytesToRead, stdin);
-                    data.write(reinterpret_cast<const char*>(buffer), bytesRead);
-                    imageSize -= static_cast<int>(bytesRead);
-                }
-
-                // Convert into QImage
-                QImage decodedImage;
-                decodedImage.loadFromData(data.data(), "PNG");
-                decodedImage = decodedImage.convertToFormat(QImage::Format_ARGB32);
-
-                // Place it in the right place
-                if (actualImage.isNull())
-                    actualImage = decodedImage;
-                else
-                    baselineImage = decodedImage;
-            }
-        }
-
-        if (!actualImage.isNull() && !baselineImage.isNull()) {
-
-            if (actualImage.size() != baselineImage.size()) {
-                fprintf(stdout, "diff: 100%% failed\n");
-                fprintf(stderr, "error, test and reference image have different properties.\n");
-                fflush(stderr);
-                fflush(stdout);
-            } else {
-
-                int w = actualImage.width();
-                int h = actualImage.height();
-                QImage diffImage(w, h, QImage::Format_ARGB32);
-
-                int errorCount = 0;
-
-                for (int x = 0; x < w; ++x) {
-                    for (int y = 0; y < h; ++y) {
-                        QRgb pixel = actualImage.pixel(x, y);
-                        QRgb basePixel = baselineImage.pixel(x, y);
-                        qreal red = (qRed(pixel) - qRed(basePixel)) / static_cast<float>(qMax(255 - qRed(basePixel), qRed(basePixel)));
-                        qreal green = (qGreen(pixel) - qGreen(basePixel)) / static_cast<float>(qMax(255 - qGreen(basePixel), qGreen(basePixel)));
-                        qreal blue = (qBlue(pixel) - qBlue(basePixel)) / static_cast<float>(qMax(255 - qBlue(basePixel), qBlue(basePixel)));
-                        qreal alpha = (qAlpha(pixel) - qAlpha(basePixel)) / static_cast<float>(qMax(255 - qAlpha(basePixel), qAlpha(basePixel)));
-                        qreal distance = qSqrt(red * red + green * green + blue * blue + alpha * alpha) / 2.0f;
-                        if (distance >= 1 / qreal(255)) {
-                            errorCount++;
-                            diffImage.setPixel(x, y, qRgb(255, 0, 0));
-                        } else
-                            diffImage.setPixel(x, y, qRgba(qRed(basePixel), qGreen(basePixel), qBlue(basePixel), qAlpha(basePixel) * 0.5));
-                    }
-                }
-
-                qreal difference = 0;
-                if (errorCount)
-                    difference = 100 * errorCount / static_cast<qreal>(w * h);
-
-                if (difference <= tolerance)
-                    fprintf(stdout, "diff: %01.2f%% passed\n", difference);
-                else {
-                    QBuffer buffer;
-                    buffer.open(QBuffer::WriteOnly);
-                    diffImage.save(&buffer, "PNG");
-                    buffer.close();
-                    const QByteArray &data = buffer.data();
-                    printf("Content-Length: %lu\n", static_cast<unsigned long>(data.length()));
-
-                    // We have to use the return value of fwrite to avoid "ignoring return value" gcc warning
-                    // See https://bugs.webkit.org/show_bug.cgi?id=45384 for details.
-                    if (fwrite(data.constData(), 1, data.length(), stdout)) {}
-
-                    fprintf(stdout, "diff: %01.2f%% failed\n", difference);
-                }
-
-                fflush(stdout);
-            }
-            actualImage = QImage();
-            baselineImage = QImage();
-        }
-    }
-
-    return 0;
-}
diff --git a/Tools/Scripts/webkitpy/bindings/main.py b/Tools/Scripts/webkitpy/bindings/main.py
index 52959e7..1e7519a 100644
--- a/Tools/Scripts/webkitpy/bindings/main.py
+++ b/Tools/Scripts/webkitpy/bindings/main.py
@@ -52,16 +52,16 @@
         self.reset_results = reset_results
         self.executive = executive
 
-    def generate_from_idl(self, idl_file, output_directory, supplemental_dependency_file):
+    def generate_from_idl(self, idl_file, output_directory, interface_dependencies_file):
         cmd = ['perl', '-w',
                '-Ibindings/scripts',
                '-Icore/scripts',
                '-I../../JSON/out/lib/perl5',
-               'bindings/scripts/generate-bindings.pl',
+               'bindings/scripts/deprecated_generate_bindings.pl',
                # idl include directories (path relative to generate-bindings.pl)
                '--include', '.',
                '--outputDir', output_directory,
-               '--supplementalDependencyFile', supplemental_dependency_file,
+               '--interfaceDependenciesFile', interface_dependencies_file,
                '--idlAttributesFile', 'bindings/scripts/IDLAttributes.txt',
                idl_file]
 
@@ -75,7 +75,7 @@
             exit_code = e.exit_code
         return exit_code
 
-    def generate_supplemental_dependency(self, input_directory, supplemental_dependency_file, window_constructors_file, workerglobalscope_constructors_file, sharedworkerglobalscope_constructors_file, dedicatedworkerglobalscope_constructors_file, event_names_file):
+    def generate_interface_dependencies(self, input_directory, interface_dependencies_file, window_constructors_file, workerglobalscope_constructors_file, sharedworkerglobalscope_constructors_file, dedicatedworkerglobalscope_constructors_file, event_names_file):
         idl_files_list = tempfile.mkstemp()
         for input_file in os.listdir(input_directory):
             (name, extension) = os.path.splitext(input_file)
@@ -85,9 +85,9 @@
         os.close(idl_files_list[0])
 
         cmd = ['python',
-               'bindings/scripts/preprocess_idls.py',
+               'bindings/scripts/compute_dependencies.py',
                '--idl-files-list', idl_files_list[1],
-               '--supplemental-dependency-file', supplemental_dependency_file,
+               '--interface-dependencies-file', interface_dependencies_file,
                '--window-constructors-file', window_constructors_file,
                '--workerglobalscope-constructors-file', workerglobalscope_constructors_file,
                '--sharedworkerglobalscope-constructors-file', sharedworkerglobalscope_constructors_file,
@@ -141,7 +141,7 @@
                 changes_found = True
         return changes_found
 
-    def run_tests(self, input_directory, reference_directory, supplemental_dependency_file, event_names_file):
+    def run_tests(self, input_directory, reference_directory, interface_dependencies_file, event_names_file):
         work_directory = reference_directory
 
         passed = True
@@ -160,7 +160,7 @@
 
             if self.generate_from_idl(os.path.join(input_directory, input_file),
                                       work_directory,
-                                      supplemental_dependency_file):
+                                      interface_dependencies_file):
                 passed = False
 
             if self.reset_results:
@@ -185,7 +185,7 @@
         input_directory = os.path.join('bindings', 'tests', 'idls')
         reference_directory = os.path.join('bindings', 'tests', 'results')
 
-        supplemental_dependency_file = provider.newtempfile()
+        interface_dependencies_file = provider.newtempfile()
         window_constructors_file = provider.newtempfile()
         workerglobalscope_constructors_file = provider.newtempfile()
         sharedworkerglobalscope_constructors_file = provider.newtempfile()
@@ -196,11 +196,11 @@
         else:
             event_names_file = provider.newtempfile()
 
-        if self.generate_supplemental_dependency(input_directory, supplemental_dependency_file, window_constructors_file, workerglobalscope_constructors_file, sharedworkerglobalscope_constructors_file, dedicatedworkerglobalscope_constructors_file, event_names_file):
-            print 'Failed to generate a supplemental dependency file.'
+        if self.generate_interface_dependencies(input_directory, interface_dependencies_file, window_constructors_file, workerglobalscope_constructors_file, sharedworkerglobalscope_constructors_file, dedicatedworkerglobalscope_constructors_file, event_names_file):
+            print 'Failed to generate interface dependencies file.'
             return -1
 
-        if not self.run_tests(input_directory, reference_directory, supplemental_dependency_file, event_names_file):
+        if not self.run_tests(input_directory, reference_directory, interface_dependencies_file, event_names_file):
             all_tests_passed = False
 
         print ''
diff --git a/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer.py b/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer.py
index 02e985c..d6430fc 100644
--- a/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer.py
+++ b/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer.py
@@ -53,48 +53,74 @@
         self._port_factory = host.port_factory
         self._scm = host.scm()
         self._port_names = port_names
+        # Only used by unittests.
+        self.new_results_by_directory = []
+
+    def _baseline_root(self, port, baseline_name):
+        virtual_suite = port.lookup_virtual_suite(baseline_name)
+        if virtual_suite:
+            return self._filesystem.join(self.ROOT_LAYOUT_TESTS_DIRECTORY, virtual_suite.name)
+        return self.ROOT_LAYOUT_TESTS_DIRECTORY
+
+    def _baseline_search_path(self, port, baseline_name):
+        virtual_suite = port.lookup_virtual_suite(baseline_name)
+        if virtual_suite:
+            return port.virtual_baseline_search_path(baseline_name)
+        return port.baseline_search_path()
 
     @memoized
-    def _relative_baseline_search_paths(self, port_name):
+    def _relative_baseline_search_paths(self, port_name, baseline_name):
         port = self._port_factory.get(port_name)
-        relative_paths = [self._filesystem.relpath(path, port.webkit_base()) for path in port.baseline_search_path()]
-        return relative_paths + [self.ROOT_LAYOUT_TESTS_DIRECTORY]
+        relative_paths = [self._filesystem.relpath(path, port.webkit_base()) for path in self._baseline_search_path(port, baseline_name)]
+        return relative_paths + [self._baseline_root(port, baseline_name)]
 
     def read_results_by_directory(self, baseline_name):
         results_by_directory = {}
-        directories = reduce(set.union, map(set, [self._relative_baseline_search_paths(port_name) for port_name in self._port_names]))
+        directories = reduce(set.union, map(set, [self._relative_baseline_search_paths(port_name, baseline_name) for port_name in self._port_names]))
+
+        # This code is complicated because both the directory name and the baseline_name have the virtual
+        # test suite in the name and the virtual baseline name is not a strict superset of the non-virtual name.
+        # For example, virtual/softwarecompositing/foo-expected.png corresponds to compositing/foo-expected.png and
+        # the baseline directories are like platform/mac/virtual/softwarecompositing. So, to get the path
+        # to the baseline in the platform directory, we need to append just foo-expected.png to the directory.
+        virtual_suite = self._port_factory.get().lookup_virtual_suite(baseline_name)
+        if virtual_suite:
+            baseline_name_without_virtual = baseline_name[len(virtual_suite.name) + 1:]
+        else:
+            baseline_name_without_virtual = baseline_name
+
         for directory in directories:
-            path = self._filesystem.join(self._scm.checkout_root, directory, baseline_name)
+            path = self._filesystem.join(self._scm.checkout_root, directory, baseline_name_without_virtual)
             if self._filesystem.exists(path):
                 results_by_directory[directory] = self._filesystem.sha1(path)
         return results_by_directory
 
-    def _results_by_port_name(self, results_by_directory):
+    def _results_by_port_name(self, results_by_directory, baseline_name):
         results_by_port_name = {}
         for port_name in self._port_names:
-            for directory in self._relative_baseline_search_paths(port_name):
+            for directory in self._relative_baseline_search_paths(port_name, baseline_name):
                 if directory in results_by_directory:
                     results_by_port_name[port_name] = results_by_directory[directory]
                     break
         return results_by_port_name
 
     @memoized
-    def _directories_immediately_preceding_root(self):
+    def _directories_immediately_preceding_root(self, baseline_name):
         directories = set()
         for port_name in self._port_names:
             port = self._port_factory.get(port_name)
-            directory = self._filesystem.relpath(port.baseline_search_path()[-1], port.webkit_base())
+            directory = self._filesystem.relpath(self._baseline_search_path(port, baseline_name)[-1], port.webkit_base())
             directories.add(directory)
         return directories
 
-    def _optimize_result_for_root(self, new_results_by_directory):
+    def _optimize_result_for_root(self, new_results_by_directory, baseline_name):
         # The root directory (i.e. LayoutTests) is the only one that doesn't correspond
         # to a specific platform. As such, it's the only one where the baseline in fallback directories
-        # immediately before it can be promoted up, i.e. if win and chromium-mac
+        # immediately before it can be promoted up, i.e. if win and mac
         # have the same baseline, then it can be promoted up to be the LayoutTests baseline.
         # All other baselines can only be removed if they're redundant with a baseline earlier
         # in the fallback order. They can never promoted up.
-        directories_immediately_preceding_root = self._directories_immediately_preceding_root()
+        directories_immediately_preceding_root = self._directories_immediately_preceding_root(baseline_name)
 
         shared_result = None
         root_baseline_unused = False
@@ -110,28 +136,30 @@
             elif shared_result != this_result:
                 root_baseline_unused = True
 
+        baseline_root = self._baseline_root(self._port_factory.get(), baseline_name)
+
         # The root baseline is unused if all the directories immediately preceding the root
         # have a baseline, but have different baselines, so the baselines can't be promoted up.
         if root_baseline_unused:
-            if self.ROOT_LAYOUT_TESTS_DIRECTORY in new_results_by_directory:
-                del new_results_by_directory[self.ROOT_LAYOUT_TESTS_DIRECTORY]
+            if baseline_root in new_results_by_directory:
+                del new_results_by_directory[baseline_root]
             return
 
-        new_results_by_directory[self.ROOT_LAYOUT_TESTS_DIRECTORY] = shared_result
+        new_results_by_directory[baseline_root] = shared_result
         for directory in directories_immediately_preceding_root:
             del new_results_by_directory[directory]
 
     def _find_optimal_result_placement(self, baseline_name):
         results_by_directory = self.read_results_by_directory(baseline_name)
-        results_by_port_name = self._results_by_port_name(results_by_directory)
+        results_by_port_name = self._results_by_port_name(results_by_directory, baseline_name)
         port_names_by_result = _invert_dictionary(results_by_port_name)
 
-        new_results_by_directory = self._remove_redundant_results(results_by_directory, results_by_port_name, port_names_by_result)
-        self._optimize_result_for_root(new_results_by_directory)
+        new_results_by_directory = self._remove_redundant_results(results_by_directory, results_by_port_name, port_names_by_result, baseline_name)
+        self._optimize_result_for_root(new_results_by_directory, baseline_name)
 
         return results_by_directory, new_results_by_directory
 
-    def _remove_redundant_results(self, results_by_directory, results_by_port_name, port_names_by_result):
+    def _remove_redundant_results(self, results_by_directory, results_by_port_name, port_names_by_result, baseline_name):
         new_results_by_directory = copy.copy(results_by_directory)
         for port_name in self._port_names:
             current_result = results_by_port_name.get(port_name)
@@ -140,7 +168,7 @@
             if not current_result:
                 continue;
 
-            fallback_path = self._relative_baseline_search_paths(port_name)
+            fallback_path = self._relative_baseline_search_paths(port_name, baseline_name)
             current_index, current_directory = self._find_in_fallbackpath(fallback_path, current_result, new_results_by_directory)
             for index in range(current_index + 1, len(fallback_path)):
                 new_directory = fallback_path[index]
@@ -210,7 +238,7 @@
         for path in sorted(results_by_directory):
             writer("%s%s: %s" % (indent, self._platform(path), results_by_directory[path][0:6]))
 
-    def optimize(self, baseline_name):
+    def _optimize_subtree(self, baseline_name):
         basename = self._filesystem.basename(baseline_name)
         results_by_directory, new_results_by_directory = self._find_optimal_result_placement(baseline_name)
 
@@ -221,10 +249,10 @@
             else:
                 _log.debug("  %s: (no baselines found)" % basename)
             # This is just used for unittests. Intentionally set it to the old data if we don't modify anything.
-            self.new_results_by_directory = results_by_directory
+            self.new_results_by_directory.append(results_by_directory)
             return True
 
-        if self._results_by_port_name(results_by_directory) != self._results_by_port_name(new_results_by_directory):
+        if self._results_by_port_name(results_by_directory, baseline_name) != self._results_by_port_name(new_results_by_directory, baseline_name):
             # This really should never happen. Just a sanity check to make sure the script fails in the case of bugs
             # instead of committing incorrect baselines.
             _log.error("  %s: optimization failed" % basename)
@@ -239,3 +267,43 @@
 
         self._move_baselines(baseline_name, results_by_directory, new_results_by_directory)
         return True
+
+    def _optimize_virtual_root(self, baseline_name, non_virtual_baseline_name):
+        default_port = self._port_factory.get()
+        virtual_root_expected_baseline_path = self._filesystem.join(default_port.layout_tests_dir(), baseline_name)
+        if not self._filesystem.exists(virtual_root_expected_baseline_path):
+            return
+        root_sha1 = self._filesystem.sha1(virtual_root_expected_baseline_path)
+
+        results_by_directory = self.read_results_by_directory(non_virtual_baseline_name)
+        # See if all the immediate predecessors of the virtual root have the same expected result.
+        for port_name in self._port_names:
+            directories = self._relative_baseline_search_paths(port_name, baseline_name)
+            for directory in directories:
+                if directory not in results_by_directory:
+                    continue
+                if results_by_directory[directory] != root_sha1:
+                    return
+                break
+
+        _log.debug("Deleting redundant firtual root expected result.")
+        self._scm.delete(virtual_root_expected_baseline_path)
+
+    def optimize(self, baseline_name):
+        # The virtual fallback path is the same as the non-virtual one tacked on to the bottom of the non-virtual path.
+        # See https://docs.google.com/a/chromium.org/drawings/d/1eGdsIKzJ2dxDDBbUaIABrN4aMLD1bqJTfyxNGZsTdmg/edit for
+        # a visual representation of this.
+        #
+        # So, we can optimize the virtual path, then the virtual root and then the regular path.
+
+        _log.debug("Optimizing regular fallback path.")
+        result = self._optimize_subtree(baseline_name)
+        non_virtual_baseline_name = self._port_factory.get().lookup_virtual_test_base(baseline_name)
+        if not non_virtual_baseline_name:
+            return result
+
+        self._optimize_virtual_root(baseline_name, non_virtual_baseline_name)
+
+        _log.debug("Optimizing non-virtual fallback path.")
+        result |= self._optimize_subtree(non_virtual_baseline_name)
+        return result
diff --git a/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer_unittest.py b/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer_unittest.py
index fe09dc4..b194975 100644
--- a/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer_unittest.py
+++ b/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer_unittest.py
@@ -26,6 +26,7 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+import hashlib
 import sys
 import unittest2 as unittest
 
@@ -35,35 +36,81 @@
 
 
 class TestBaselineOptimizer(BaselineOptimizer):
-    def __init__(self, mock_results_by_directory):
+    def __init__(self, mock_results_by_directory, create_mock_files, baseline_name):
         host = MockHost()
         BaselineOptimizer.__init__(self, host, host.port_factory.all_port_names())
         self._mock_results_by_directory = mock_results_by_directory
+        self._filesystem = host.filesystem
+        self._port_factory = host.port_factory
+        self._created_mock_files = create_mock_files
+        self._baseline_name = baseline_name
+
+        self._create_mock_files(mock_results_by_directory)
 
     # We override this method for testing so we don't have to construct an
     # elaborate mock file system.
     def read_results_by_directory(self, baseline_name):
+        if self._created_mock_files:
+            return super(TestBaselineOptimizer, self).read_results_by_directory(baseline_name)
         return self._mock_results_by_directory
 
     def _move_baselines(self, baseline_name, results_by_directory, new_results_by_directory):
-        self.new_results_by_directory = new_results_by_directory
+        self.new_results_by_directory.append(new_results_by_directory)
+
+        if self._created_mock_files:
+            super(TestBaselineOptimizer, self)._move_baselines(baseline_name, results_by_directory, new_results_by_directory)
+            return
+        self._mock_results_by_directory = new_results_by_directory
+
+    def _create_mock_files(self, results_by_directory):
+        root = self._port_factory.get().webkit_base()
+        for directory in results_by_directory:
+            if 'virtual' in directory:
+                virtual_suite = self._port_factory.get().lookup_virtual_suite(self._baseline_name)
+                if virtual_suite:
+                    baseline_name = self._baseline_name[len(virtual_suite.name) + 1:]
+                else:
+                    baseline_name = self._baseline_name
+            else:
+                baseline_name = self._port_factory.get().lookup_virtual_test_base(self._baseline_name)
+            path = self._filesystem.join(root, directory, baseline_name)
+            self._filesystem.write_text_file(path, results_by_directory[directory])
 
 
 class BaselineOptimizerTest(unittest.TestCase):
-    def _assertOptimization(self, results_by_directory, expected_new_results_by_directory):
-        baseline_optimizer = TestBaselineOptimizer(results_by_directory)
-        self.assertTrue(baseline_optimizer.optimize('mock-baseline.png'))
+    VIRTUAL_DIRECTORY = 'virtual/softwarecompositing'
+
+    def _appendVirtualSuffix(self, results_by_directory):
+        new_results_by_directory = {}
+        for directory in results_by_directory:
+            new_results_by_directory[directory + '/' + self.VIRTUAL_DIRECTORY] = results_by_directory[directory]
+        return new_results_by_directory
+
+    def _assertOneLevelOptimization(self, results_by_directory, expected_new_results_by_directory, baseline_name, create_mock_files=False):
+        baseline_optimizer = TestBaselineOptimizer(results_by_directory, create_mock_files, baseline_name)
+        self.assertTrue(baseline_optimizer.optimize(baseline_name))
+        if type(expected_new_results_by_directory) != list:
+            expected_new_results_by_directory = [expected_new_results_by_directory]
         self.assertEqual(baseline_optimizer.new_results_by_directory, expected_new_results_by_directory)
 
+    def _assertOptimization(self, results_by_directory, expected_new_results_by_directory):
+        baseline_name = 'mock-baseline.png'
+        self._assertOneLevelOptimization(results_by_directory, expected_new_results_by_directory, baseline_name)
+
+        results_by_directory = self._appendVirtualSuffix(results_by_directory)
+        expected_new_results_by_directory = self._appendVirtualSuffix(expected_new_results_by_directory)
+        baseline_name = self.VIRTUAL_DIRECTORY + '/' + baseline_name
+        self._assertOneLevelOptimization(results_by_directory, [expected_new_results_by_directory, expected_new_results_by_directory], baseline_name)
+
     def test_move_baselines(self):
         host = MockHost()
         host.filesystem.write_binary_file('/mock-checkout/LayoutTests/platform/win/another/test-expected.txt', 'result A')
-        host.filesystem.write_binary_file('/mock-checkout/LayoutTests/platform/chromium-mac/another/test-expected.txt', 'result A')
+        host.filesystem.write_binary_file('/mock-checkout/LayoutTests/platform/mac/another/test-expected.txt', 'result A')
         host.filesystem.write_binary_file('/mock-checkout/LayoutTests/another/test-expected.txt', 'result B')
         baseline_optimizer = BaselineOptimizer(host, host.port_factory.all_port_names())
         baseline_optimizer._move_baselines('another/test-expected.txt', {
             'LayoutTests/platform/win': 'aaa',
-            'LayoutTests/platform/chromium-mac': 'aaa',
+            'LayoutTests/platform/mac': 'aaa',
             'LayoutTests': 'bbb',
         }, {
             'LayoutTests': 'aaa',
@@ -80,7 +127,7 @@
 
     def test_covers_mac_win_linux(self):
         self._assertOptimization({
-            'LayoutTests/platform/chromium-mac': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
+            'LayoutTests/platform/mac': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
             'LayoutTests/platform/win': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
             'LayoutTests/platform/linux': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
         }, {
@@ -89,7 +136,7 @@
 
     def test_overwrites_root(self):
         self._assertOptimization({
-            'LayoutTests/platform/chromium-mac': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
+            'LayoutTests/platform/mac': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
             'LayoutTests/platform/win': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
             'LayoutTests/platform/linux': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
             'LayoutTests': '1',
@@ -99,11 +146,11 @@
 
     def test_no_new_common_directory(self):
         self._assertOptimization({
-            'LayoutTests/platform/chromium-mac': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
+            'LayoutTests/platform/mac': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
             'LayoutTests/platform/linux': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
             'LayoutTests': '1',
         }, {
-            'LayoutTests/platform/chromium-mac': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
+            'LayoutTests/platform/mac': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
             'LayoutTests/platform/linux': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
             'LayoutTests': '1',
         })
@@ -111,58 +158,86 @@
 
     def test_no_common_directory(self):
         self._assertOptimization({
-            'LayoutTests/platform/chromium-mac': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
-            'LayoutTests/platform/chromium-android': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
+            'LayoutTests/platform/mac': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
+            'LayoutTests/platform/chromium': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
         }, {
-            'LayoutTests/platform/chromium-mac': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
-            'LayoutTests/platform/chromium-android': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
+            'LayoutTests/platform/mac': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
+            'LayoutTests/platform/chromium': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
         })
 
     def test_local_optimization(self):
         self._assertOptimization({
-            'LayoutTests/platform/chromium-mac': '1',
+            'LayoutTests/platform/mac': '1',
             'LayoutTests/platform/linux': '1',
             'LayoutTests/platform/linux-x86': '1',
         }, {
-            'LayoutTests/platform/chromium-mac': '1',
+            'LayoutTests/platform/mac': '1',
             'LayoutTests/platform/linux': '1',
         })
 
     def test_local_optimization_skipping_a_port_in_the_middle(self):
         self._assertOptimization({
-            'LayoutTests/platform/chromium-mac-snowleopard': '1',
+            'LayoutTests/platform/mac-snowleopard': '1',
             'LayoutTests/platform/win': '1',
             'LayoutTests/platform/linux-x86': '1',
         }, {
-            'LayoutTests/platform/chromium-mac-snowleopard': '1',
+            'LayoutTests/platform/mac-snowleopard': '1',
             'LayoutTests/platform/win': '1',
         })
 
     def test_baseline_redundant_with_root(self):
         self._assertOptimization({
-            'LayoutTests/platform/chromium-mac': '1',
+            'LayoutTests/platform/mac': '1',
             'LayoutTests/platform/win': '2',
             'LayoutTests': '2',
         }, {
-            'LayoutTests/platform/chromium-mac': '1',
+            'LayoutTests/platform/mac': '1',
             'LayoutTests': '2',
         })
 
     def test_root_baseline_unused(self):
         self._assertOptimization({
-            'LayoutTests/platform/chromium-mac': '1',
+            'LayoutTests/platform/mac': '1',
             'LayoutTests/platform/win': '2',
             'LayoutTests': '3',
         }, {
-            'LayoutTests/platform/chromium-mac': '1',
+            'LayoutTests/platform/mac': '1',
             'LayoutTests/platform/win': '2',
         })
 
     def test_root_baseline_unused_and_non_existant(self):
         self._assertOptimization({
-            'LayoutTests/platform/chromium-mac': '1',
+            'LayoutTests/platform/mac': '1',
             'LayoutTests/platform/win': '2',
         }, {
-            'LayoutTests/platform/chromium-mac': '1',
+            'LayoutTests/platform/mac': '1',
             'LayoutTests/platform/win': '2',
         })
+
+    def test_virtual_root_redundant_with_actual_root(self):
+        baseline_name = self.VIRTUAL_DIRECTORY + '/mock-baseline.png'
+        hash_of_two = hashlib.sha1('2').hexdigest()
+        expected_result = [{'LayoutTests/virtual/softwarecompositing': hash_of_two}, {'LayoutTests': hash_of_two}]
+        self._assertOneLevelOptimization({
+            'LayoutTests/' + self.VIRTUAL_DIRECTORY: '2',
+            'LayoutTests': '2',
+        }, expected_result, baseline_name, create_mock_files=True)
+
+    def test_virtual_root_redundant_with_ancestors(self):
+        baseline_name = self.VIRTUAL_DIRECTORY + '/mock-baseline.png'
+        hash_of_two = hashlib.sha1('2').hexdigest()
+        expected_result = [{'LayoutTests/virtual/softwarecompositing': hash_of_two}, {'LayoutTests': hash_of_two}]
+        self._assertOneLevelOptimization({
+            'LayoutTests/' + self.VIRTUAL_DIRECTORY: '2',
+            'LayoutTests/platform/mac': '2',
+            'LayoutTests/platform/win': '2',
+        }, expected_result, baseline_name, create_mock_files=True)
+
+    def test_virtual_root_not_redundant_with_ancestors(self):
+        baseline_name = self.VIRTUAL_DIRECTORY + '/mock-baseline.png'
+        hash_of_two = hashlib.sha1('2').hexdigest()
+        expected_result = [{'LayoutTests/virtual/softwarecompositing': hash_of_two}, {'LayoutTests/platform/mac': hash_of_two}]
+        self._assertOneLevelOptimization({
+            'LayoutTests/' + self.VIRTUAL_DIRECTORY: '2',
+            'LayoutTests/platform/mac': '2',
+        }, expected_result, baseline_name, create_mock_files=True)
diff --git a/Tools/Scripts/webkitpy/common/config/ports.py b/Tools/Scripts/webkitpy/common/config/ports.py
index ce2b78f..abea220 100644
--- a/Tools/Scripts/webkitpy/common/config/ports.py
+++ b/Tools/Scripts/webkitpy/common/config/ports.py
@@ -62,7 +62,7 @@
     def port(port_name):
         ports = {
             "chromium": ChromiumPort,
-            "chromium-android": ChromiumAndroidPort,
+            "chromium-android": AndroidPort,
             "chromium-xvfb": ChromiumXVFBPort,
         }
         return ports.get(port_name, ChromiumPort)()
@@ -107,7 +107,7 @@
         return self.script_shell_command("run-chromium-webkit-unit-tests")
 
 
-class ChromiumAndroidPort(ChromiumPort):
+class AndroidPort(ChromiumPort):
     port_flag_name = "chromium-android"
 
 
diff --git a/Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py b/Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py
index 4951596..36c6213 100644
--- a/Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py
+++ b/Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py
@@ -34,10 +34,8 @@
 
 import webkitpy.common.config.urls as config_urls
 from webkitpy.common.memoized import memoized
-from webkitpy.common.net.failuremap import FailureMap
 from webkitpy.common.net.layouttestresults import LayoutTestResults
 from webkitpy.common.net.networktransaction import NetworkTransaction
-from webkitpy.common.net.regressionwindow import RegressionWindow
 from webkitpy.common.system.logutils import get_logger
 from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
 
@@ -197,54 +195,6 @@
             )
         return build
 
-    def find_regression_window(self, red_build, look_back_limit=30):
-        if not red_build or red_build.is_green():
-            return RegressionWindow(None, None)
-        common_failures = None
-        current_build = red_build
-        build_after_current_build = None
-        look_back_count = 0
-        while current_build:
-            if current_build.is_green():
-                # current_build can't possibly have any failures in common
-                # with red_build because it's green.
-                break
-            results = current_build.layout_test_results()
-            # We treat a lack of results as if all the test failed.
-            # This occurs, for example, when we can't compile at all.
-            if results:
-                failures = set(results.failing_tests())
-                if common_failures == None:
-                    common_failures = failures
-                else:
-                    common_failures = common_failures.intersection(failures)
-                    if not common_failures:
-                        # current_build doesn't have any failures in common with
-                        # the red build we're worried about.  We assume that any
-                        # failures in current_build were due to flakiness.
-                        break
-            look_back_count += 1
-            if look_back_count > look_back_limit:
-                return RegressionWindow(None, current_build, failing_tests=common_failures)
-            build_after_current_build = current_build
-            current_build = current_build.previous_build()
-        # We must iterate at least once because red_build is red.
-        assert(build_after_current_build)
-        # Current build must either be green or have no failures in common
-        # with red build, so we've found our failure transition.
-        return RegressionWindow(current_build, build_after_current_build, failing_tests=common_failures)
-
-    def find_blameworthy_regression_window(self, red_build_number, look_back_limit=30, avoid_flakey_tests=True):
-        red_build = self.build(red_build_number)
-        regression_window = self.find_regression_window(red_build, look_back_limit)
-        if not regression_window.build_before_failure():
-            return None  # We ran off the limit of our search
-        # If avoid_flakey_tests, require at least 2 bad builds before we
-        # suspect a real failure transition.
-        if avoid_flakey_tests and regression_window.failing_build() == red_build:
-            return None
-        return regression_window
-
 
 class Build(object):
     def __init__(self, builder, build_number, revision, is_green):
@@ -267,10 +217,6 @@
     def results_zip_url(self):
         return "%s.zip" % self.results_url()
 
-    @memoized
-    def layout_test_results(self):
-        return self._builder.fetch_layout_test_results(self.results_url())
-
     def builder(self):
         return self._builder
 
@@ -415,18 +361,6 @@
             self._builder_by_name[name] = builder
         return builder
 
-    def failure_map(self):
-        failure_map = FailureMap()
-        revision_to_failing_bots = {}
-        for builder_status in self.builder_statuses():
-            if builder_status["is_green"]:
-                continue
-            builder = self.builder_with_name(builder_status["name"])
-            regression_window = builder.find_blameworthy_regression_window(builder_status["build_number"])
-            if regression_window:
-                failure_map.add_regression_window(builder, regression_window)
-        return failure_map
-
     # This makes fewer requests than calling Builder.latest_build would.  It grabs all builder
     # statuses in one request using self.builder_statuses (fetching /one_box_per_builder instead of builder pages).
     def _latest_builds_from_builders(self):
diff --git a/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_mock.py b/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_mock.py
index d20bdb7..10d03d6 100644
--- a/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_mock.py
+++ b/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_mock.py
@@ -61,29 +61,6 @@
             self._name, username, comments))
 
 
-class MockFailureMap(object):
-    def __init__(self, buildbot):
-        self._buildbot = buildbot
-
-    def is_empty(self):
-        return False
-
-    def filter_out_old_failures(self, is_old_revision):
-        pass
-
-    def failing_revisions(self):
-        return [29837]
-
-    def builders_failing_for(self, revision):
-        return [self._buildbot.builder_with_name("Builder1")]
-
-    def tests_failing_for(self, revision):
-        return ["mock-test-1"]
-
-    def failing_tests(self):
-        return set(["mock-test-1"])
-
-
 class MockBuildBot(object):
     def __init__(self):
         self._mock_builder1_status = {
@@ -108,6 +85,3 @@
 
     def light_tree_on_fire(self):
         self._mock_builder2_status["is_green"] = False
-
-    def failure_map(self):
-        return MockFailureMap(self)
diff --git a/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py b/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py
index 26b7b97..743e8e5 100644
--- a/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py
+++ b/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py
@@ -47,11 +47,6 @@
                 revision=build_number + 1000,
                 is_green=build_number < 4
             )
-            results = [self._mock_test_result(testname) for testname in failure(build_number)]
-            layout_test_results = LayoutTestResults(results)
-            def mock_layout_test_results():
-                return layout_test_results
-            build.layout_test_results = mock_layout_test_results
             return build
         self.builder._fetch_build = _mock_fetch_build
 
@@ -61,58 +56,10 @@
         self._install_fetch_build(lambda build_number: ["test1", "test2"])
 
     def test_latest_layout_test_results(self):
-        self.builder.fetch_layout_test_results = lambda results_url: LayoutTestResults([self._mock_test_result(testname) for testname in ["test1", "test2"]])
+        self.builder.fetch_layout_test_results = lambda results_url: LayoutTestResults(None)
         self.builder.accumulated_results_url = lambda: "http://dummy_url.org"
         self.assertTrue(self.builder.latest_layout_test_results())
 
-    def test_find_regression_window(self):
-        regression_window = self.builder.find_regression_window(self.builder.build(10))
-        self.assertEqual(regression_window.build_before_failure().revision(), 1003)
-        self.assertEqual(regression_window.failing_build().revision(), 1004)
-
-        regression_window = self.builder.find_regression_window(self.builder.build(10), look_back_limit=2)
-        self.assertIsNone(regression_window.build_before_failure())
-        self.assertEqual(regression_window.failing_build().revision(), 1008)
-
-    def test_none_build(self):
-        self.builder._fetch_build = lambda build_number: None
-        regression_window = self.builder.find_regression_window(self.builder.build(10))
-        self.assertIsNone(regression_window.build_before_failure())
-        self.assertIsNone(regression_window.failing_build())
-
-    def test_flaky_tests(self):
-        self._install_fetch_build(lambda build_number: ["test1"] if build_number % 2 else ["test2"])
-        regression_window = self.builder.find_regression_window(self.builder.build(10))
-        self.assertEqual(regression_window.build_before_failure().revision(), 1009)
-        self.assertEqual(regression_window.failing_build().revision(), 1010)
-
-    def test_failure_and_flaky(self):
-        self._install_fetch_build(lambda build_number: ["test1", "test2"] if build_number % 2 else ["test2"])
-        regression_window = self.builder.find_regression_window(self.builder.build(10))
-        self.assertEqual(regression_window.build_before_failure().revision(), 1003)
-        self.assertEqual(regression_window.failing_build().revision(), 1004)
-
-    def test_no_results(self):
-        self._install_fetch_build(lambda build_number: ["test1", "test2"] if build_number % 2 else ["test2"])
-        regression_window = self.builder.find_regression_window(self.builder.build(10))
-        self.assertEqual(regression_window.build_before_failure().revision(), 1003)
-        self.assertEqual(regression_window.failing_build().revision(), 1004)
-
-    def test_failure_after_flaky(self):
-        self._install_fetch_build(lambda build_number: ["test1", "test2"] if build_number > 6 else ["test3"])
-        regression_window = self.builder.find_regression_window(self.builder.build(10))
-        self.assertEqual(regression_window.build_before_failure().revision(), 1006)
-        self.assertEqual(regression_window.failing_build().revision(), 1007)
-
-    def test_find_blameworthy_regression_window(self):
-        self.assertEqual(self.builder.find_blameworthy_regression_window(10).revisions(), [1004])
-        self.assertIsNone(self.builder.find_blameworthy_regression_window(10, look_back_limit=2))
-        # Flakey test avoidance requires at least 2 red builds:
-        self.assertIsNone(self.builder.find_blameworthy_regression_window(4))
-        self.assertEqual(self.builder.find_blameworthy_regression_window(4, avoid_flakey_tests=False).revisions(), [1004])
-        # Green builder:
-        self.assertIsNone(self.builder.find_blameworthy_regression_window(3))
-
     def test_build_caching(self):
         self.assertEqual(self.builder.build(10), self.builder.build(10))
 
@@ -151,16 +98,6 @@
         self.assertIsNotNone(builder._fetch_build(1))
 
 
-class BuildTest(unittest.TestCase):
-    def test_layout_test_results(self):
-        buildbot = BuildBot()
-        builder = Builder(u"Foo Builder (test)", buildbot)
-        builder._fetch_file_from_results = lambda results_url, file_name: None
-        build = Build(builder, None, None, None)
-        # Test that layout_test_results() returns None if the fetch fails.
-        self.assertIsNone(build.layout_test_results())
-
-
 class BuildBotTest(unittest.TestCase):
 
     _example_one_box_status = '''
diff --git a/Tools/Scripts/webkitpy/common/net/failuremap.py b/Tools/Scripts/webkitpy/common/net/failuremap.py
deleted file mode 100644
index 746242e..0000000
--- a/Tools/Scripts/webkitpy/common/net/failuremap.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-# FIXME: This probably belongs in the buildbot module.
-class FailureMap(object):
-    def __init__(self):
-        self._failures = []
-
-    def add_regression_window(self, builder, regression_window):
-        self._failures.append({
-            'builder': builder,
-            'regression_window': regression_window,
-        })
-
-    def is_empty(self):
-        return not self._failures
-
-    def failing_revisions(self):
-        failing_revisions = [failure_info['regression_window'].revisions()
-                             for failure_info in self._failures]
-        return sorted(set(sum(failing_revisions, [])))
-
-    def builders_failing_for(self, revision):
-        return self._builders_failing_because_of([revision])
-
-    def tests_failing_for(self, revision):
-        tests = [failure_info['regression_window'].failing_tests()
-                 for failure_info in self._failures
-                 if revision in failure_info['regression_window'].revisions()
-                    and failure_info['regression_window'].failing_tests()]
-        result = set()
-        for test in tests:
-            result = result.union(test)
-        return sorted(result)
-
-    def failing_tests(self):
-        return set(sum([self.tests_failing_for(revision) for revision in self.failing_revisions()], []))
-
-    def _old_failures(self, is_old_failure):
-        return filter(lambda revision: is_old_failure(revision),
-                      self.failing_revisions())
-
-    def _builders_failing_because_of(self, revisions):
-        revision_set = set(revisions)
-        return [failure_info['builder'] for failure_info in self._failures
-                if revision_set.intersection(
-                    failure_info['regression_window'].revisions())]
-
-    # FIXME: We should re-process old failures after some time delay.
-    # https://bugs.webkit.org/show_bug.cgi?id=36581
-    def filter_out_old_failures(self, is_old_failure):
-        old_failures = self._old_failures(is_old_failure)
-        old_failing_builder_names = set([builder.name()
-            for builder in self._builders_failing_because_of(old_failures)])
-
-        # We filter out all the failing builders that could have been caused
-        # by old_failures.  We could miss some new failures this way, but
-        # empirically, this reduces the amount of spam we generate.
-        failures = self._failures
-        self._failures = [failure_info for failure_info in failures
-            if failure_info['builder'].name() not in old_failing_builder_names]
-        self._cache = {}
diff --git a/Tools/Scripts/webkitpy/common/net/failuremap_unittest.py b/Tools/Scripts/webkitpy/common/net/failuremap_unittest.py
deleted file mode 100644
index 0bede97..0000000
--- a/Tools/Scripts/webkitpy/common/net/failuremap_unittest.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright (c) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import unittest2 as unittest
-
-from webkitpy.common.net.buildbot import Build
-from webkitpy.common.net.failuremap import *
-from webkitpy.common.net.regressionwindow import RegressionWindow
-from webkitpy.common.net.buildbot.buildbot_mock import MockBuilder
-
-
-class FailureMapTest(unittest.TestCase):
-    builder1 = MockBuilder("Builder1")
-    builder2 = MockBuilder("Builder2")
-
-    build1a = Build(builder1, build_number=22, revision=1233, is_green=True)
-    build1b = Build(builder1, build_number=23, revision=1234, is_green=False)
-    build2a = Build(builder2, build_number=89, revision=1233, is_green=True)
-    build2b = Build(builder2, build_number=90, revision=1235, is_green=False)
-
-    regression_window1 = RegressionWindow(build1a, build1b, failing_tests=[u'test1', u'test1'])
-    regression_window2 = RegressionWindow(build2a, build2b, failing_tests=[u'test1'])
-
-    def _make_failure_map(self):
-        failure_map = FailureMap()
-        failure_map.add_regression_window(self.builder1, self.regression_window1)
-        failure_map.add_regression_window(self.builder2, self.regression_window2)
-        return failure_map
-
-    def test_failing_revisions(self):
-        failure_map = self._make_failure_map()
-        self.assertEqual(failure_map.failing_revisions(), [1234, 1235])
-
-    def test_new_failures(self):
-        failure_map = self._make_failure_map()
-        failure_map.filter_out_old_failures(lambda revision: False)
-        self.assertEqual(failure_map.failing_revisions(), [1234, 1235])
-
-    def test_new_failures_with_old_revisions(self):
-        failure_map = self._make_failure_map()
-        failure_map.filter_out_old_failures(lambda revision: revision == 1234)
-        self.assertEqual(failure_map.failing_revisions(), [])
-
-    def test_new_failures_with_more_old_revisions(self):
-        failure_map = self._make_failure_map()
-        failure_map.filter_out_old_failures(lambda revision: revision == 1235)
-        self.assertEqual(failure_map.failing_revisions(), [1234])
-
-    def test_tests_failing_for(self):
-        failure_map = self._make_failure_map()
-        self.assertEqual(failure_map.tests_failing_for(1234), [u'test1'])
-
-    def test_failing_tests(self):
-        failure_map = self._make_failure_map()
-        self.assertEqual(failure_map.failing_tests(), set([u'test1']))
diff --git a/Tools/Scripts/webkitpy/common/net/layouttestresults.py b/Tools/Scripts/webkitpy/common/net/layouttestresults.py
index b8cb157..af18a02 100644
--- a/Tools/Scripts/webkitpy/common/net/layouttestresults.py
+++ b/Tools/Scripts/webkitpy/common/net/layouttestresults.py
@@ -26,20 +26,58 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+import json
 import logging
 
-from webkitpy.common.net.resultsjsonparser import ResultsJSONParser
-from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup, SoupStrainer
-from webkitpy.layout_tests.models import test_results
-from webkitpy.layout_tests.models import test_failures
+from webkitpy.common.memoized import memoized
+from webkitpy.layout_tests.layout_package import json_results_generator
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models.test_expectations import TestExpectations
 
 _log = logging.getLogger(__name__)
 
 
-# FIXME: This should be unified with all the layout test results code in the layout_tests package
-# This doesn't belong in common.net, but we don't have a better place for it yet.
-def path_for_layout_test(test_name):
-    return "LayoutTests/%s" % test_name
+# These are helper functions for navigating the results json structure.
+def for_each_test(tree, handler, prefix=''):
+    for key in tree:
+        new_prefix = (prefix + '/' + key) if prefix else key
+        if 'actual' not in tree[key]:
+            for_each_test(tree[key], handler, new_prefix)
+        else:
+            handler(new_prefix, tree[key])
+
+
+def result_for_test(tree, test):
+    parts = test.split('/')
+    for part in parts:
+        tree = tree[part]
+    return tree
+
+
+class JSONTestResult(object):
+    def __init__(self, test_name, result_dict):
+        self._test_name = test_name
+        self._result_dict = result_dict
+
+    def did_pass_or_run_as_expected(self):
+        return self.did_pass() or self.did_run_as_expected()
+
+    def did_pass(self):
+        return test_expectations.PASS in self._actual_as_tokens()
+
+    def did_run_as_expected(self):
+        return 'is_unexpected' not in self._result_dict
+
+    def _tokenize(self, results_string):
+        tokens = map(TestExpectations.expectation_from_string, results_string.split(' '))
+        if None in tokens:
+            _log.warning("Unrecognized result in %s" % results_string)
+        return set(tokens)
+
+    @memoized
+    def _actual_as_tokens(self):
+        actual_results = self._result_dict['actual']
+        return self._tokenize(actual_results)
 
 
 # FIXME: This should be unified with ResultsSummary or other NRWT layout tests code
@@ -50,42 +88,18 @@
     def results_from_string(cls, string):
         if not string:
             return None
-        test_results = ResultsJSONParser.parse_results_json(string)
-        if not test_results:
+
+        content_string = json_results_generator.strip_json_wrapper(string)
+        json_dict = json.loads(content_string)
+        if not json_dict:
             return None
-        return cls(test_results)
+        return cls(json_dict)
 
-    def __init__(self, test_results):
-        self._test_results = test_results
-        self._failure_limit_count = None
-        self._unit_test_failures = []
+    def __init__(self, parsed_json):
+        self._results = parsed_json
 
-    # FIXME: run-webkit-tests should store the --exit-after-N-failures value
-    # (or some indication of early exit) somewhere in the results.json
-    # file.  Until it does, callers should set the limit to
-    # --exit-after-N-failures value used in that run.  Consumers of LayoutTestResults
-    # may use that value to know if absence from the failure list means PASS.
-    # https://bugs.webkit.org/show_bug.cgi?id=58481
-    def set_failure_limit_count(self, limit):
-        self._failure_limit_count = limit
+    def run_was_interrupted(self):
+        return self._results["interrupted"]
 
-    def failure_limit_count(self):
-        return self._failure_limit_count
-
-    def test_results(self):
-        return self._test_results
-
-    def results_matching_failure_types(self, failure_types):
-        return [result for result in self._test_results if result.has_failure_matching_types(*failure_types)]
-
-    def tests_matching_failure_types(self, failure_types):
-        return [result.test_name for result in self.results_matching_failure_types(failure_types)]
-
-    def failing_test_results(self):
-        return self.results_matching_failure_types(test_failures.ALL_FAILURE_CLASSES)
-
-    def failing_tests(self):
-        return [result.test_name for result in self.failing_test_results()] + self._unit_test_failures
-
-    def add_unit_test_failures(self, unit_test_results):
-        self._unit_test_failures = unit_test_results
+    def blink_revision(self):
+        return int(self._results["blink_revision"])
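
For reference, a minimal sketch of how the new helpers walk the nested "tests" dictionary from full_results.json. The tree below is a made-up example, and the import assumes the patched module above:

    from webkitpy.common.net.layouttestresults import for_each_test, result_for_test

    # A tiny results tree in the same shape as the "tests" section of full_results.json.
    tree = {
        "fast": {
            "dom": {
                "prototype-taco.html": {"expected": "PASS", "actual": "PASS FAIL", "is_unexpected": True},
            },
        },
    }

    names = []
    for_each_test(tree, lambda name, result: names.append(name))
    # names == ["fast/dom/prototype-taco.html"]

    # result_for_test() descends the same tree by the slash-separated test name.
    assert result_for_test(tree, "fast/dom/prototype-taco.html")["actual"] == "PASS FAIL"
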
diff --git a/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py b/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py
index ea4e927..89f2c37 100644
--- a/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py
+++ b/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py
@@ -36,12 +36,67 @@
 
 
 class LayoutTestResultsTest(unittest.TestCase):
-    def test_set_failure_limit_count(self):
-        results = LayoutTestResults([])
-        self.assertIsNone(results.failure_limit_count())
-        results.set_failure_limit_count(10)
-        self.assertEqual(results.failure_limit_count(), 10)
+    # The real files have no whitespace, but newlines make this much more readable.
+    example_full_results_json = """ADD_RESULTS({
+    "tests": {
+        "fast": {
+            "dom": {
+                "prototype-inheritance.html": {
+                    "expected": "PASS",
+                    "actual": "FAIL",
+                    "is_unexpected": true
+                },
+                "prototype-banana.html": {
+                    "expected": "FAIL",
+                    "actual": "PASS",
+                    "is_unexpected": true
+                },
+                "prototype-taco.html": {
+                    "expected": "PASS",
+                    "actual": "PASS FAIL",
+                    "is_unexpected": true
+                },
+                "prototype-chocolate.html": {
+                    "expected": "FAIL",
+                    "actual": "FAIL"
+                },
+                "prototype-strawberry.html": {
+                    "expected": "PASS",
+                    "actual": "FAIL PASS",
+                    "is_unexpected": true
+                }
+            }
+        },
+        "svg": {
+            "dynamic-updates": {
+                "SVGFEDropShadowElement-dom-stdDeviation-attr.html": {
+                    "expected": "PASS",
+                    "actual": "IMAGE",
+                    "has_stderr": true,
+                    "is_unexpected": true
+                }
+            }
+        }
+    },
+    "skipped": 450,
+    "num_regressions": 15,
+    "layout_tests_dir": "\/b\/build\/slave\/Webkit_Mac10_5\/build\/src\/third_party\/WebKit\/LayoutTests",
+    "version": 3,
+    "num_passes": 77,
+    "has_pretty_patch": false,
+    "fixable": 1220,
+    "num_flaky": 0,
+    "blink_revision": "1234",
+    "has_wdiff": false
+});"""
 
     def test_results_from_string(self):
         self.assertIsNone(LayoutTestResults.results_from_string(None))
         self.assertIsNone(LayoutTestResults.results_from_string(""))
+
+    def test_was_interrupted(self):
+        self.assertTrue(LayoutTestResults.results_from_string('ADD_RESULTS({"tests":{},"interrupted":true});').run_was_interrupted())
+        self.assertFalse(LayoutTestResults.results_from_string('ADD_RESULTS({"tests":{},"interrupted":false});').run_was_interrupted())
+
+    def test_blink_revision(self):
+        self.assertEqual(LayoutTestResults.results_from_string(self.example_full_results_json).blink_revision(), 1234)
diff --git a/Tools/Scripts/webkitpy/common/net/resultsjsonparser.py b/Tools/Scripts/webkitpy/common/net/resultsjsonparser.py
deleted file mode 100644
index 51a6fe0..0000000
--- a/Tools/Scripts/webkitpy/common/net/resultsjsonparser.py
+++ /dev/null
@@ -1,154 +0,0 @@
-# Copyright (c) 2010, Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import json
-import logging
-
-from webkitpy.common.memoized import memoized
-# FIXME: common should never import from new-run-webkit-tests, one of these files needs to move.
-from webkitpy.layout_tests.layout_package import json_results_generator
-from webkitpy.layout_tests.models import test_expectations, test_results, test_failures
-from webkitpy.layout_tests.models.test_expectations import TestExpectations
-
-_log = logging.getLogger(__name__)
-
-
-# These are helper functions for navigating the results json structure.
-def for_each_test(tree, handler, prefix=''):
-    for key in tree:
-        new_prefix = (prefix + '/' + key) if prefix else key
-        if 'actual' not in tree[key]:
-            for_each_test(tree[key], handler, new_prefix)
-        else:
-            handler(new_prefix, tree[key])
-
-
-def result_for_test(tree, test):
-    parts = test.split('/')
-    for part in parts:
-        tree = tree[part]
-    return tree
-
-
-# Wrapper around the dictionaries returned from the json.
-# Eventually the .json should just serialize the TestFailure objects
-# directly and we won't need this.
-class JSONTestResult(object):
-    def __init__(self, test_name, result_dict):
-        self._test_name = test_name
-        self._result_dict = result_dict
-
-    def did_pass_or_run_as_expected(self):
-        return self.did_pass() or self.did_run_as_expected()
-
-    def did_pass(self):
-        return test_expectations.PASS in self._actual_as_tokens()
-
-    def did_run_as_expected(self):
-        actual_results = self._actual_as_tokens()
-        expected_results = self._expected_as_tokens()
-        # FIXME: We should only call remove_pixel_failures when this JSONResult
-        # came from a test run without pixel tests!
-        if not TestExpectations.has_pixel_failures(actual_results):
-            expected_results = TestExpectations.remove_pixel_failures(expected_results)
-        for actual_result in actual_results:
-            if not TestExpectations.result_was_expected(actual_result, expected_results, False):
-                return False
-        return True
-
-    def _tokenize(self, results_string):
-        tokens = map(TestExpectations.expectation_from_string, results_string.split(' '))
-        if None in tokens:
-            _log.warning("Unrecognized result in %s" % results_string)
-        return set(tokens)
-
-    @memoized
-    def _actual_as_tokens(self):
-        actual_results = self._result_dict['actual']
-        return self._tokenize(actual_results)
-
-    @memoized
-    def _expected_as_tokens(self):
-        actual_results = self._result_dict['expected']
-        return self._tokenize(actual_results)
-
-    def _failure_types_from_actual_result(self, actual):
-        # FIXME: There doesn't seem to be a full list of all possible values of
-        # 'actual' anywhere.
-        #
-        # FIXME: TEXT, IMAGE_PLUS_TEXT, and AUDIO are obsolete but we keep them for
-        # now so that we can parse old results.json files.
-        if actual == test_expectations.PASS:
-            return []
-        elif actual == test_expectations.FAIL:
-            return [test_failures.FailureTextMismatch(), test_failures.FailureImageHashMismatch(), test_failures.FailureAudioMismatch()]
-        elif actual == test_expectations.TEXT:
-            return [test_failures.FailureTextMismatch()]
-        elif actual == test_expectations.IMAGE:
-            return [test_failures.FailureImageHashMismatch()]
-        elif actual == test_expectations.IMAGE_PLUS_TEXT:
-            return [test_failures.FailureImageHashMismatch(), test_failures.FailureTextMismatch()]
-        elif actual == test_expectations.AUDIO:
-            return [test_failures.FailureAudioMismatch()]
-        elif actual == test_expectations.TIMEOUT:
-            return [test_failures.FailureTimeout()]
-        elif actual == test_expectations.CRASH:
-            # NOTE: We don't know what process crashed from the json, just that a process crashed.
-            return [test_failures.FailureCrash()]
-        elif actual == test_expectations.MISSING:
-            return [test_failures.FailureMissingResult(), test_failures.FailureMissingImageHash(), test_failures.FailureMissingImage()]
-        else:
-            _log.warning("Failed to handle: %s" % self._result_dict['actual'])
-            return []
-
-    def _failures(self):
-        if self.did_pass():
-            return []
-        return sum(map(self._failure_types_from_actual_result, self._actual_as_tokens()), [])
-
-    def test_result(self):
-        # FIXME: Optionally pull in the test runtime from times_ms.json.
-        return test_results.TestResult(self._test_name, self._failures())
-
-
-class ResultsJSONParser(object):
-    @classmethod
-    def parse_results_json(cls, json_string):
-        if not json_results_generator.has_json_wrapper(json_string):
-            return None
-
-        content_string = json_results_generator.strip_json_wrapper(json_string)
-        json_dict = json.loads(content_string)
-
-        json_results = []
-        for_each_test(json_dict['tests'], lambda test, result: json_results.append(JSONTestResult(test, result)))
-
-        # FIXME: What's the short sexy python way to filter None?
-        # I would use [foo.bar() for foo in foos if foo.bar()] but bar() is expensive.
-        unexpected_failures = [result.test_result() for result in json_results if not result.did_pass_or_run_as_expected()]
-        return filter(lambda a: a, unexpected_failures)
diff --git a/Tools/Scripts/webkitpy/common/net/resultsjsonparser_unittest.py b/Tools/Scripts/webkitpy/common/net/resultsjsonparser_unittest.py
deleted file mode 100644
index d5bba66..0000000
--- a/Tools/Scripts/webkitpy/common/net/resultsjsonparser_unittest.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# Copyright (c) 2010, Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import unittest2 as unittest
-
-from webkitpy.common.net.resultsjsonparser import ResultsJSONParser
-from webkitpy.layout_tests.models import test_results
-from webkitpy.layout_tests.models import test_failures
-
-
-class ResultsJSONParserTest(unittest.TestCase):
-    # The real files have no whitespace, but newlines make this much more readable.
-
-    _example_full_results_json = """ADD_RESULTS({
-    "tests": {
-        "fast": {
-            "dom": {
-                "prototype-inheritance.html": {
-                    "expected": "PASS",
-                    "actual": "FAIL"
-                },
-                "prototype-banana.html": {
-                    "expected": "FAIL",
-                    "actual": "PASS"
-                },
-                "prototype-taco.html": {
-                    "expected": "PASS",
-                    "actual": "PASS FAIL"
-                },
-                "prototype-chocolate.html": {
-                    "expected": "FAIL",
-                    "actual": "FAIL"
-                },
-                "prototype-strawberry.html": {
-                    "expected": "PASS",
-                    "actual": "FAIL PASS"
-                }
-            }
-        },
-        "svg": {
-            "dynamic-updates": {
-                "SVGFEDropShadowElement-dom-stdDeviation-attr.html": {
-                    "expected": "PASS",
-                    "actual": "IMAGE",
-                    "has_stderr": true
-                }
-            }
-        }
-    },
-    "skipped": 450,
-    "num_regressions": 15,
-    "layout_tests_dir": "\/b\/build\/slave\/Webkit_Mac10_5\/build\/src\/third_party\/WebKit\/LayoutTests",
-    "version": 3,
-    "num_passes": 77,
-    "has_pretty_patch": false,
-    "fixable": 1220,
-    "num_flaky": 0,
-    "has_wdiff": false
-});"""
-
-    def test_basic(self):
-        expected_results = [
-            test_results.TestResult("svg/dynamic-updates/SVGFEDropShadowElement-dom-stdDeviation-attr.html", [test_failures.FailureImageHashMismatch()], 0),
-            test_results.TestResult("fast/dom/prototype-inheritance.html", [test_failures.FailureTextMismatch(), test_failures.FailureImageHashMismatch(), test_failures.FailureAudioMismatch()], 0),
-        ]
-        results = ResultsJSONParser.parse_results_json(self._example_full_results_json)
-        self.assertEqual(expected_results, results)
diff --git a/Tools/Scripts/webkitpy/common/system/autoinstall.py b/Tools/Scripts/webkitpy/common/system/autoinstall.py
index e5f1b54..90f5365 100644
--- a/Tools/Scripts/webkitpy/common/system/autoinstall.py
+++ b/Tools/Scripts/webkitpy/common/system/autoinstall.py
@@ -39,7 +39,7 @@
 import sys
 import tarfile
 import tempfile
-import urllib
+import urllib2 as urllib
 import urlparse
 import zipfile
 
diff --git a/Tools/Scripts/webkitpy/common/system/executive.py b/Tools/Scripts/webkitpy/common/system/executive.py
index 3b3b7ef..1af0831 100644
--- a/Tools/Scripts/webkitpy/common/system/executive.py
+++ b/Tools/Scripts/webkitpy/common/system/executive.py
@@ -51,7 +51,12 @@
                  script_args=None,
                  exit_code=None,
                  output=None,
-                 cwd=None):
+                 cwd=None,
+                 output_limit=500):
+        shortened_output = output
+        if output and output_limit and len(output) > output_limit:
+            shortened_output = "Last %s characters of output:\n%s" % (output_limit, output[-output_limit:])
+
         if not message:
             message = 'Failed to run "%s"' % repr(script_args)
             if exit_code:
@@ -59,18 +64,16 @@
             if cwd:
                 message += " cwd: %s" % cwd
 
+        if shortened_output:
+            message += "\n\noutput: %s" % shortened_output
+
         Exception.__init__(self, message)
         self.script_args = script_args # 'args' is already used by Exception
         self.exit_code = exit_code
         self.output = output
         self.cwd = cwd
 
-    def message_with_output(self, output_limit=500):
-        if self.output:
-            if output_limit and len(self.output) > output_limit:
-                return u"%s\n\nLast %s characters of output:\n%s" % \
-                    (self, output_limit, self.output[-output_limit:])
-            return u"%s\n\n%s" % (self, self.output)
+    def message_with_output(self):
         return unicode(self)
 
     def command_name(self):
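
A small sketch of the new ScriptError behavior, assuming the patched module above: the (possibly truncated) output is now folded into the exception message at construction time, so message_with_output() simply returns unicode(self).

    from webkitpy.common.system.executive import ScriptError

    # 501 characters of output against the default output_limit of 500.
    error = ScriptError('Build step failed', output='ab' + '1' * 499)
    # The message already carries "output: Last 500 characters of output:" plus
    # the final 500 characters, so the two expressions below are the same string.
    assert error.message_with_output() == unicode(error)
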
diff --git a/Tools/Scripts/webkitpy/common/system/executive_unittest.py b/Tools/Scripts/webkitpy/common/system/executive_unittest.py
index bd13120..be3dac3 100644
--- a/Tools/Scripts/webkitpy/common/system/executive_unittest.py
+++ b/Tools/Scripts/webkitpy/common/system/executive_unittest.py
@@ -54,15 +54,15 @@
         error = ScriptError('My custom message!', '', -1)
         self.assertEqual(error.message_with_output(), 'My custom message!')
         error = ScriptError('My custom message!', '', -1, 'My output.')
-        self.assertEqual(error.message_with_output(), 'My custom message!\n\nMy output.')
+        self.assertEqual(error.message_with_output(), 'My custom message!\n\noutput: My output.')
         error = ScriptError('', 'my_command!', -1, 'My output.', '/Users/username/blah')
-        self.assertEqual(error.message_with_output(), 'Failed to run "\'my_command!\'" exit_code: -1 cwd: /Users/username/blah\n\nMy output.')
+        self.assertEqual(error.message_with_output(), 'Failed to run "\'my_command!\'" exit_code: -1 cwd: /Users/username/blah\n\noutput: My output.')
         error = ScriptError('', 'my_command!', -1, 'ab' + '1' * 499)
-        self.assertEqual(error.message_with_output(), 'Failed to run "\'my_command!\'" exit_code: -1\n\nLast 500 characters of output:\nb' + '1' * 499)
+        self.assertEqual(error.message_with_output(), 'Failed to run "\'my_command!\'" exit_code: -1\n\noutput: Last 500 characters of output:\nb' + '1' * 499)
 
     def test_message_with_tuple(self):
         error = ScriptError('', ('my', 'command'), -1, 'My output.', '/Users/username/blah')
-        self.assertEqual(error.message_with_output(), 'Failed to run "(\'my\', \'command\')" exit_code: -1 cwd: /Users/username/blah\n\nMy output.')
+        self.assertEqual(error.message_with_output(), 'Failed to run "(\'my\', \'command\')" exit_code: -1 cwd: /Users/username/blah\n\noutput: My output.')
 
 def never_ending_command():
     """Arguments for a command that will never end (useful for testing process
@@ -233,25 +233,6 @@
         pids = executive.running_pids()
         self.assertIn(os.getpid(), pids)
 
-    def serial_test_run_in_parallel(self):
-        # We run this test serially to avoid overloading the machine and throwing off the timing.
-
-        if sys.platform in ("win32", "cygwin"):
-            return  # This function isn't implemented properly on windows yet.
-        import multiprocessing
-
-        NUM_PROCESSES = 2
-        DELAY_SECS = 0.50
-        cmd_line = [sys.executable, '-c', 'import time; time.sleep(%f); print "hello"' % DELAY_SECS]
-        cwd = os.getcwd()
-        commands = [tuple([cmd_line, cwd])] * NUM_PROCESSES
-        start = time.time()
-        command_outputs = Executive().run_in_parallel(commands, processes=NUM_PROCESSES)
-        done = time.time()
-        self.assertTrue(done - start < NUM_PROCESSES * DELAY_SECS)
-        self.assertEqual([output[1] for output in command_outputs], ["hello\n"] * NUM_PROCESSES)
-        self.assertEqual([],  multiprocessing.active_children())
-
     def test_run_in_parallel_assert_nonempty(self):
         self.assertRaises(AssertionError, Executive().run_in_parallel, [])
 
diff --git a/Tools/Scripts/webkitpy/common/system/workspace_unittest.py b/Tools/Scripts/webkitpy/common/system/workspace_unittest.py
index 8262f6c..93b96cf 100644
--- a/Tools/Scripts/webkitpy/common/system/workspace_unittest.py
+++ b/Tools/Scripts/webkitpy/common/system/workspace_unittest.py
@@ -63,7 +63,7 @@
 Workspace.create_zip failed in /source/path:
 MOCK ScriptError
 
-MOCK output of child process
+output: MOCK output of child process
 """
         class MockZipFile(object):
             def __init__(self, path):
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py b/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
index 85c2ed8..988b81b 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
@@ -34,6 +34,7 @@
 create a final report.
 """
 
+import datetime
 import json
 import logging
 import random
@@ -113,6 +114,10 @@
             tests_to_run.sort(key=self._port.test_key)
         elif self._options.order == 'random':
             random.shuffle(tests_to_run)
+        elif self._options.order == 'random-seeded':
+            rnd = random.Random()
+            rnd.seed(4) # http://xkcd.com/221/
+            rnd.shuffle(tests_to_run)
 
         tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
         self._expectations.add_extra_skipped_tests(tests_in_other_chunks)
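
The 'random-seeded' order added above is a fixed-seed shuffle; a rough sketch of why that ordering is reproducible across runs (for a given Python version):

    import random

    def seeded_shuffle(tests):
        # A private Random instance with a fixed seed, as in the hunk above,
        # yields the same permutation every time for the same input list.
        rnd = random.Random()
        rnd.seed(4)  # http://xkcd.com/221/
        rnd.shuffle(tests)
        return tests

    assert seeded_shuffle(['a', 'b', 'c', 'd']) == seeded_shuffle(['a', 'b', 'c', 'd'])
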
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
index 4475410..58c10b1 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
@@ -43,7 +43,7 @@
 # FIXME: range() starts with 0 which makes if expectation checks harder
 # as PASS is 0.
 (PASS, FAIL, TEXT, IMAGE, IMAGE_PLUS_TEXT, AUDIO, TIMEOUT, CRASH, SKIP, WONTFIX,
- SLOW, REBASELINE, NEEDS_REBASELINE, MISSING, FLAKY, NOW, NONE) = range(17)
+ SLOW, REBASELINE, NEEDS_REBASELINE, NEEDS_MANUAL_REBASELINE, MISSING, FLAKY, NOW, NONE) = range(18)
 
 # FIXME: Perhaps these two routines should be part of the Port instead?
 BASELINE_SUFFIX_LIST = ('png', 'wav', 'txt')
@@ -70,6 +70,7 @@
 
     REBASELINE_MODIFIER = 'rebaseline'
     NEEDS_REBASELINE_MODIFIER = 'needsrebaseline'
+    NEEDS_MANUAL_REBASELINE_MODIFIER = 'needsmanualrebaseline'
     PASS_EXPECTATION = 'pass'
     SKIP_MODIFIER = 'skip'
     SLOW_MODIFIER = 'slow'
@@ -240,6 +241,7 @@
         'Pass': 'PASS',
         'Rebaseline': 'REBASELINE',
         'NeedsRebaseline': 'NeedsRebaseline',
+        'NeedsManualRebaseline': 'NeedsManualRebaseline',
         'Skip': 'SKIP',
         'Slow': 'SLOW',
         'Timeout': 'TIMEOUT',
@@ -358,7 +360,7 @@
             # FIXME: This is really a semantic warning and shouldn't be here. Remove when we drop the old syntax.
             warnings.append('A test marked Skip must not have other expectations.')
         elif not expectations:
-            if 'SKIP' not in modifiers and 'REBASELINE' not in modifiers and 'NeedsRebaseline' not in modifiers and 'SLOW' not in modifiers:
+            if 'SKIP' not in modifiers and 'REBASELINE' not in modifiers and 'NeedsRebaseline' not in modifiers and 'NeedsManualRebaseline' not in modifiers and 'SLOW' not in modifiers:
                 modifiers.append('SKIP')
             expectations = ['PASS']
 
@@ -419,6 +421,9 @@
     def is_flaky(self):
         return len(self.parsed_expectations) > 1
 
+    def is_whitespace_or_comment(self):
+        return bool(re.match("^\s*$", self.original_string.split('#')[0]))
+
     @staticmethod
     def create_passing_expectation(test):
         expectation_line = TestExpectationLine()
@@ -429,6 +434,29 @@
         expectation_line.matching_tests = [test]
         return expectation_line
 
+    @staticmethod
+    def merge_expectation_lines(line1, line2):
+        """Merges the expectations of line2 into line1 and returns a fresh object."""
+        if line1 is None:
+            return line2
+        if line2 is None:
+            return line1
+
+        # Don't merge original_string, filename, line_number or comment.
+        result = TestExpectationLine()
+        result.name = line1.name
+        result.path = line1.path
+        result.parsed_expectations = set(line1.parsed_expectations) | set(line2.parsed_expectations)
+        result.expectations = list(set(line1.expectations) | set(line2.expectations))
+        result.modifiers = list(set(line1.modifiers) | set(line2.modifiers))
+        result.parsed_modifiers = list(set(line1.parsed_modifiers) | set(line2.parsed_modifiers))
+        result.parsed_bug_modifiers = list(set(line1.parsed_bug_modifiers) | set(line2.parsed_bug_modifiers))
+        result.matching_configurations = set(line1.matching_configurations) | set(line2.matching_configurations)
+        result.matching_tests = list(set(line1.matching_tests) | set(line2.matching_tests))
+        result.warnings = list(set(line1.warnings) | set(line2.warnings))
+        result.is_skipped_outside_expectations_file = line1.is_skipped_outside_expectations_file or line2.is_skipped_outside_expectations_file
+        return result
+
     def to_string(self, test_configuration_converter, include_modifiers=True, include_expectations=True, include_comment=True):
         parsed_expectation_to_string = dict([[parsed_expectation, expectation_string] for expectation_string, parsed_expectation in TestExpectations.EXPECTATIONS.items()])
 
@@ -489,7 +517,7 @@
             else:
                 # FIXME: Make this all work with the mixed-cased modifiers (e.g. WontFix, Slow, etc).
                 modifier = modifier.upper()
-                if modifier in ('SLOW', 'SKIP', 'REBASELINE', 'NeedsRebaseline', 'WONTFIX'):
+                if modifier in ('SLOW', 'SKIP', 'REBASELINE', 'NeedsRebaseline', 'NeedsManualRebaseline', 'WONTFIX'):
                     new_expectations.append(TestExpectationParser._inverted_expectation_tokens.get(modifier))
                 else:
                     new_modifiers.append(TestExpectationParser._inverted_configuration_tokens.get(modifier, modifier))
@@ -634,16 +662,23 @@
         self._clear_expectations_for_test(test)
         del self._test_to_expectation_line[test]
 
-    def add_expectation_line(self, expectation_line, override_existing_matches=False):
+    def add_expectation_line(self, expectation_line,
+                             override_existing_matches=False,
+                             merge_existing_matches=False):
         """Returns a list of warnings encountered while matching modifiers."""
 
         if expectation_line.is_invalid():
             return
 
         for test in expectation_line.matching_tests:
-            if not override_existing_matches and self._already_seen_better_match(test, expectation_line):
+            if (not (override_existing_matches or merge_existing_matches)
+                    and self._already_seen_better_match(test, expectation_line)):
                 continue
 
+            if merge_existing_matches:
+                merge = TestExpectationLine.merge_expectation_lines
+                expectation_line = merge(self.get_expectation_line(test), expectation_line)
+
             self._clear_expectations_for_test(test)
             self._test_to_expectation_line[test] = expectation_line
             self._add_test(test, expectation_line)
@@ -804,7 +839,8 @@
                     'crash': CRASH,
                     'missing': MISSING,
                     'skip': SKIP,
-                    'needsrebaseline': NEEDS_REBASELINE}
+                    'needsrebaseline': NEEDS_REBASELINE,
+                    'needsmanualrebaseline': NEEDS_MANUAL_REBASELINE}
 
     EXPECTATIONS_TO_STRING = dict((k, v) for (v, k) in EXPECTATIONS.iteritems())
 
@@ -829,6 +865,7 @@
                  TestExpectationParser.SLOW_MODIFIER: SLOW,
                  TestExpectationParser.REBASELINE_MODIFIER: REBASELINE,
                  TestExpectationParser.NEEDS_REBASELINE_MODIFIER: NEEDS_REBASELINE,
+                 TestExpectationParser.NEEDS_MANUAL_REBASELINE_MODIFIER: NEEDS_MANUAL_REBASELINE,
                  'none': NONE}
 
     MODIFIERS_TO_STRING = dict((k, v) for (v, k) in MODIFIERS.iteritems())
@@ -855,7 +892,7 @@
             test_needs_rebaselining: whether test was marked as REBASELINE"""
         if result in expected_results:
             return True
-        if result in (TEXT, IMAGE, IMAGE_PLUS_TEXT, AUDIO, MISSING) and (NEEDS_REBASELINE in expected_results):
+        if result in (TEXT, IMAGE, IMAGE_PLUS_TEXT, AUDIO, MISSING) and (NEEDS_REBASELINE in expected_results or NEEDS_MANUAL_REBASELINE in expected_results):
             return True
         if result in (TEXT, IMAGE_PLUS_TEXT, AUDIO) and (FAIL in expected_results):
             return True
@@ -1025,8 +1062,14 @@
                 expectations_to_remove.append(expectation)
 
         for expectation in expectations_to_remove:
+            index = self._expectations.index(expectation)
             self._expectations.remove(expectation)
 
+            if index == len(self._expectations) or self._expectations[index].is_whitespace_or_comment():
+                while index and self._expectations[index - 1].is_whitespace_or_comment():
+                    index = index - 1
+                    self._expectations.pop(index)
+
         return self.list_to_string(self._expectations, self._parser._test_configuration_converter, modified_expectations)
 
     def remove_rebaselined_tests(self, except_these_tests, filename):
@@ -1064,7 +1107,10 @@
         bot_expectations = self._port.bot_expectations()
         for test_name in bot_expectations:
             expectation_line = self._parser.expectation_line_for_test(test_name, bot_expectations[test_name])
-            self._model.add_expectation_line(expectation_line, override_existing_matches=True)
+
+            # With the 'unexpected' ignore_flaky_tests option, bot expectations are
+            # merged into the existing expectation lines instead of overriding them.
+            merge = self._port.get_option('ignore_flaky_tests') == 'unexpected'
+            self._model.add_expectation_line(expectation_line, override_existing_matches=True, merge_existing_matches=merge)
 
     def add_expectation_line(self, expectation_line):
         self._model.add_expectation_line(expectation_line)
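
A minimal sketch of the merge semantics introduced above, with plain sets standing in for the TestExpectationLine fields (see test_bot_test_expectations_merge in the unit test that follows for the real class in action):

    # Expectations from the checked-in TestExpectations file for one test...
    file_expectations = set(['IMAGE'])
    # ...and the expectations reported by the bot for the same test.
    bot_expectations = set(['PASS', 'TIMEOUT'])

    # merge_expectation_lines() unions the two; an override would simply replace.
    merged = file_expectations | bot_expectations
    assert merged == set(['PASS', 'IMAGE', 'TIMEOUT'])
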
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
index 04617a9..8cc214e 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
@@ -60,6 +60,7 @@
                 self.get_test('failures/expected/image_checksum.html'),
                 self.get_test('failures/expected/crash.html'),
                 self.get_test('failures/expected/needsrebaseline.html'),
+                self.get_test('failures/expected/needsmanualrebaseline.html'),
                 self.get_test('failures/expected/missing_text.html'),
                 self.get_test('failures/expected/image.html'),
                 self.get_test('passes/text.html')]
@@ -69,6 +70,7 @@
 Bug(test) failures/expected/text.html [ Failure ]
 Bug(test) failures/expected/crash.html [ WontFix ]
 Bug(test) failures/expected/needsrebaseline.html [ NeedsRebaseline ]
+Bug(test) failures/expected/needsmanualrebaseline.html [ NeedsManualRebaseline ]
 Bug(test) failures/expected/missing_image.html [ Rebaseline Missing ]
 Bug(test) failures/expected/image_checksum.html [ WontFix ]
 Bug(test) failures/expected/image.html [ WontFix Mac ]
@@ -245,6 +247,8 @@
         self.assertTrue(match('failures/expected/crash.html', PASS, False))
         self.assertTrue(match('failures/expected/needsrebaseline.html', TEXT, True))
         self.assertFalse(match('failures/expected/needsrebaseline.html', CRASH, True))
+        self.assertTrue(match('failures/expected/needsmanualrebaseline.html', TEXT, True))
+        self.assertFalse(match('failures/expected/needsmanualrebaseline.html', CRASH, True))
         self.assertTrue(match('passes/text.html', PASS, False))
 
     def test_more_specific_override_resets_skip(self):
@@ -264,15 +268,42 @@
 
         expectations = TestExpectations(self._port, self.get_basic_tests())
         self.assertEqual(expectations.get_expectations(self.get_test(test_name)), set([IMAGE]))
+        self.assertEqual(expectations.get_modifiers(self.get_test(test_name)), ['Bug(x)'])
 
         def bot_expectations():
             return {test_name: ['PASS', 'IMAGE']}
         self._port.bot_expectations = bot_expectations
-        self._port._options.ignore_flaky = 'very-flaky'
+        self._port._options.ignore_flaky_tests = 'very-flaky'
 
         expectations = TestExpectations(self._port, self.get_basic_tests())
         self.assertEqual(expectations.get_expectations(self.get_test(test_name)), set([PASS, IMAGE]))
 
+        # The following line tests the actual behavior, which is not necessarily useful behavior.
+        # Existing modifiers from a test expectation file are overridden by the bot expectations.
+        self.assertEqual(expectations.get_modifiers(self.get_test(test_name)), [])
+
+    def test_bot_test_expectations_merge(self):
+        """Test that expectations are merged rather than overridden when using flaky option 'unexpected'."""
+        test_name1 = 'failures/expected/text.html'
+        test_name2 = 'passes/text.html'
+
+        expectations_dict = OrderedDict()
+        expectations_dict['expectations'] = "Bug(x) %s [ ImageOnlyFailure ]\nBug(x) %s [ Slow ]\n" % (test_name1, test_name2)
+        self._port.expectations_dict = lambda: expectations_dict
+
+        expectations = TestExpectations(self._port, self.get_basic_tests())
+        self.assertEqual(expectations.get_expectations(self.get_test(test_name1)), set([IMAGE]))
+        self.assertEqual(set(expectations.get_modifiers(self.get_test(test_name2))), set(['Bug(x)', 'SLOW']))
+
+        def bot_expectations():
+            return {test_name1: ['PASS', 'TIMEOUT'], test_name2: ['CRASH']}
+        self._port.bot_expectations = bot_expectations
+        self._port._options.ignore_flaky_tests = 'unexpected'
+
+        expectations = TestExpectations(self._port, self.get_basic_tests())
+        self.assertEqual(expectations.get_expectations(self.get_test(test_name1)), set([PASS, IMAGE, TIMEOUT]))
+        self.assertEqual(expectations.get_expectations(self.get_test(test_name2)), set([PASS, CRASH]))
+        self.assertEqual(set(expectations.get_modifiers(self.get_test(test_name2))), set(['Bug(x)', 'SLOW']))
 
 class SkippedTests(Base):
     def check(self, expectations, overrides, skips, lint=False):
@@ -519,15 +550,17 @@
 Bug(y) [ Win Mac Debug ] failures/expected/foo.html [ Crash ]
 """, actual_expectations)
 
-    def test_remove_line(self):
+    def test_remove_line_with_comments(self):
         host = MockHost()
         test_port = host.port_factory.get('test-win-xp', None)
         test_port.test_exists = lambda test: True
         test_port.test_isfile = lambda test: True
 
         test_config = test_port.test_configuration()
-        test_port.expectations_dict = lambda: {'expectations': """Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
-Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
+        test_port.expectations_dict = lambda: {'expectations': """Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
+
+ # This comment line should get stripped. As should the preceding line.
+Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
 """}
         expectations = TestExpectations(test_port)
 
@@ -537,6 +570,110 @@
         self.assertEqual("""Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
 """, actual_expectations)
 
+    def test_remove_line_with_comments_at_start(self):
+        host = MockHost()
+        test_port = host.port_factory.get('test-win-xp', None)
+        test_port.test_exists = lambda test: True
+        test_port.test_isfile = lambda test: True
+
+        test_config = test_port.test_configuration()
+        test_port.expectations_dict = lambda: {'expectations': """
+ # This comment line should get stripped. As should the preceding line.
+Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
+
+Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
+"""}
+        expectations = TestExpectations(test_port)
+
+        actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', test_config)
+        actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', host.port_factory.get('test-win-win7', None).test_configuration())
+
+        self.assertEqual("""
+Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
+""", actual_expectations)
+
+    def test_remove_line_with_comments_at_end_with_no_trailing_newline(self):
+        host = MockHost()
+        test_port = host.port_factory.get('test-win-xp', None)
+        test_port.test_exists = lambda test: True
+        test_port.test_isfile = lambda test: True
+
+        test_config = test_port.test_configuration()
+        test_port.expectations_dict = lambda: {'expectations': """Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
+
+ # This comment line should get stripped. As should the preceding line.
+Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]"""}
+        expectations = TestExpectations(test_port)
+
+        actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', test_config)
+        actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', host.port_factory.get('test-win-win7', None).test_configuration())
+
+        self.assertEqual("""Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]""", actual_expectations)
+
+    def test_remove_line_leaves_comments_for_next_line(self):
+        host = MockHost()
+        test_port = host.port_factory.get('test-win-xp', None)
+        test_port.test_exists = lambda test: True
+        test_port.test_isfile = lambda test: True
+
+        test_config = test_port.test_configuration()
+        test_port.expectations_dict = lambda: {'expectations': """
+ # This comment line should not get stripped.
+Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
+Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
+"""}
+        expectations = TestExpectations(test_port)
+
+        actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', test_config)
+        actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', host.port_factory.get('test-win-win7', None).test_configuration())
+
+        self.assertEqual("""
+ # This comment line should not get stripped.
+Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
+""", actual_expectations)
+
+    def test_remove_line_no_whitespace_lines(self):
+        host = MockHost()
+        test_port = host.port_factory.get('test-win-xp', None)
+        test_port.test_exists = lambda test: True
+        test_port.test_isfile = lambda test: True
+
+        test_config = test_port.test_configuration()
+        test_port.expectations_dict = lambda: {'expectations': """
+ # This comment line should get stripped.
+Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
+ # This comment line should not get stripped.
+Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
+"""}
+        expectations = TestExpectations(test_port)
+
+        actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', test_config)
+        actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', host.port_factory.get('test-win-win7', None).test_configuration())
+
+        self.assertEqual(""" # This comment line should not get stripped.
+Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
+""", actual_expectations)
+
+    def test_remove_first_line(self):
+        host = MockHost()
+        test_port = host.port_factory.get('test-win-xp', None)
+        test_port.test_exists = lambda test: True
+        test_port.test_isfile = lambda test: True
+
+        test_config = test_port.test_configuration()
+        test_port.expectations_dict = lambda: {'expectations': """Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
+ # This comment line should not get stripped.
+Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
+"""}
+        expectations = TestExpectations(test_port)
+
+        actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', test_config)
+        actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', host.port_factory.get('test-win-win7', None).test_configuration())
+
+        self.assertEqual(""" # This comment line should not get stripped.
+Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
+""", actual_expectations)
+
     def test_remove_flaky_line(self):
         host = MockHost()
         test_port = host.port_factory.get('test-win-xp', None)
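
A note on the new tests above: they all follow the same pattern. Build a mock port whose expectations file is an in-memory string, call remove_configuration_from_test() once for each configuration being dropped (a "[ Win Release ]" line covers both test-win-xp and test-win-win7, so it takes two calls to remove it completely), and compare the text returned by the last call. A minimal sketch of that pattern, assuming the MockHost and TestExpectations import paths already used by this unit test file:

    # Sketch only: mirrors the setup used by the new tests above.
    from webkitpy.common.host_mock import MockHost
    from webkitpy.layout_tests.models.test_expectations import TestExpectations

    host = MockHost()
    port = host.port_factory.get('test-win-xp', None)
    port.test_exists = lambda test: True
    port.test_isfile = lambda test: True
    port.expectations_dict = lambda: {'expectations':
        'Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]\n'
        'Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]\n'}

    expectations = TestExpectations(port)
    # Drop the Release expectation for both Windows configurations; only the
    # Debug line is expected to survive in the returned text.
    text = expectations.remove_configuration_from_test(
        'failures/expected/foo.html', port.test_configuration())
    text = expectations.remove_configuration_from_test(
        'failures/expected/foo.html',
        host.port_factory.get('test-win-win7', None).test_configuration())
    # text is now expected to be:
    # 'Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]\n'
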
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py b/Tools/Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py
index 3ad1d8a..c00a836 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py
@@ -126,13 +126,13 @@
 
     def test_num_failures_by_type(self):
         summary = summarized_results(self.port, expected=False, passing=False, flaky=False)
-        self.assertEquals(summary['num_failures_by_type'], {'CRASH': 1, 'MISSING': 0, 'TEXT': 0, 'IMAGE': 0, 'NEEDSREBASELINE': 0, 'PASS': 0, 'SKIP': 0, 'TIMEOUT': 2, 'IMAGE+TEXT': 0, 'FAIL': 0, 'AUDIO': 1})
+        self.assertEquals(summary['num_failures_by_type'], {'CRASH': 1, 'MISSING': 0, 'TEXT': 0, 'IMAGE': 0, 'NEEDSREBASELINE': 0, 'NEEDSMANUALREBASELINE': 0, 'PASS': 0, 'SKIP': 0, 'TIMEOUT': 2, 'IMAGE+TEXT': 0, 'FAIL': 0, 'AUDIO': 1})
 
         summary = summarized_results(self.port, expected=True, passing=False, flaky=False)
-        self.assertEquals(summary['num_failures_by_type'], {'CRASH': 1, 'MISSING': 0, 'TEXT': 0, 'IMAGE': 0, 'NEEDSREBASELINE': 0, 'PASS': 1, 'SKIP': 0, 'TIMEOUT': 1, 'IMAGE+TEXT': 0, 'FAIL': 0, 'AUDIO': 1})
+        self.assertEquals(summary['num_failures_by_type'], {'CRASH': 1, 'MISSING': 0, 'TEXT': 0, 'IMAGE': 0, 'NEEDSREBASELINE': 0, 'NEEDSMANUALREBASELINE': 0, 'PASS': 1, 'SKIP': 0, 'TIMEOUT': 1, 'IMAGE+TEXT': 0, 'FAIL': 0, 'AUDIO': 1})
 
         summary = summarized_results(self.port, expected=False, passing=True, flaky=False)
-        self.assertEquals(summary['num_failures_by_type'], {'CRASH': 0, 'MISSING': 0, 'TEXT': 0, 'IMAGE': 0, 'NEEDSREBASELINE': 0, 'PASS': 4, 'SKIP': 1, 'TIMEOUT': 0, 'IMAGE+TEXT': 0, 'FAIL': 0, 'AUDIO': 0})
+        self.assertEquals(summary['num_failures_by_type'], {'CRASH': 0, 'MISSING': 0, 'TEXT': 0, 'IMAGE': 0, 'NEEDSREBASELINE': 0, 'NEEDSMANUALREBASELINE': 0, 'PASS': 4, 'SKIP': 1, 'TIMEOUT': 0, 'IMAGE+TEXT': 0, 'FAIL': 0, 'AUDIO': 0})
 
     def test_svn_revision(self):
         self.port._options.builder_name = 'dummy builder'
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py b/Tools/Scripts/webkitpy/layout_tests/port/android.py
similarity index 98%
rename from Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py
rename to Tools/Scripts/webkitpy/layout_tests/port/android.py
index faa17b4..f294187 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/android.py
@@ -142,36 +142,46 @@
 MD5SUM_HOST_FILE_NAME = 'md5sum_bin_host'
 MD5SUM_DEVICE_PATH = '/data/local/tmp/' + MD5SUM_DEVICE_FILE_NAME
 
-# Shared pieces of information for the two supported test runners.
-class SharedDriverDetails(object):
+
+# Information required when running layout tests using content_shell as the test runner.
+class ContentShellDriverDetails():
     def device_cache_directory(self):
         return self.device_directory() + 'cache/'
+
     def device_fonts_directory(self):
         return self.device_directory() + 'fonts/'
+
     def device_forwarder_path(self):
         return self.device_directory() + 'forwarder'
+
     def device_fifo_directory(self):
         return '/data/data/' + self.package_name() + '/files/'
 
-# Information required when running layout tests using content_shell as the test runner.
-class ContentShellDriverDetails(SharedDriverDetails):
     def apk_name(self):
         return 'apks/ContentShell.apk'
+
     def package_name(self):
         return 'org.chromium.content_shell_apk'
+
     def activity_name(self):
         return self.package_name() + '/.ContentShellActivity'
+
     def library_name(self):
         return 'libcontent_shell_content_view.so'
+
     def additional_resources(self):
         return ['content_resources.pak', 'shell_resources.pak']
+
     def command_line_file(self):
         return '/data/local/tmp/content-shell-command-line'
+
     def additional_command_line_flags(self):
         return ['--dump-render-tree', '--encode-binary']
+
     def device_directory(self):
         return DEVICE_SOURCE_ROOT_DIR + 'content_shell/'
 
+
 # The AndroidCommands class encapsulates commands to communicate with an attached device.
 class AndroidCommands(object):
     _adb_command_path = None
@@ -346,18 +356,21 @@
         return int(re.findall('level: (\d+)', battery_status)[0])
 
 
-class ChromiumAndroidPort(chromium.ChromiumPort):
-    port_name = 'chromium-android'
+class AndroidPort(chromium.ChromiumPort):
+    port_name = 'android'
 
     # Avoid initializing the adb path [worker count]+1 times by storing it as a static member.
     _adb_path = None
 
     SUPPORTED_VERSIONS = ('android')
 
-    FALLBACK_PATHS = { 'android': [ 'chromium-android' ] + linux.LinuxPort.latest_platform_fallback_path() }
+    FALLBACK_PATHS = { 'android': [ 'android' ] + linux.LinuxPort.latest_platform_fallback_path() }
+
+    # Android has aac and mp3 codecs built in.
+    PORT_HAS_AUDIO_CODECS_BUILT_IN = True
 
     def __init__(self, host, port_name, **kwargs):
-        super(ChromiumAndroidPort, self).__init__(host, port_name, **kwargs)
+        super(AndroidPort, self).__init__(host, port_name, **kwargs)
 
         self._operating_system = 'android'
         self._version = 'icecreamsandwich'
@@ -422,7 +435,7 @@
         return self._host_port.check_wdiff(logging)
 
     def check_build(self, needs_http):
-        result = super(ChromiumAndroidPort, self).check_build(needs_http)
+        result = super(AndroidPort, self).check_build(needs_http)
         result = self._check_file_exists(self.path_to_md5sum(), 'md5sum utility') and result
         result = self._check_file_exists(self.path_to_md5sum_host(), 'md5sum host utility') and result
         result = self._check_file_exists(self.path_to_forwarder(), 'forwarder utility') and result
@@ -456,7 +469,7 @@
             additional_dirs = {}
         additional_dirs[PERF_TEST_PATH_PREFIX] = self.perf_tests_dir()
         additional_dirs[LAYOUT_TEST_PATH_PREFIX] = self.layout_tests_dir()
-        super(ChromiumAndroidPort, self).start_http_server(additional_dirs, number_of_servers)
+        super(AndroidPort, self).start_http_server(additional_dirs, number_of_servers)
 
     def create_driver(self, worker_number, no_timeout=False):
         return ChromiumAndroidDriver(self, worker_number, pixel_tests=self.get_option('pixel_tests'),
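
For quick reference, the flattened ContentShellDriverDetails class above (the former SharedDriverDetails base class is gone) composes its on-device paths as follows; a sketch only, using the renamed module path and the methods shown in the hunk:

    # Sketch: path composition in the merged ContentShellDriverDetails above.
    from webkitpy.layout_tests.port.android import ContentShellDriverDetails

    details = ContentShellDriverDetails()
    base = details.device_directory()            # DEVICE_SOURCE_ROOT_DIR + 'content_shell/'
    cache = details.device_cache_directory()     # base + 'cache/'
    fonts = details.device_fonts_directory()     # base + 'fonts/'
    forwarder = details.device_forwarder_path()  # base + 'forwarder'
    fifos = details.device_fifo_directory()      # '/data/data/' + package_name() + '/files/'
    flags = details.additional_command_line_flags()  # ['--dump-render-tree', '--encode-binary']
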
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/android_unittest.py
similarity index 74%
rename from Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py
rename to Tools/Scripts/webkitpy/layout_tests/port/android_unittest.py
index 06ec873..4f2f5e5 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/android_unittest.py
@@ -36,12 +36,13 @@
 from webkitpy.common.system.executive_mock import MockExecutive2
 from webkitpy.common.system.systemhost_mock import MockSystemHost
 
-from webkitpy.layout_tests.port import chromium_android
+from webkitpy.layout_tests.port import android
 from webkitpy.layout_tests.port import chromium_port_testcase
 from webkitpy.layout_tests.port import driver
 from webkitpy.layout_tests.port import driver_unittest
 from webkitpy.tool.mocktool import MockOptions
 
+
 # Any "adb" commands will be interpret by this class instead of executing actual
 # commansd on the file system, which we don't want to do.
 class MockAndroidDebugBridge:
@@ -83,28 +84,28 @@
                    '123456789ABCDEF3', '123456789ABCDEF4', '123456789ABCDEF5']
         output = 'List of devices attached\n'
         for serial in serials[:self._device_count]:
-          output += '%s\tdevice\n' % serial
+            output += '%s\tdevice\n' % serial
         return output
 
 
 class AndroidCommandsTest(unittest.TestCase):
     def setUp(self):
-        chromium_android.AndroidCommands._adb_command_path = None
-        chromium_android.AndroidCommands._adb_command_path_options = ['adb']
+        android.AndroidCommands._adb_command_path = None
+        android.AndroidCommands._adb_command_path_options = ['adb']
 
     def make_executive(self, device_count):
         self._mock_executive = MockAndroidDebugBridge(device_count)
         return MockExecutive2(run_command_fn=self._mock_executive.run_command)
 
     def make_android_commands(self, device_count, serial):
-        return chromium_android.AndroidCommands(self.make_executive(device_count), serial)
+        return android.AndroidCommands(self.make_executive(device_count), serial)
 
     # The "adb" binary with the latest version should be used.
     def serial_test_adb_command_path(self):
         executive = self.make_executive(0)
 
-        chromium_android.AndroidCommands.set_adb_command_path_options(['path1', 'path2', 'path3'])
-        self.assertEqual('path2', chromium_android.AndroidCommands.adb_command_path(executive))
+        android.AndroidCommands.set_adb_command_path_options(['path1', 'path2', 'path3'])
+        self.assertEqual('path2', android.AndroidCommands.adb_command_path(executive))
 
     # The used adb command should include the device's serial number, and get_serial() should reflect this.
     def test_adb_command_and_get_serial(self):
@@ -134,12 +135,12 @@
         self.assertEquals('adb -s 123456789ABCDEF0 pull bar foo', self._mock_executive.last_command())
 
 
-class ChromiumAndroidPortTest(chromium_port_testcase.ChromiumPortTestCase):
-    port_name = 'chromium-android'
-    port_maker = chromium_android.ChromiumAndroidPort
+class AndroidPortTest(chromium_port_testcase.ChromiumPortTestCase):
+    port_name = 'android'
+    port_maker = android.AndroidPort
 
     def make_port(self, **kwargs):
-        port = super(ChromiumAndroidPortTest, self).make_port(**kwargs)
+        port = super(AndroidPortTest, self).make_port(**kwargs)
         port._mock_adb = MockAndroidDebugBridge(kwargs.get('device_count', 1))
         port._executive = MockExecutive2(run_command_fn=port._mock_adb.run_command)
         return port
@@ -171,10 +172,10 @@
         self._mock_adb = MockAndroidDebugBridge(1)
         self._mock_executive = MockExecutive2(run_command_fn=self._mock_adb.run_command)
 
-        android_commands = chromium_android.AndroidCommands(self._mock_executive, '123456789ABCDEF0')
-        self._port = chromium_android.ChromiumAndroidPort(MockSystemHost(executive=self._mock_executive), 'chromium-android')
-        self._driver = chromium_android.ChromiumAndroidDriver(self._port, worker_number=0,
-            pixel_tests=True, driver_details=chromium_android.ContentShellDriverDetails(), android_devices=self._port._devices)
+        android_commands = android.AndroidCommands(self._mock_executive, '123456789ABCDEF0')
+        self._port = android.AndroidPort(MockSystemHost(executive=self._mock_executive), 'android')
+        self._driver = android.ChromiumAndroidDriver(self._port, worker_number=0,
+            pixel_tests=True, driver_details=android.ContentShellDriverDetails(), android_devices=self._port._devices)
 
     # The cmd_line() method in the Android port is used for starting a shell, not the test runner.
     def test_cmd_line(self):
@@ -194,11 +195,11 @@
         mock_adb = MockAndroidDebugBridge(2)
         mock_executive = MockExecutive2(run_command_fn=mock_adb.run_command)
 
-        port = chromium_android.ChromiumAndroidPort(MockSystemHost(executive=mock_executive), 'chromium-android')
-        driver0 = chromium_android.ChromiumAndroidDriver(port, worker_number=0, pixel_tests=True,
-            driver_details=chromium_android.ContentShellDriverDetails(), android_devices=port._devices)
-        driver1 = chromium_android.ChromiumAndroidDriver(port, worker_number=1, pixel_tests=True,
-            driver_details=chromium_android.ContentShellDriverDetails(), android_devices=port._devices)
+        port = android.AndroidPort(MockSystemHost(executive=mock_executive), 'android')
+        driver0 = android.ChromiumAndroidDriver(port, worker_number=0, pixel_tests=True,
+            driver_details=android.ContentShellDriverDetails(), android_devices=port._devices)
+        driver1 = android.ChromiumAndroidDriver(port, worker_number=1, pixel_tests=True,
+            driver_details=android.ContentShellDriverDetails(), android_devices=port._devices)
 
         self.assertEqual(['adb', '-s', '123456789ABCDEF0', 'shell'], driver0.cmd_line(True, []))
         self.assertEqual(['adb', '-s', '123456789ABCDEF1', 'shell'], driver1.cmd_line(True, ['anything']))
@@ -210,25 +211,10 @@
         mock_adb = MockAndroidDebugBridge(2)
         mock_executive = MockExecutive2(run_command_fn=mock_adb.run_command)
 
-        port0 = chromium_android.ChromiumAndroidPort(MockSystemHost(executive=mock_executive),
-            'chromium-android', options=MockOptions(additional_drt_flag=['--foo=bar']))
-        port1 = chromium_android.ChromiumAndroidPort(MockSystemHost(executive=mock_executive),
-            'chromium-android', options=MockOptions(driver_name='content_shell'))
-
-        self.assertEqual(1, port0.driver_cmd_line().count('--foo=bar'))
-        self.assertEqual(0, port1.driver_cmd_line().count('--create-stdin-fifo'))
-
-
-class ChromiumAndroidTwoPortsTest(unittest.TestCase):
-    # Test that the driver's command line indeed goes through to the driver.
-    def test_options_with_two_ports(self):
-        mock_adb = MockAndroidDebugBridge(2)
-        mock_executive = MockExecutive2(run_command_fn=mock_adb.run_command)
-
-        port0 = chromium_android.ChromiumAndroidPort(MockSystemHost(executive=mock_executive),
-            'chromium-android', options=MockOptions(additional_drt_flag=['--foo=bar']))
-        port1 = chromium_android.ChromiumAndroidPort(MockSystemHost(executive=mock_executive),
-            'chromium-android', options=MockOptions(driver_name='content_shell'))
+        port0 = android.AndroidPort(MockSystemHost(executive=mock_executive),
+            'android', options=MockOptions(additional_drt_flag=['--foo=bar']))
+        port1 = android.AndroidPort(MockSystemHost(executive=mock_executive),
+            'android', options=MockOptions(driver_name='content_shell'))
 
         self.assertEqual(1, port0.driver_cmd_line().count('--foo=bar'))
         self.assertEqual(0, port1.driver_cmd_line().count('--create-stdin-fifo'))
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/base.py b/Tools/Scripts/webkitpy/layout_tests/port/base.py
index dd17af4..5b9c9de 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/base.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/base.py
@@ -73,7 +73,7 @@
     """Abstract class for Port-specific hooks for the layout_test package."""
 
     # Subclasses override this. This should indicate the basic implementation
-    # part of the port name, e.g., 'chromium-mac', 'win', 'gtk'; there is probably (?)
+    # part of the port name, e.g., 'mac', 'win', 'gtk'; there is probably (?)
     # one unique value per class.
 
     # FIXME: We should probably rename this to something like 'implementation_name'.
@@ -86,6 +86,9 @@
 
     CONTENT_SHELL_NAME = 'content_shell'
 
+    # True if the port has aac and mp3 codecs built in.
+    PORT_HAS_AUDIO_CODECS_BUILT_IN = False
+
     @classmethod
     def determine_full_port_name(cls, host, options, port_name):
         """Return a fully-specified port name that can be used to construct objects."""
@@ -212,6 +215,12 @@
         baseline_search_paths = self.baseline_search_path()
         return baseline_search_paths[0]
 
+    def virtual_baseline_search_path(self, test_name):
+        suite = self.lookup_virtual_suite(test_name)
+        if not suite:
+            return None
+        return [self._filesystem.join(path, suite.name) for path in self.default_baseline_search_path()]
+
     def baseline_search_path(self):
         return self.get_option('additional_platform_directory', []) + self._compare_baseline() + self.default_baseline_search_path()
 
@@ -1353,12 +1362,21 @@
                 virtual_tests.extend(suite.tests.keys())
         return virtual_tests
 
-    def lookup_virtual_test_base(self, test_name):
+    def is_virtual_test(self, test_name):
+        return bool(self.lookup_virtual_suite(test_name))
+
+    def lookup_virtual_suite(self, test_name):
         for suite in self.populated_virtual_test_suites():
             if test_name.startswith(suite.name):
-                return test_name.replace(suite.name, suite.base, 1)
+                return suite
         return None
 
+    def lookup_virtual_test_base(self, test_name):
+        suite = self.lookup_virtual_suite(test_name)
+        if not suite:
+            return None
+        return test_name.replace(suite.name, suite.base, 1)
+
     def lookup_virtual_test_args(self, test_name):
         for suite in self.populated_virtual_test_suites():
             if test_name.startswith(suite.name):
@@ -1393,10 +1411,13 @@
     # If ports don't ever enable certain features, then those directories can just be
     # in the Skipped list instead of compile-time-checked here.
     def _missing_symbol_to_skipped_tests(self):
-        return {
-            "ff_mp3_decoder": ["webaudio/codec-tests/mp3"],
-            "ff_aac_decoder": ["webaudio/codec-tests/aac"],
-        }
+        if self.PORT_HAS_AUDIO_CODECS_BUILT_IN:
+            return {}
+        else:
+            return {
+                "ff_mp3_decoder": ["webaudio/codec-tests/mp3"],
+                "ff_aac_decoder": ["webaudio/codec-tests/aac"],
+            }
 
     def _has_test_in_directories(self, directory_lists, test_list):
         if not test_list:
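
The base.py hunks above do two things: they factor the suite lookup out of lookup_virtual_test_base() so the same lookup also backs the new is_virtual_test() and virtual_baseline_search_path() helpers, and they gate the ffmpeg codec skips behind the new PORT_HAS_AUDIO_CODECS_BUILT_IN flag (the Android port sets it to True, so it no longer skips the webaudio codec tests). A sketch of how the virtual-test helpers relate; 'port' stands for any Port instance and the suite and test names are hypothetical:

    def describe_virtual_test(port, test_name):
        # lookup_virtual_suite() returns the matching virtual suite, or None.
        suite = port.lookup_virtual_suite(test_name)
        if not suite:
            return None
        # is_virtual_test() is just bool(lookup_virtual_suite(...)).
        assert port.is_virtual_test(test_name)
        # lookup_virtual_test_base() swaps the suite prefix for its base, e.g.
        # 'virtual/gpu/fast/canvas/arc.html' -> 'fast/canvas/arc.html'.
        base_test = port.lookup_virtual_test_base(test_name)
        # virtual_baseline_search_path() joins the suite name onto each entry of
        # default_baseline_search_path(), so virtual baselines shadow real ones.
        return base_test, port.virtual_baseline_search_path(test_name)
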
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/builders.py b/Tools/Scripts/webkitpy/layout_tests/port/builders.py
index 7fa8610..91c7c7a 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/builders.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/builders.py
@@ -33,31 +33,24 @@
 
 # In this dictionary, each item stores:
 # * port_name -- a fully qualified port name
-# * is_debug -- whether we are using a debug build
-# * move_overwritten_baselines_to -- (optional) list of platform directories that we will copy an existing
-#      baseline to before pulling down a new baseline during rebaselining. This is useful
-#      for bringing up a new port, for example when adding a Lion was the most recent Mac version and
-#      we wanted to bring up Mountain Lion, we would want to copy an existing baseline in platform/mac
-#      to platform/mac-mountainlion before updating the platform/mac entry.
 # * rebaseline_override_dir -- (optional) directory to put baselines in instead of where you would normally put them.
 #      This is useful when we don't have bots that cover particular configurations; so, e.g., you might
 #      support mac-mountainlion but not have a mac-mountainlion bot yet, so you'd want to put the mac-lion
 #      results into platform/mac temporarily.
 
 _exact_matches = {
-    # These builders are on build.chromium.org.
-    "WebKit XP": {"port_name": "win-xp", "is_debug": False},
-    "WebKit Win7": {"port_name": "win-win7", "is_debug": False},
-    "WebKit Win7 (dbg)(1)": {"port_name": "win-win7", "is_debug": True},
-    "WebKit Win7 (dbg)(2)": {"port_name": "win-win7", "is_debug": True},
-    "WebKit Linux": {"port_name": "linux-x86_64", "is_debug": False},
-    "WebKit Linux 32": {"port_name": "linux-x86", "is_debug": False},
-    "WebKit Linux (dbg)": {"port_name": "linux-x86_64", "is_debug": True},
-    "WebKit Mac10.6": {"port_name": "chromium-mac-snowleopard", "is_debug": False},
-    "WebKit Mac10.6 (dbg)": {"port_name": "chromium-mac-snowleopard", "is_debug": True},
-    "WebKit Mac10.7": {"port_name": "chromium-mac-lion", "is_debug": False},
-    "WebKit Mac10.7 (dbg)": {"port_name": "chromium-mac-lion", "is_debug": True},
-    "WebKit Mac10.8": {"port_name": "chromium-mac-mountainlion", "is_debug": False},
+    "WebKit XP": {"port_name": "win-xp"},
+    "WebKit Win7": {"port_name": "win-win7"},
+    "WebKit Win7 (dbg)(1)": {"port_name": "win-win7"},
+    "WebKit Win7 (dbg)(2)": {"port_name": "win-win7"},
+    "WebKit Linux": {"port_name": "linux-x86_64"},
+    "WebKit Linux 32": {"port_name": "linux-x86"},
+    "WebKit Linux (dbg)": {"port_name": "linux-x86_64"},
+    "WebKit Mac10.6": {"port_name": "mac-snowleopard"},
+    "WebKit Mac10.6 (dbg)": {"port_name": "mac-snowleopard"},
+    "WebKit Mac10.7": {"port_name": "mac-lion"},
+    "WebKit Mac10.7 (dbg)": {"port_name": "mac-lion"},
+    "WebKit Mac10.8": {"port_name": "mac-mountainlion"},
 }
 
 
@@ -67,15 +60,15 @@
     "linux-x86_64": "WebKit Linux (deps)",
     "win-xp": "WebKit XP (deps)",
     "win-win7": "WebKit XP (deps)",
-    "chromium-mac-snowleopard": "WebKit Mac10.6 (deps)",
-    "chromium-mac-lion": "WebKit Mac10.6 (deps)",
-    "chromium-mac-mountainlion": "WebKit Mac10.6 (deps)",
+    "mac-snowleopard": "WebKit Mac10.6 (deps)",
+    "mac-lion": "WebKit Mac10.6 (deps)",
+    "mac-mountainlion": "WebKit Mac10.6 (deps)",
 }
 
 
 _ports_without_builders = [
     # FIXME: Move to _exact_matches.
-    "chromium-android",
+    "android",
 ]
 
 
@@ -95,10 +88,6 @@
     return _exact_matches[builder_name].get("rebaseline_override_dir", None)
 
 
-def move_overwritten_baselines_to(builder_name):
-    return _exact_matches[builder_name].get("move_overwritten_baselines_to", [])
-
-
 def port_name_for_builder_name(builder_name):
     return _exact_matches[builder_name]["port_name"]
 
@@ -107,7 +96,7 @@
     debug_builder_name = None
     for builder_name, builder_info in _exact_matches.items():
         if builder_info['port_name'] == target_port_name:
-            if builder_info['is_debug']:
+            if 'dbg' in builder_name:
                 debug_builder_name = builder_name
             else:
                 return builder_name
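
With the is_debug flag gone from _exact_matches, the lookup in the last hunk above infers debug builders from their display names instead; a small illustration using builder names from the table:

    # A builder is now treated as a debug builder purely because 'dbg' appears in its name.
    for name in ('WebKit Win7', 'WebKit Win7 (dbg)(1)', 'WebKit Linux (dbg)'):
        is_debug = 'dbg' in name   # False, True, True respectively
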
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
index 5f52631..bc4b6dd 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
@@ -64,7 +64,7 @@
         )
 
     ALL_BASELINE_VARIANTS = [
-        'chromium-mac-mountainlion', 'chromium-mac-lion', 'chromium-mac-snowleopard',
+        'mac-mountainlion', 'mac-lion', 'mac-snowleopard',
         'win-win7', 'win-xp',
         'linux-x86_64', 'linux-x86',
     ]
@@ -73,7 +73,7 @@
         'mac': ['snowleopard', 'lion', 'mountainlion'],
         'win': ['xp', 'win7'],
         'linux': ['lucid'],
-        'android': ['icecreamsandwich'],
+        # 'android': ['icecreamsandwich'],  # FIXME: see comment above next to 'icecreamsandwich'.
     }
 
     DEFAULT_BUILD_DIRECTORIES = ('out',)
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_port_testcase.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_port_testcase.py
index d75d044..82712ab 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_port_testcase.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_port_testcase.py
@@ -33,9 +33,9 @@
 from webkitpy.common.system.systemhost_mock import MockSystemHost
 from webkitpy.tool.mocktool import MockOptions
 
-import chromium_android
+import android
 import linux
-import chromium_mac
+import mac
 import win
 
 from webkitpy.layout_tests.models.test_configuration import TestConfiguration
@@ -66,7 +66,10 @@
         # Test that we get the chromium skips and not the webkit default skips
         port = self.make_port()
         skip_dict = port._missing_symbol_to_skipped_tests()
-        self.assertTrue('ff_mp3_decoder' in skip_dict)
+        if port.PORT_HAS_AUDIO_CODECS_BUILT_IN:
+            self.assertEqual(skip_dict, {})
+        else:
+            self.assertTrue('ff_mp3_decoder' in skip_dict)
         self.assertFalse('WebGLShader' in skip_dict)
 
     def test_all_test_configurations(self):
@@ -89,19 +92,19 @@
             TestConfiguration('lucid', 'x86_64', 'release'),
         ]))
 
-    class TestMacPort(chromium_mac.ChromiumMacPort):
+    class TestMacPort(mac.MacPort):
         def __init__(self, options=None):
             options = options or MockOptions()
-            chromium_mac.ChromiumMacPort.__init__(self, MockSystemHost(os_name='mac', os_version='leopard'), 'chromium-mac-leopard', options=options)
+            mac.MacPort.__init__(self, MockSystemHost(os_name='mac', os_version='leopard'), 'mac-leopard', options=options)
 
         def default_configuration(self):
             self.default_configuration_called = True
             return 'default'
 
-    class TestAndroidPort(chromium_android.ChromiumAndroidPort):
+    class TestAndroidPort(android.AndroidPort):
         def __init__(self, options=None):
             options = options or MockOptions()
-            chromium_android.ChromiumAndroidPort.__init__(self, MockSystemHost(os_name='android', os_version='icecreamsandwich'), 'chromium-android', options=options)
+            android.AndroidPort.__init__(self, MockSystemHost(os_name='android', os_version='icecreamsandwich'), 'android', options=options)
 
         def default_configuration(self):
             self.default_configuration_called = True
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py
index c6c50df..66a791f 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py
@@ -36,7 +36,6 @@
 from webkitpy.tool.mocktool import MockOptions
 
 import chromium
-import chromium_mac
 
 from webkitpy.layout_tests.port import chromium_port_testcase
 from webkitpy.layout_tests.port.driver import DriverInput
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/factory.py b/Tools/Scripts/webkitpy/layout_tests/port/factory.py
index 0e2e557..db2144b 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/factory.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/factory.py
@@ -45,9 +45,9 @@
             const=('chromium*' if use_globs else 'chromium'),
             help=('Alias for --platform=chromium*' if use_globs else 'Alias for --platform=chromium')),
 
-        optparse.make_option('--chromium-android', action='store_const', dest='platform',
-            const=('chromium-android*' if use_globs else 'chromium-android'),
-            help=('Alias for --platform=chromium-android*' if use_globs else 'Alias for --platform=chromium')),
+        optparse.make_option('--android', action='store_const', dest='platform',
+            const=('android*' if use_globs else 'android'),
+            help=('Alias for --platform=android*' if use_globs else 'Alias for --platform=android')),
         ]
 
 
@@ -72,9 +72,9 @@
 
 class PortFactory(object):
     PORT_CLASSES = (
-        'chromium_android.ChromiumAndroidPort',
+        'android.AndroidPort',
         'linux.LinuxPort',
-        'chromium_mac.ChromiumMacPort',
+        'mac.MacPort',
         'win.WinPort',
         'mock_drt.MockDRTPort',
         'test.TestPort',
@@ -88,7 +88,7 @@
         if platform.is_linux() or platform.is_freebsd():
             return 'linux'
         elif platform.is_mac():
-            return 'chromium-mac'
+            return 'mac'
         elif platform.is_win():
             return 'win'
         raise NotImplementedError('unknown platform: %s' % platform)
@@ -99,18 +99,10 @@
         appropriate port on this platform."""
         port_name = port_name or self._default_port(options)
 
-        # FIXME(dpranke): We special-case '--platform chromium' so that it can co-exist
-        # with '--platform chromium-mac' and '--platform linux' properly (we
-        # can't look at the port_name prefix in this case).
+        # FIXME(steveblock): There's no longer any need to pass '--platform
+        # chromium' on the command line so we can remove this logic.
         if port_name == 'chromium':
-            # FIXME(steveblock): This hack will go away once all ports have
-            # been renamed to remove the 'chromium-' part.
-            if self._host.platform.os_name == 'win':
-                port_name = 'win'
-            elif self._host.platform.os_name == 'linux':
-                port_name = 'linux'
-            else:
-                port_name = 'chromium-' + self._host.platform.os_name
+            port_name = self._host.platform.os_name
 
         for port_class in self.PORT_CLASSES:
             module_name, class_name = port_class.rsplit('.', 1)
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/factory_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/factory_unittest.py
index 6dfd726..4a9dda5 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/factory_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/factory_unittest.py
@@ -31,9 +31,9 @@
 from webkitpy.tool.mocktool import MockOptions
 from webkitpy.common.system.systemhost_mock import MockSystemHost
 
-from webkitpy.layout_tests.port import chromium_android
+from webkitpy.layout_tests.port import android
 from webkitpy.layout_tests.port import linux
-from webkitpy.layout_tests.port import chromium_mac
+from webkitpy.layout_tests.port import mac
 from webkitpy.layout_tests.port import win
 from webkitpy.layout_tests.port import factory
 from webkitpy.layout_tests.port import test
@@ -52,19 +52,19 @@
         port = factory.PortFactory(host).get(port_name, options=options)
         self.assertIsInstance(port, cls)
 
-    def test_chromium_mac(self):
-        self.assert_port(port_name='chromium-mac', os_name='mac', os_version='snowleopard',
-                         cls=chromium_mac.ChromiumMacPort)
+    def test_mac(self):
+        self.assert_port(port_name='mac', os_name='mac', os_version='snowleopard',
+                         cls=mac.MacPort)
         self.assert_port(port_name='chromium', os_name='mac', os_version='lion',
-                         cls=chromium_mac.ChromiumMacPort)
+                         cls=mac.MacPort)
 
     def test_linux(self):
         self.assert_port(port_name='linux', cls=linux.LinuxPort)
         self.assert_port(port_name='chromium', os_name='linux', os_version='lucid',
                          cls=linux.LinuxPort)
 
-    def test_chromium_android(self):
-        self.assert_port(port_name='chromium-android', cls=chromium_android.ChromiumAndroidPort)
+    def test_android(self):
+        self.assert_port(port_name='android', cls=android.AndroidPort)
         # NOTE: We can't check for port_name=chromium here, as this will append the host's
         # operating system, whereas host!=target for Android.
 
@@ -83,4 +83,4 @@
 
     def test_get_from_builder_name(self):
         self.assertEqual(factory.PortFactory(MockSystemHost()).get_from_builder_name('WebKit Mac10.7').name(),
-                          'chromium-mac-lion')
+                          'mac-lion')
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac.py b/Tools/Scripts/webkitpy/layout_tests/port/mac.py
similarity index 89%
rename from Tools/Scripts/webkitpy/layout_tests/port/chromium_mac.py
rename to Tools/Scripts/webkitpy/layout_tests/port/mac.py
index 8085a32..4a62eb9 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/mac.py
@@ -37,13 +37,13 @@
 _log = logging.getLogger(__name__)
 
 
-class ChromiumMacPort(chromium.ChromiumPort):
+class MacPort(chromium.ChromiumPort):
     SUPPORTED_VERSIONS = ('snowleopard', 'lion', 'mountainlion')
-    port_name = 'chromium-mac'
+    port_name = 'mac'
 
-    FALLBACK_PATHS = { 'mountainlion': [ 'chromium-mac' ]}
-    FALLBACK_PATHS['lion'] = ['chromium-mac-lion'] + FALLBACK_PATHS['mountainlion']
-    FALLBACK_PATHS['snowleopard'] = ['chromium-mac-snowleopard'] + FALLBACK_PATHS['lion']
+    FALLBACK_PATHS = { 'mountainlion': [ 'mac' ]}
+    FALLBACK_PATHS['lion'] = ['mac-lion'] + FALLBACK_PATHS['mountainlion']
+    FALLBACK_PATHS['snowleopard'] = ['mac-snowleopard'] + FALLBACK_PATHS['lion']
 
     DEFAULT_BUILD_DIRECTORIES = ('xcodebuild', 'out')
 
@@ -51,13 +51,13 @@
 
     @classmethod
     def determine_full_port_name(cls, host, options, port_name):
-        if port_name.endswith('-mac'):
+        if port_name.endswith('mac'):
             return port_name + '-' + host.platform.os_version
         return port_name
 
     def __init__(self, host, port_name, **kwargs):
         chromium.ChromiumPort.__init__(self, host, port_name, **kwargs)
-        self._version = port_name[port_name.index('chromium-mac-') + len('chromium-mac-'):]
+        self._version = port_name[port_name.index('mac-') + len('mac-'):]
         assert self._version in self.SUPPORTED_VERSIONS
 
     def _modules_to_search_for_symbols(self):
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py
similarity index 79%
rename from Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py
rename to Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py
index da01a4c..77ce522 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py
@@ -28,44 +28,44 @@
 
 import unittest2 as unittest
 
-from webkitpy.layout_tests.port import chromium_mac
+from webkitpy.layout_tests.port import mac
 from webkitpy.layout_tests.port import chromium_port_testcase
 from webkitpy.tool.mocktool import MockOptions
 
 
-class ChromiumMacPortTest(chromium_port_testcase.ChromiumPortTestCase):
+class MacPortTest(chromium_port_testcase.ChromiumPortTestCase):
     os_name = 'mac'
     os_version = 'snowleopard'
-    port_name = 'chromium-mac'
-    port_maker = chromium_mac.ChromiumMacPort
+    port_name = 'mac'
+    port_maker = mac.MacPort
 
     def assert_name(self, port_name, os_version_string, expected):
         port = self.make_port(os_version=os_version_string, port_name=port_name)
         self.assertEqual(expected, port.name())
 
     def test_versions(self):
-        self.assertTrue(self.make_port().name() in ('chromium-mac-snowleopard', 'chromium-mac-lion', 'chromium-mac-mountainlion'))
+        self.assertTrue(self.make_port().name() in ('mac-snowleopard', 'mac-lion', 'mac-mountainlion'))
 
-        self.assert_name(None, 'snowleopard', 'chromium-mac-snowleopard')
-        self.assert_name('chromium-mac', 'snowleopard', 'chromium-mac-snowleopard')
-        self.assert_name('chromium-mac-snowleopard', 'leopard', 'chromium-mac-snowleopard')
-        self.assert_name('chromium-mac-snowleopard', 'snowleopard', 'chromium-mac-snowleopard')
+        self.assert_name(None, 'snowleopard', 'mac-snowleopard')
+        self.assert_name('mac', 'snowleopard', 'mac-snowleopard')
+        self.assert_name('mac-snowleopard', 'leopard', 'mac-snowleopard')
+        self.assert_name('mac-snowleopard', 'snowleopard', 'mac-snowleopard')
 
-        self.assert_name(None, 'lion', 'chromium-mac-lion')
-        self.assert_name(None, 'mountainlion', 'chromium-mac-mountainlion')
+        self.assert_name(None, 'lion', 'mac-lion')
+        self.assert_name(None, 'mountainlion', 'mac-mountainlion')
 
-        self.assert_name('chromium-mac', 'lion', 'chromium-mac-lion')
+        self.assert_name('mac', 'lion', 'mac-lion')
         self.assertRaises(AssertionError, self.assert_name, None, 'tiger', 'should-raise-assertion-so-this-value-does-not-matter')
 
     def test_baseline_path(self):
-        port = self.make_port(port_name='chromium-mac-snowleopard')
-        self.assertEqual(port.baseline_path(), port._webkit_baseline_path('chromium-mac-snowleopard'))
+        port = self.make_port(port_name='mac-snowleopard')
+        self.assertEqual(port.baseline_path(), port._webkit_baseline_path('mac-snowleopard'))
 
-        port = self.make_port(port_name='chromium-mac-lion')
-        self.assertEqual(port.baseline_path(), port._webkit_baseline_path('chromium-mac-lion'))
+        port = self.make_port(port_name='mac-lion')
+        self.assertEqual(port.baseline_path(), port._webkit_baseline_path('mac-lion'))
 
-        port = self.make_port(port_name='chromium-mac-mountainlion')
-        self.assertEqual(port.baseline_path(), port._webkit_baseline_path('chromium-mac'))
+        port = self.make_port(port_name='mac-mountainlion')
+        self.assertEqual(port.baseline_path(), port._webkit_baseline_path('mac'))
 
     def test_operating_system(self):
         self.assertEqual('mac', self.make_port().operating_system())
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py
index b4d69c4..3d5fd10 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py
@@ -48,7 +48,7 @@
     def make_port(self, options=mock_options):
         host = MockSystemHost()
         test.add_unit_tests_to_mock_filesystem(host.filesystem)
-        return mock_drt.MockDRTPort(host, port_name='mock-chromium-mac', options=options)
+        return mock_drt.MockDRTPort(host, port_name='mock-mac', options=options)
 
     def test_port_name_in_constructor(self):
         self.assertTrue(mock_drt.MockDRTPort(MockSystemHost(), port_name='mock-test'))
@@ -239,7 +239,7 @@
             {'/tmp/png_result0.png': 'image_checksum\x8a-pngtEXtchecksum\x00image_checksum-checksum'})
 
     def test_test_shell_parse_options(self):
-        options, args = mock_drt.parse_options(['--platform', 'chromium-mac', '--test-shell',
+        options, args = mock_drt.parse_options(['--platform', 'mac', '--test-shell',
             '--pixel-tests=/tmp/png_result0.png'])
         self.assertTrue(options.test_shell)
         self.assertTrue(options.pixel_tests)
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/test.py b/Tools/Scripts/webkitpy/layout_tests/port/test.py
index b616ac9..df2c024 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/test.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/test.py
@@ -101,8 +101,8 @@
 #
 # These numbers may need to be updated whenever we add or delete tests.
 #
-TOTAL_TESTS = 104
-TOTAL_SKIPS = 26
+TOTAL_TESTS = 106
+TOTAL_SKIPS = 27
 TOTAL_RETRIES = 14
 
 UNEXPECTED_PASSES = 6
@@ -116,6 +116,7 @@
     tests.add('failures/expected/hang.html', hang=True)
     tests.add('failures/expected/missing_text.html', expected_text=None)
     tests.add('failures/expected/needsrebaseline.html', actual_text='needsrebaseline text')
+    tests.add('failures/expected/needsmanualrebaseline.html', actual_text='needsmanualrebaseline text')
     tests.add('failures/expected/image.html',
               actual_image='image_fail-pngtEXtchecksum\x00checksum_fail',
               expected_image='image-pngtEXtchecksum\x00checksum-png')
@@ -276,6 +277,7 @@
 Bug(test) failures/expected/crash.html [ Crash ]
 Bug(test) failures/expected/image.html [ ImageOnlyFailure ]
 Bug(test) failures/expected/needsrebaseline.html [ NeedsRebaseline ]
+Bug(test) failures/expected/needsmanualrebaseline.html [ NeedsManualRebaseline ]
 Bug(test) failures/expected/audio.html [ Failure ]
 Bug(test) failures/expected/image_checksum.html [ ImageOnlyFailure ]
 Bug(test) failures/expected/mismatch.html [ ImageOnlyFailure ]
diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
index f2b249f..b349fc5 100644
--- a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
+++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
@@ -205,6 +205,7 @@
             help=("determine the order in which the test cases will be run. "
                   "'none' == use the order in which the tests were listed either in arguments or test list, "
                   "'natural' == use the natural order (default), "
+                  "'random-seeded' == randomize the test order using a fixed seed, "
                   "'random' == randomize the test order.")),
         optparse.make_option("--run-chunk",
             help=("Run a specified chunk (n:l), the nth of len l, "
diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
index 5541ed9..5882740 100644
--- a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
@@ -329,6 +329,11 @@
         tests_run = get_tests_run(['--order=random'] + tests_to_run)
         self.assertEqual(sorted(tests_to_run), sorted(tests_run))
 
+    def test_random_daily_seed_order(self):
+        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
+        tests_run = get_tests_run(['--order=random-seeded'] + tests_to_run)
+        self.assertEqual(sorted(tests_to_run), sorted(tests_run))
+
     def test_random_order_test_specified_multiple_times(self):
         tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
         tests_run = get_tests_run(['--order=random'] + tests_to_run)
@@ -788,7 +793,7 @@
         if sys.platform == 'darwin':
             port_name = 'linux-x86'
         else:
-            port_name = 'chromium-mac-lion'
+            port_name = 'mac-lion'
         out = StringIO.StringIO()
         err = StringIO.StringIO()
         self.assertEqual(run_webkit_tests.main(['--platform', port_name, 'fast/harness/results.html'], out, err), -1)
@@ -896,11 +901,11 @@
     def assert_mock_port_works(self, port_name, args=[]):
         self.assertTrue(passing_run(args + ['--platform', 'mock-' + port_name, 'fast/harness/results.html'], tests_included=True, host=Host()))
 
-    def disabled_test_chromium_mac_lion(self):
-        self.assert_mock_port_works('chromium-mac-lion')
+    def disabled_test_mac_lion(self):
+        self.assert_mock_port_works('mac-lion')
 
-    def disabled_test_chromium_mac_lion_in_test_shell_mode(self):
-        self.assert_mock_port_works('chromium-mac-lion', args=['--additional-drt-flag=--test-shell'])
+    def disabled_test_mac_lion_in_test_shell_mode(self):
+        self.assert_mock_port_works('mac-lion', args=['--additional-drt-flag=--test-shell'])
 
     def disabled_test_qt_linux(self):
         self.assert_mock_port_works('qt-linux')
diff --git a/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py b/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py
index 46c2e70..a1b07e3 100644
--- a/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py
+++ b/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py
@@ -30,7 +30,7 @@
 
 from webkitpy.layout_tests.models import test_expectations
 
-from webkitpy.common.net import resultsjsonparser
+from webkitpy.common.net import layouttestresults
 
 
 TestExpectations = test_expectations.TestExpectations
@@ -119,7 +119,7 @@
             else:
                 add_to_dict_of_lists(regressions, results['actual'], test)
 
-        resultsjsonparser.for_each_test(summarized_results['tests'], add_result)
+        layouttestresults.for_each_test(summarized_results['tests'], add_result)
 
         if len(passes) or len(flaky) or len(regressions):
             self._print("")
@@ -140,7 +140,7 @@
                 tests.sort()
 
                 for test in tests:
-                    result = resultsjsonparser.result_for_test(summarized_results['tests'], test)
+                    result = layouttestresults.result_for_test(summarized_results['tests'], test)
                     actual = result['actual'].split(" ")
                     expected = result['expected'].split(" ")
                     result = TestExpectations.EXPECTATIONS[key.lower()]
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
index b5bc338..ffc963e 100644
--- a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
@@ -83,11 +83,11 @@
             optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                 help='Set the configuration to Release'),
             optparse.make_option("--platform",
-                help="Specify port/platform being tested (i.e. chromium-mac)"),
+                help="Specify port/platform being tested (e.g. mac)"),
             optparse.make_option("--chromium",
                 action="store_const", const='chromium', dest='platform', help='Alias for --platform=chromium'),
-            optparse.make_option("--chromium-android",
-                action="store_const", const='chromium-android', dest='platform', help='Alias for --platform=chromium-android'),
+            optparse.make_option("--android",
+                action="store_const", const='android', dest='platform', help='Alias for --platform=android'),
             optparse.make_option("--builder-name",
                 help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
             optparse.make_option("--build-number",
diff --git a/Tools/Scripts/webkitpy/thirdparty/__init__.py b/Tools/Scripts/webkitpy/thirdparty/__init__.py
index 478439c..2e0919a 100644
--- a/Tools/Scripts/webkitpy/thirdparty/__init__.py
+++ b/Tools/Scripts/webkitpy/thirdparty/__init__.py
@@ -103,11 +103,11 @@
         return did_install_something
 
     def _install_mechanize(self):
-        return self._install("http://pypi.python.org/packages/source/m/mechanize/mechanize-0.2.5.tar.gz",
+        return self._install("https://pypi.python.org/packages/source/m/mechanize/mechanize-0.2.5.tar.gz",
                              "mechanize-0.2.5/mechanize")
 
     def _install_pep8(self):
-        return self._install("http://pypi.python.org/packages/source/p/pep8/pep8-0.5.0.tar.gz#md5=512a818af9979290cd619cce8e9c2e2b",
+        return self._install("https://pypi.python.org/packages/source/p/pep8/pep8-0.5.0.tar.gz#md5=512a818af9979290cd619cce8e9c2e2b",
                              "pep8-0.5.0/pep8.py")
 
     def _install_pylint(self):
@@ -120,20 +120,20 @@
             files_to_remove = []
             if sys.platform == 'win32':
                 files_to_remove = ['test/data/write_protected_file.txt']
-            did_install_something = installer.install("http://pypi.python.org/packages/source/l/logilab-common/logilab-common-0.58.1.tar.gz#md5=77298ab2d8bb8b4af9219791e7cee8ce", url_subpath="logilab-common-0.58.1", target_name="logilab/common", files_to_remove=files_to_remove)
-            did_install_something |= installer.install("http://pypi.python.org/packages/source/l/logilab-astng/logilab-astng-0.24.1.tar.gz#md5=ddaf66e4d85714d9c47a46d4bed406de", url_subpath="logilab-astng-0.24.1", target_name="logilab/astng")
-            did_install_something |= installer.install('http://pypi.python.org/packages/source/p/pylint/pylint-0.25.1.tar.gz#md5=728bbc2b339bc3749af013709a7f87a5', url_subpath="pylint-0.25.1", target_name="pylint")
+            did_install_something = installer.install("https://pypi.python.org/packages/source/l/logilab-common/logilab-common-0.58.1.tar.gz#md5=77298ab2d8bb8b4af9219791e7cee8ce", url_subpath="logilab-common-0.58.1", target_name="logilab/common", files_to_remove=files_to_remove)
+            did_install_something |= installer.install("https://pypi.python.org/packages/source/l/logilab-astng/logilab-astng-0.24.1.tar.gz#md5=ddaf66e4d85714d9c47a46d4bed406de", url_subpath="logilab-astng-0.24.1", target_name="logilab/astng")
+            did_install_something |= installer.install('https://pypi.python.org/packages/source/p/pylint/pylint-0.25.1.tar.gz#md5=728bbc2b339bc3749af013709a7f87a5', url_subpath="pylint-0.25.1", target_name="pylint")
         return did_install_something
 
     # autoinstalled.buildbot is used by BuildSlaveSupport/build.webkit.org-config/mastercfg_unittest.py
     # and should ideally match the version of BuildBot used at build.webkit.org.
     def _install_coverage(self):
         self._ensure_autoinstalled_dir_is_in_sys_path()
-        return self._install(url="http://pypi.python.org/packages/source/c/coverage/coverage-3.5.1.tar.gz#md5=410d4c8155a4dab222f2bc51212d4a24", url_subpath="coverage-3.5.1/coverage")
+        return self._install(url="https://pypi.python.org/packages/source/c/coverage/coverage-3.5.1.tar.gz#md5=410d4c8155a4dab222f2bc51212d4a24", url_subpath="coverage-3.5.1/coverage")
 
     def _install_unittest2(self):
         self._ensure_autoinstalled_dir_is_in_sys_path()
-        return self._install(url="http://pypi.python.org/packages/source/u/unittest2/unittest2-0.5.1.tar.gz#md5=a0af5cac92bbbfa0c3b0e99571390e0f", url_subpath="unittest2-0.5.1/unittest2")
+        return self._install(url="https://pypi.python.org/packages/source/u/unittest2/unittest2-0.5.1.tar.gz#md5=a0af5cac92bbbfa0c3b0e99571390e0f", url_subpath="unittest2-0.5.1/unittest2")
 
     def _install_webpagereplay(self):
         did_install_something = False
diff --git a/Tools/Scripts/webkitpy/tool/commands/gardenomatic.py b/Tools/Scripts/webkitpy/tool/commands/gardenomatic.py
index c87c1a2..e1ebe9b 100644
--- a/Tools/Scripts/webkitpy/tool/commands/gardenomatic.py
+++ b/Tools/Scripts/webkitpy/tool/commands/gardenomatic.py
@@ -33,7 +33,6 @@
 
     def __init__(self):
         super(GardenOMatic, self).__init__(options=(self.platform_options + [
-            self.move_overwritten_baselines_option,
             self.results_directory_option,
             self.no_optimize_option,
             ]))
diff --git a/Tools/Scripts/webkitpy/tool/commands/queries.py b/Tools/Scripts/webkitpy/tool/commands/queries.py
index 74f7f87..309572f 100644
--- a/Tools/Scripts/webkitpy/tool/commands/queries.py
+++ b/Tools/Scripts/webkitpy/tool/commands/queries.py
@@ -32,214 +32,16 @@
 import logging
 import re
 
-from datetime import datetime
 from optparse import make_option
 
-from webkitpy.tool import steps
-
-import webkitpy.common.config.urls as config_urls
-from webkitpy.common.net.buildbot import BuildBot
-from webkitpy.common.net.regressionwindow import RegressionWindow
 from webkitpy.common.system.crashlogs import CrashLogs
-from webkitpy.common.system.user import User
-from webkitpy.tool.commands.abstractsequencedcommand import AbstractSequencedCommand
-from webkitpy.tool.grammar import pluralize
 from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
 from webkitpy.layout_tests.models.test_expectations import TestExpectations
-from webkitpy.layout_tests.port import platform_options, configuration_options
+from webkitpy.layout_tests.port import platform_options
 
 _log = logging.getLogger(__name__)
 
 
-class ResultsFor(AbstractDeclarativeCommand):
-    name = "results-for"
-    help_text = "Print a list of failures for the passed revision from bots on %s" % config_urls.buildbot_url
-    argument_names = "REVISION"
-
-    def _print_layout_test_results(self, results):
-        if not results:
-            print " No results."
-            return
-        for title, files in results.parsed_results().items():
-            print " %s" % title
-            for filename in files:
-                print "  %s" % filename
-
-    def execute(self, options, args, tool):
-        builders = self._tool.buildbot.builders()
-        for builder in builders:
-            print "%s:" % builder.name()
-            build = builder.build_for_revision(args[0], allow_failed_lookups=True)
-            self._print_layout_test_results(build.layout_test_results())
-
-
-class FailureReason(AbstractDeclarativeCommand):
-    name = "failure-reason"
-    help_text = "Lists revisions where individual test failures started at %s" % config_urls.buildbot_url
-
-    def _blame_line_for_revision(self, revision):
-        try:
-            commit_info = self._tool.checkout().commit_info_for_revision(revision)
-        except Exception, e:
-            return "FAILED to fetch CommitInfo for r%s, exception: %s" % (revision, e)
-        if not commit_info:
-            return "FAILED to fetch CommitInfo for r%s, likely missing ChangeLog" % revision
-        return commit_info.blame_string(self._tool.bugs)
-
-    def _print_blame_information_for_transition(self, regression_window, failing_tests):
-        red_build = regression_window.failing_build()
-        print "SUCCESS: Build %s (r%s) was the first to show failures: %s" % (red_build._number, red_build.revision(), failing_tests)
-        print "Suspect revisions:"
-        for revision in regression_window.revisions():
-            print self._blame_line_for_revision(revision)
-
-    def _explain_failures_for_builder(self, builder, start_revision):
-        print "Examining failures for \"%s\", starting at r%s" % (builder.name(), start_revision)
-        revision_to_test = start_revision
-        build = builder.build_for_revision(revision_to_test, allow_failed_lookups=True)
-        layout_test_results = build.layout_test_results()
-        if not layout_test_results:
-            # FIXME: This could be made more user friendly.
-            print "Failed to load layout test results from %s; can't continue. (start revision = r%s)" % (build.results_url(), start_revision)
-            return 1
-
-        results_to_explain = set(layout_test_results.failing_tests())
-        last_build_with_results = build
-        print "Starting at %s" % revision_to_test
-        while results_to_explain:
-            revision_to_test -= 1
-            new_build = builder.build_for_revision(revision_to_test, allow_failed_lookups=True)
-            if not new_build:
-                print "No build for %s" % revision_to_test
-                continue
-            build = new_build
-            latest_results = build.layout_test_results()
-            if not latest_results:
-                print "No results build %s (r%s)" % (build._number, build.revision())
-                continue
-            failures = set(latest_results.failing_tests())
-            if len(failures) >= 20:
-                # FIXME: We may need to move this logic into the LayoutTestResults class.
-                # The buildbot stops runs after 20 failures so we don't have full results to work with here.
-                print "Too many failures in build %s (r%s), ignoring." % (build._number, build.revision())
-                continue
-            fixed_results = results_to_explain - failures
-            if not fixed_results:
-                print "No change in build %s (r%s), %s unexplained failures (%s in this build)" % (build._number, build.revision(), len(results_to_explain), len(failures))
-                last_build_with_results = build
-                continue
-            regression_window = RegressionWindow(build, last_build_with_results)
-            self._print_blame_information_for_transition(regression_window, fixed_results)
-            last_build_with_results = build
-            results_to_explain -= fixed_results
-        if results_to_explain:
-            print "Failed to explain failures: %s" % results_to_explain
-            return 1
-        print "Explained all results for %s" % builder.name()
-        return 0
-
-    def _builder_to_explain(self):
-        builder_statuses = self._tool.buildbot.builder_statuses()
-        red_statuses = [status for status in builder_statuses if not status["is_green"]]
-        print "%s failing" % (pluralize("builder", len(red_statuses)))
-        builder_choices = [status["name"] for status in red_statuses]
-        # We could offer an "All" choice here.
-        chosen_name = self._tool.user.prompt_with_list("Which builder to diagnose:", builder_choices)
-        # FIXME: prompt_with_list should really take a set of objects and a set of names and then return the object.
-        for status in red_statuses:
-            if status["name"] == chosen_name:
-                return (self._tool.buildbot.builder_with_name(chosen_name), status["built_revision"])
-
-    def execute(self, options, args, tool):
-        (builder, latest_revision) = self._builder_to_explain()
-        start_revision = self._tool.user.prompt("Revision to walk backwards from? [%s] " % latest_revision) or latest_revision
-        if not start_revision:
-            print "Revision required."
-            return 1
-        return self._explain_failures_for_builder(builder, start_revision=int(start_revision))
-
-
-class FindFlakyTests(AbstractDeclarativeCommand):
-    name = "find-flaky-tests"
-    help_text = "Lists tests that often fail for a single build at %s" % config_urls.buildbot_url
-
-    def _find_failures(self, builder, revision):
-        build = builder.build_for_revision(revision, allow_failed_lookups=True)
-        if not build:
-            print "No build for %s" % revision
-            return (None, None)
-        results = build.layout_test_results()
-        if not results:
-            print "No results build %s (r%s)" % (build._number, build.revision())
-            return (None, None)
-        failures = set(results.failing_tests())
-        if len(failures) >= 20:
-            # FIXME: We may need to move this logic into the LayoutTestResults class.
-            # The buildbot stops runs after 20 failures so we don't have full results to work with here.
-            print "Too many failures in build %s (r%s), ignoring." % (build._number, build.revision())
-            return (None, None)
-        return (build, failures)
-
-    def _increment_statistics(self, flaky_tests, flaky_test_statistics):
-        for test in flaky_tests:
-            count = flaky_test_statistics.get(test, 0)
-            flaky_test_statistics[test] = count + 1
-
-    def _print_statistics(self, statistics):
-        print "=== Results ==="
-        print "Occurances Test name"
-        for value, key in sorted([(value, key) for key, value in statistics.items()]):
-            print "%10d %s" % (value, key)
-
-    def _walk_backwards_from(self, builder, start_revision, limit):
-        flaky_test_statistics = {}
-        all_previous_failures = set([])
-        one_time_previous_failures = set([])
-        previous_build = None
-        for i in range(limit):
-            revision = start_revision - i
-            print "Analyzing %s ... " % revision,
-            (build, failures) = self._find_failures(builder, revision)
-            if failures == None:
-                # Notice that we don't loop on the empty set!
-                continue
-            print "has %s failures" % len(failures)
-            flaky_tests = one_time_previous_failures - failures
-            if flaky_tests:
-                print "Flaky tests: %s %s" % (sorted(flaky_tests),
-                                              previous_build.results_url())
-            self._increment_statistics(flaky_tests, flaky_test_statistics)
-            one_time_previous_failures = failures - all_previous_failures
-            all_previous_failures = failures
-            previous_build = build
-        self._print_statistics(flaky_test_statistics)
-
-    def _builder_to_analyze(self):
-        statuses = self._tool.buildbot.builder_statuses()
-        choices = [status["name"] for status in statuses]
-        chosen_name = self._tool.user.prompt_with_list("Which builder to analyze:", choices)
-        for status in statuses:
-            if status["name"] == chosen_name:
-                return (self._tool.buildbot.builder_with_name(chosen_name), status["built_revision"])
-
-    def execute(self, options, args, tool):
-        (builder, latest_revision) = self._builder_to_analyze()
-        limit = self._tool.user.prompt("How many revisions to look through? [10000] ") or 10000
-        return self._walk_backwards_from(builder, latest_revision, limit=int(limit))
-
-
-class TreeStatus(AbstractDeclarativeCommand):
-    name = "tree-status"
-    help_text = "Print the status of the %s buildbots" % config_urls.buildbot_url
-    long_help = """Fetches build status from http://build.webkit.org/one_box_per_builder
-and displayes the status of each builder."""
-
-    def execute(self, options, args, tool):
-        for builder in tool.buildbot.builder_statuses():
-            status_string = "ok" if builder["is_green"] else "FAIL"
-            print "%s : %s" % (status_string.ljust(4), builder["name"])
-
-
 class CrashLog(AbstractDeclarativeCommand):
     name = "crash-log"
     help_text = "Print the newest crash log for the given process"
diff --git a/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py b/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py
index 98ef88c..07062a1 100644
--- a/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py
@@ -38,36 +38,6 @@
 from webkitpy.tool.mocktool import MockTool, MockOptions
 
 
-class MockTestPort1(object):
-    def skips_layout_test(self, test_name):
-        return test_name in ["media/foo/bar.html", "foo"]
-
-
-class MockTestPort2(object):
-    def skips_layout_test(self, test_name):
-        return test_name == "media/foo/bar.html"
-
-
-class MockPortFactory(object):
-    def __init__(self):
-        self._all_ports = {
-            "test_port1": MockTestPort1(),
-            "test_port2": MockTestPort2(),
-        }
-
-    def all_port_names(self, options=None):
-        return self._all_ports.keys()
-
-    def get(self, port_name):
-        return self._all_ports.get(port_name)
-
-
-class QueryCommandsTest(CommandsTest):
-    def test_tree_status(self):
-        expected_stdout = "ok   : Builder1\nok   : Builder2\n"
-        self.assert_execute_outputs(TreeStatus(), None, expected_stdout)
-
-
 class PrintExpectationsTest(unittest.TestCase):
     def run_test(self, tests, expected_stdout, platform='test-win-xp', **args):
         options = MockOptions(all=False, csv=False, full=False, platform=platform,
diff --git a/Tools/Scripts/webkitpy/tool/commands/rebaseline.py b/Tools/Scripts/webkitpy/tool/commands/rebaseline.py
index fdff81e..48f1ebb 100644
--- a/Tools/Scripts/webkitpy/tool/commands/rebaseline.py
+++ b/Tools/Scripts/webkitpy/tool/commands/rebaseline.py
@@ -53,9 +53,6 @@
 class AbstractRebaseliningCommand(AbstractDeclarativeCommand):
     # not overriding execute() - pylint: disable=W0223
 
-    move_overwritten_baselines_option = optparse.make_option("--move-overwritten-baselines", action="store_true", default=False,
-        help="Move overwritten baselines elsewhere in the baseline path. This is for bringing up new ports.")
-
     no_optimize_option = optparse.make_option('--no-optimize', dest='optimize', action='store_false', default=True,
         help=('Do not optimize/de-dup the expectations after rebaselining (default is to de-dup automatically). '
               'You can use "webkit-patch optimize-baselines" to optimize separately.'))
@@ -72,24 +69,18 @@
         self._baseline_suffix_list = BASELINE_SUFFIX_LIST
 
 
-class RebaselineTest(AbstractRebaseliningCommand):
-    name = "rebaseline-test-internal"
-    help_text = "Rebaseline a single test from a buildbot. Only intended for use by other webkit-patch commands."
-
+class BaseInternalRebaselineCommand(AbstractRebaseliningCommand):
     def __init__(self):
-        super(RebaselineTest, self).__init__(options=[
-            self.no_optimize_option,
+        super(BaseInternalRebaselineCommand, self).__init__(options=[
             self.results_directory_option,
             self.suffixes_option,
             optparse.make_option("--builder", help="Builder to pull new baselines from"),
-            optparse.make_option("--move-overwritten-baselines-to", action="append", default=[],
-                help="Platform to move existing baselines to before rebaselining. This is for bringing up new ports."),
             optparse.make_option("--test", help="Test to rebaseline"),
             ])
         self._scm_changes = {'add': [], 'remove-lines': []}
 
-    def _results_url(self, builder_name):
-        return self._tool.buildbot_for_builder_name(builder_name).builder_with_name(builder_name).latest_layout_test_results_url()
+    def _add_to_scm(self, path):
+        self._scm_changes['add'].append(path)
 
     def _baseline_directory(self, builder_name):
         port = self._tool.port_factory.get_from_builder_name(builder_name)
@@ -98,6 +89,20 @@
             return self._tool.filesystem.join(port.layout_tests_dir(), 'platform', override_dir)
         return port.baseline_version_dir()
 
+    def _test_root(self, test_name):
+        return self._tool.filesystem.splitext(test_name)[0]
+
+    def _file_name_for_actual_result(self, test_name, suffix):
+        return "%s-actual.%s" % (self._test_root(test_name), suffix)
+
+    def _file_name_for_expected_result(self, test_name, suffix):
+        return "%s-expected.%s" % (self._test_root(test_name), suffix)
+
+
+class CopyExistingBaselinesInternal(BaseInternalRebaselineCommand):
+    name = "copy-existing-baselines-internal"
+    help_text = "Copy existing baselines down one level in the baseline order to ensure new baselines don't break existing passing platforms."
+
     @memoized
     def _immediate_predecessors_in_fallback(self, path_to_rebaseline):
         port_names = self._tool.port_factory.all_port_names()
@@ -122,13 +127,16 @@
                 return port
         raise Exception("Failed to find port for primary baseline %s." % baseline)
 
-    def _copy_existing_baseline(self, move_overwritten_baselines_to, test_name, suffix):
+    def _copy_existing_baseline(self, builder_name, test_name, suffix):
+        baseline_directory = self._baseline_directory(builder_name)
+        ports = [self._port_for_primary_baseline(baseline) for baseline in self._immediate_predecessors_in_fallback(baseline_directory)]
+
         old_baselines = []
         new_baselines = []
 
         # Need to gather all the baseline paths before modifying the filesystem since
         # the modifications can affect the results of port.expected_filename.
-        for port in [self._port_for_primary_baseline(baseline) for baseline in move_overwritten_baselines_to]:
+        for port in ports:
             old_baseline = port.expected_filename(test_name, "." + suffix)
             if not self._tool.filesystem.exists(old_baseline):
                 _log.debug("No existing baseline for %s." % test_name)
@@ -152,41 +160,36 @@
             if not self._tool.scm().exists(new_baseline):
                 self._add_to_scm(new_baseline)
 
+    def execute(self, options, args, tool):
+        for suffix in options.suffixes.split(','):
+            self._copy_existing_baseline(options.builder, options.test, suffix)
+        print json.dumps(self._scm_changes)
+
+
+class RebaselineTest(BaseInternalRebaselineCommand):
+    name = "rebaseline-test-internal"
+    help_text = "Rebaseline a single test from a buildbot. Only intended for use by other webkit-patch commands."
+
+    def _results_url(self, builder_name):
+        return self._tool.buildbot_for_builder_name(builder_name).builder_with_name(builder_name).latest_layout_test_results_url()
+
     def _save_baseline(self, data, target_baseline, baseline_directory, test_name, suffix):
         if not data:
             _log.debug("No baseline data to save.")
             return
 
-        self._copy_existing_baseline(self._immediate_predecessors_in_fallback(baseline_directory), test_name, suffix)
-
         filesystem = self._tool.filesystem
         filesystem.maybe_make_directory(filesystem.dirname(target_baseline))
         filesystem.write_binary_file(target_baseline, data)
         if not self._tool.scm().exists(target_baseline):
             self._add_to_scm(target_baseline)
 
-    def _add_to_scm(self, path):
-        self._scm_changes['add'].append(path)
-
-    def _test_root(self, test_name):
-        return self._tool.filesystem.splitext(test_name)[0]
-
-    def _file_name_for_actual_result(self, test_name, suffix):
-        return "%s-actual.%s" % (self._test_root(test_name), suffix)
-
-    def _file_name_for_expected_result(self, test_name, suffix):
-        return "%s-expected.%s" % (self._test_root(test_name), suffix)
-
-    def _rebaseline_test(self, builder_name, test_name, move_overwritten_baselines_to, suffix, results_url):
+    def _rebaseline_test(self, builder_name, test_name, suffix, results_url):
         baseline_directory = self._baseline_directory(builder_name)
 
         source_baseline = "%s/%s" % (results_url, self._file_name_for_actual_result(test_name, suffix))
         target_baseline = self._tool.filesystem.join(baseline_directory, self._file_name_for_expected_result(test_name, suffix))
 
-        # FIXME: This concept is outdated now that we always move baselines in _save_baseline.
-        if move_overwritten_baselines_to:
-            self._copy_existing_baseline(move_overwritten_baselines_to, test_name, suffix)
-
         _log.debug("Retrieving %s." % source_baseline)
         self._save_baseline(self._tool.web.get_binary(source_baseline, convert_404_to_None=True), target_baseline, baseline_directory, test_name, suffix)
 
@@ -203,7 +206,7 @@
         self._baseline_suffix_list = options.suffixes.split(',')
 
         for suffix in self._baseline_suffix_list:
-            self._rebaseline_test(options.builder, options.test, options.move_overwritten_baselines_to, suffix, results_url)
+            self._rebaseline_test(options.builder, options.test, suffix, results_url)
         self._scm_changes['remove-lines'].append({'builder': options.builder, 'test': options.test})
 
     def execute(self, options, args, tool):
@@ -282,6 +285,15 @@
 class AbstractParallelRebaselineCommand(AbstractRebaseliningCommand):
     # not overriding execute() - pylint: disable=W0223
 
+    # The release builders cycle much faster than the debug ones and cover all the platforms.
+    def _release_builders(self):
+        release_builders = []
+        for builder_name in builders.all_builder_names():
+            port = self._tool.port_factory.get_from_builder_name(builder_name)
+            if port.test_configuration().build_type == 'release':
+                release_builders.append(builder_name)
+        return release_builders
+
     def _run_webkit_patch(self, args, verbose):
         try:
             verbose_args = ['--verbose'] if verbose else []
@@ -301,7 +313,7 @@
         builders_to_fallback_paths = {}
         for builder in builders_to_check:
             port = self._tool.port_factory.get_from_builder_name(builder)
-            if port.test_configuration().build_type == 'Release':
+            if port.test_configuration().build_type == 'release':
                 release_builders.add(builder)
             else:
                 debug_builders.add(builder)
@@ -315,24 +327,22 @@
     def _rebaseline_commands(self, test_prefix_list, options):
         path_to_webkit_patch = self._tool.path()
         cwd = self._tool.scm().checkout_root
-        commands = []
+        copy_baseline_commands = []
+        rebaseline_commands = []
         port = self._tool.port_factory.get()
 
         for test_prefix in test_prefix_list:
             for test in port.tests([test_prefix]):
                 for builder in self._builders_to_fetch_from(test_prefix_list[test_prefix]):
                     suffixes = ','.join(test_prefix_list[test_prefix][builder])
-                    cmd_line = [path_to_webkit_patch, 'rebaseline-test-internal', '--suffixes', suffixes, '--builder', builder, '--test', test]
-                    if options.move_overwritten_baselines:
-                        move_overwritten_baselines_to = builders.move_overwritten_baselines_to(builder)
-                        for platform in move_overwritten_baselines_to:
-                            cmd_line.extend(['--move-overwritten-baselines-to', platform])
+                    cmd_line = ['--suffixes', suffixes, '--builder', builder, '--test', test]
                     if options.results_directory:
                         cmd_line.extend(['--results-directory', options.results_directory])
                     if options.verbose:
                         cmd_line.append('--verbose')
-                    commands.append(tuple([cmd_line, cwd]))
-        return commands
+                    copy_baseline_commands.append(tuple([[path_to_webkit_patch, 'copy-existing-baselines-internal'] + cmd_line, cwd]))
+                    rebaseline_commands.append(tuple([[path_to_webkit_patch, 'rebaseline-test-internal'] + cmd_line, cwd]))
+        return copy_baseline_commands, rebaseline_commands
 
     def _files_to_add(self, command_results):
         files_to_add = set()
@@ -381,13 +391,7 @@
                         expectationsString = expectations.remove_configuration_from_test(test, test_configuration)
                 self._tool.filesystem.write_text_file(path, expectationsString)
 
-    def _rebaseline(self, options, test_prefix_list):
-        for test, builders_to_check in sorted(test_prefix_list.items()):
-            _log.info("Rebaselining %s" % test)
-            for builder, suffixes in sorted(builders_to_check.items()):
-                _log.debug("  %s: %s" % (builder, ",".join(suffixes)))
-
-        commands = self._rebaseline_commands(test_prefix_list, options)
+    def _run_in_parallel_and_update_scm(self, commands):
         command_results = self._tool.executive.run_in_parallel(commands)
         log_output = '\n'.join(result[2] for result in command_results).replace('\n\n', '\n')
         for line in log_output.split('\n'):
@@ -397,7 +401,18 @@
         files_to_add, lines_to_remove = self._files_to_add(command_results)
         if files_to_add:
             self._tool.scm().add_list(list(files_to_add))
-        self._update_expectations_files(lines_to_remove)
+        if lines_to_remove:
+            self._update_expectations_files(lines_to_remove)
+
+    def _rebaseline(self, options, test_prefix_list):
+        for test, builders_to_check in sorted(test_prefix_list.items()):
+            _log.info("Rebaselining %s" % test)
+            for builder, suffixes in sorted(builders_to_check.items()):
+                _log.debug("  %s: %s" % (builder, ",".join(suffixes)))
+
+        copy_baseline_commands, rebaseline_commands = self._rebaseline_commands(test_prefix_list, options)
+        self._run_in_parallel_and_update_scm(copy_baseline_commands)
+        self._run_in_parallel_and_update_scm(rebaseline_commands)
 
         if options.optimize:
             self._optimize_baselines(test_prefix_list, options.verbose)
@@ -409,7 +424,6 @@
 
     def __init__(self,):
         super(RebaselineJson, self).__init__(options=[
-            self.move_overwritten_baselines_option,
             self.no_optimize_option,
             self.results_directory_option,
             ])
@@ -424,7 +438,6 @@
 
     def __init__(self):
         super(RebaselineExpectations, self).__init__(options=[
-            self.move_overwritten_baselines_option,
             self.no_optimize_option,
             ] + self.platform_options)
         self._test_prefix_list = None
@@ -472,29 +485,25 @@
 
     def __init__(self):
         super(Rebaseline, self).__init__(options=[
-            self.move_overwritten_baselines_option,
             self.no_optimize_option,
             # FIXME: should we support the platform options in addition to (or instead of) --builders?
             self.suffixes_option,
+            self.results_directory_option,
             optparse.make_option("--builders", default=None, action="append", help="Comma-separated-list of builders to pull new baselines from (can also be provided multiple times)"),
             ])
 
     def _builders_to_pull_from(self):
-        chromium_buildbot_builder_names = []
-        for name in builders.all_builder_names():
-            chromium_buildbot_builder_names.append(name)
-        chosen_names = self._tool.user.prompt_with_list("Which builder to pull results from:", chromium_buildbot_builder_names, can_choose_multiple=True)
+        chosen_names = self._tool.user.prompt_with_list("Which builder to pull results from:", self._release_builders(), can_choose_multiple=True)
         return [self._builder_with_name(name) for name in chosen_names]
 
     def _builder_with_name(self, name):
         return self._tool.buildbot_for_builder_name(name).builder_with_name(name)
 
-    def _tests_to_update(self, builder):
-        failing_tests = builder.latest_layout_test_results().tests_matching_failure_types([test_failures.FailureTextMismatch])
-        return self._tool.user.prompt_with_list("Which test(s) to rebaseline for %s:" % builder.name(), failing_tests, can_choose_multiple=True)
-
     def execute(self, options, args, tool):
-        options.results_directory = None
+        if not args:
+            _log.error("Must list tests to rebaseline.")
+            return
+
         if options.builders:
             builders_to_check = []
             for builder_names in options.builders:
@@ -506,8 +515,7 @@
         suffixes_to_update = options.suffixes.split(",")
 
         for builder in builders_to_check:
-            tests = args or self._tests_to_update(builder)
-            for test in tests:
+            for test in args:
                 if test not in test_prefix_list:
                     test_prefix_list[test] = {}
                 test_prefix_list[test][builder.name()] = suffixes_to_update
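[annotation] The rewritten AbstractParallelRebaselineCommand._rebaseline above now produces two independent command batches instead of one: every copy-existing-baselines-internal invocation is run in parallel and completed before any rebaseline-test-internal invocation starts, so baselines for ports lower in the fallback order are preserved before the freshly downloaded results can overwrite them. A minimal sketch of that split, using a hypothetical helper that skips the port.tests() expansion and builder filtering the real _rebaseline_commands performs:

    # Hypothetical, simplified helper (not webkitpy code); it only illustrates how
    # each builder/test pair yields one copy command and one rebaseline command.
    def build_command_lines(test_prefix_list, path_to_webkit_patch='webkit-patch'):
        copy_commands = []
        rebaseline_commands = []
        for test, builders_to_suffixes in sorted(test_prefix_list.items()):
            for builder, suffixes in sorted(builders_to_suffixes.items()):
                common = ['--suffixes', ','.join(suffixes), '--builder', builder, '--test', test]
                copy_commands.append([path_to_webkit_patch, 'copy-existing-baselines-internal'] + common)
                rebaseline_commands.append([path_to_webkit_patch, 'rebaseline-test-internal'] + common)
        return copy_commands, rebaseline_commands

    # Example input shaped like the test_prefix_list dictionaries in the unit tests below.
    copies, rebaselines = build_command_lines(
        {'userscripts/another-test.html': {'MOCK builder': ['txt', 'png']}})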
diff --git a/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py b/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py
index 913e86c..abf5c6f 100644
--- a/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py
@@ -73,18 +73,107 @@
         self.tool.filesystem.written_files = {}
 
 
+class TestCopyExistingBaselinesInternal(_BaseTestCase):
+    command_constructor = CopyExistingBaselinesInternal  # AKA webkit-patch copy-existing-baselines-internal
+
+    def setUp(self):
+        super(TestCopyExistingBaselinesInternal, self).setUp()
+
+    def test_copying_overwritten_baseline(self):
+        self.tool.executive = MockExecutive2()
+
+        # FIXME: it's confusing that this is the test- port, and not the regular lion port. Really all of the tests should be using the test ports.
+        port = self.tool.port_factory.get('test-mac-snowleopard')
+        self._write(port._filesystem.join(port.layout_tests_dir(), 'platform/test-mac-snowleopard/failures/expected/image-expected.txt'), 'original snowleopard result')
+
+        old_exact_matches = builders._exact_matches
+        oc = OutputCapture()
+        try:
+            builders._exact_matches = {
+                "MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
+                "MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
+            }
+
+            options = MockOptions(builder="MOCK SnowLeopard", suffixes="txt", verbose=True, test="failures/expected/image.html", results_directory=None)
+
+            oc.capture_output()
+            self.command.execute(options, [], self.tool)
+        finally:
+            out, _, _ = oc.restore_output()
+            builders._exact_matches = old_exact_matches
+
+        self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-mac-leopard/failures/expected/image-expected.txt')), 'original snowleopard result')
+        self.assertMultiLineEqual(out, '{"add": [], "remove-lines": []}\n')
+
+    def test_copying_overwritten_baseline_to_multiple_locations(self):
+        self.tool.executive = MockExecutive2()
+
+        # FIXME: it's confusing that this is the test- port, and not the regular win port. Really all of the tests should be using the test ports.
+        port = self.tool.port_factory.get('test-win-win7')
+        self._write(port._filesystem.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt'), 'original win7 result')
+
+        old_exact_matches = builders._exact_matches
+        oc = OutputCapture()
+        try:
+            builders._exact_matches = {
+                "MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
+                "MOCK Linux": {"port_name": "test-linux-x86_64", "specifiers": set(["mock-specifier"])},
+                "MOCK Win7": {"port_name": "test-win-win7", "specifiers": set(["mock-specifier"])},
+            }
+
+            options = MockOptions(builder="MOCK Win7", suffixes="txt", verbose=True, test="failures/expected/image.html", results_directory=None)
+
+            oc.capture_output()
+            self.command.execute(options, [], self.tool)
+        finally:
+            out, _, _ = oc.restore_output()
+            builders._exact_matches = old_exact_matches
+
+        self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-linux-x86_64/failures/expected/image-expected.txt')), 'original win7 result')
+        self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/mac-leopard/userscripts/another-test-expected.txt')))
+        self.assertMultiLineEqual(out, '{"add": [], "remove-lines": []}\n')
+
+    def test_no_copy_existing_baseline(self):
+        self.tool.executive = MockExecutive2()
+
+        # FIXME: it's confusing that this is the test- port, and not the regular win port. Really all of the tests should be using the test ports.
+        port = self.tool.port_factory.get('test-win-win7')
+        self._write(port._filesystem.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt'), 'original win7 result')
+
+        old_exact_matches = builders._exact_matches
+        oc = OutputCapture()
+        try:
+            builders._exact_matches = {
+                "MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
+                "MOCK Linux": {"port_name": "test-linux-x86_64", "specifiers": set(["mock-specifier"])},
+                "MOCK Win7": {"port_name": "test-win-win7", "specifiers": set(["mock-specifier"])},
+            }
+
+            options = MockOptions(builder="MOCK Win7", suffixes="txt", verbose=True, test="failures/expected/image.html", results_directory=None)
+
+            oc.capture_output()
+            self.command.execute(options, [], self.tool)
+        finally:
+            out, _, _ = oc.restore_output()
+            builders._exact_matches = old_exact_matches
+
+        self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-linux-x86_64/failures/expected/image-expected.txt')), 'original win7 result')
+        self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt')), 'original win7 result')
+        self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/mac-leopard/userscripts/another-test-expected.txt')))
+        self.assertMultiLineEqual(out, '{"add": [], "remove-lines": []}\n')
+
+
 class TestRebaselineTest(_BaseTestCase):
     command_constructor = RebaselineTest  # AKA webkit-patch rebaseline-test-internal
 
     def setUp(self):
         super(TestRebaselineTest, self).setUp()
-        self.options = MockOptions(builder="WebKit Mac10.7", test="userscripts/another-test.html", suffixes="txt",
-                                   move_overwritten_baselines_to=None, results_directory=None)
+        self.options = MockOptions(builder="WebKit Mac10.7", test="userscripts/another-test.html", suffixes="txt", results_directory=None)
 
     def test_baseline_directory(self):
         command = self.command
-        self.assertMultiLineEqual(command._baseline_directory("WebKit Mac10.7"), "/mock-checkout/LayoutTests/platform/chromium-mac-lion")
-        self.assertMultiLineEqual(command._baseline_directory("WebKit Mac10.6"), "/mock-checkout/LayoutTests/platform/chromium-mac-snowleopard")
+        self.assertMultiLineEqual(command._baseline_directory("WebKit Mac10.7"), "/mock-checkout/LayoutTests/platform/mac-lion")
+        self.assertMultiLineEqual(command._baseline_directory("WebKit Mac10.6"), "/mock-checkout/LayoutTests/platform/mac-snowleopard")
 
     def test_rebaseline_updates_expectations_file_noop(self):
         self._zero_out_test_expectations()
@@ -108,7 +197,7 @@
 """)
 
     def test_rebaseline_test(self):
-        self.command._rebaseline_test("WebKit Linux", "userscripts/another-test.html", None, "txt", self.WEB_PREFIX)
+        self.command._rebaseline_test("WebKit Linux", "userscripts/another-test.html", "txt", self.WEB_PREFIX)
         self.assertItemsEqual(self.tool.web.urls_fetched, [self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
 
     def test_rebaseline_test_with_results_directory(self):
@@ -130,131 +219,10 @@
         self.command._scm_changes = {'add': [], 'delete': []}
         self.tool._scm.exists = lambda x: False
 
-        self.command._rebaseline_test("WebKit Linux", "userscripts/another-test.html", None, "txt", None)
+        self.command._rebaseline_test("WebKit Linux", "userscripts/another-test.html", "txt", None)
 
         self.assertDictEqual(self.command._scm_changes, {'add': ['/mock-checkout/LayoutTests/platform/linux/userscripts/another-test-expected.txt'], 'delete': []})
 
-    def test_rebaseline_and_copy_test(self):
-        self._write("userscripts/another-test-expected.txt", "generic result")
-
-        self.command._rebaseline_test("WebKit Mac10.7", "userscripts/another-test.html", ["chromium-mac-snowleopard"], "txt", None)
-
-        self.assertMultiLineEqual(self._read('platform/chromium-mac-lion/userscripts/another-test-expected.txt'), self.MOCK_WEB_RESULT)
-        self.assertMultiLineEqual(self._read('platform/chromium-mac-snowleopard/userscripts/another-test-expected.txt'), 'generic result')
-
-    def test_rebaseline_and_copy_test_no_existing_result(self):
-        self.command._rebaseline_test("WebKit Mac10.7", "userscripts/another-test.html", ["chromium-mac-snowleopard"], "txt", None)
-
-        self.assertMultiLineEqual(self._read('platform/chromium-mac-lion/userscripts/another-test-expected.txt'), self.MOCK_WEB_RESULT)
-        self.assertFalse(self.tool.filesystem.exists(self._expand('platform/chromium-mac-snowleopard/userscripts/another-test-expected.txt')))
-
-    def test_rebaseline_and_copy_test_with_lion_result(self):
-        self._write("platform/chromium-mac-lion/userscripts/another-test-expected.txt", "original lion result")
-
-        self.command._rebaseline_test("WebKit Mac10.7", "userscripts/another-test.html", ["chromium-mac-snowleopard"], "txt", self.WEB_PREFIX)
-
-        self.assertItemsEqual(self.tool.web.urls_fetched, [self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
-        self.assertMultiLineEqual(self._read("platform/chromium-mac-snowleopard/userscripts/another-test-expected.txt"), "original lion result")
-        self.assertMultiLineEqual(self._read("platform/chromium-mac-lion/userscripts/another-test-expected.txt"), self.MOCK_WEB_RESULT)
-
-    def test_rebaseline_and_copy_no_overwrite_test(self):
-        self._write("platform/chromium-mac-lion/userscripts/another-test-expected.txt", "original lion result")
-        self._write("platform/chromium-mac-snowleopard/userscripts/another-test-expected.txt", "original snowleopard result")
-
-        self.command._rebaseline_test("WebKit Mac10.7", "userscripts/another-test.html", ["chromium-mac-snowleopard"], "txt", None)
-
-        self.assertMultiLineEqual(self._read("platform/chromium-mac-snowleopard/userscripts/another-test-expected.txt"), "original snowleopard result")
-        self.assertMultiLineEqual(self._read("platform/chromium-mac-lion/userscripts/another-test-expected.txt"), self.MOCK_WEB_RESULT)
-
-    def test_rebaseline_test_internal_with_copying_overwritten_baseline_first(self):
-        self.tool.executive = MockExecutive2()
-
-        # FIXME: it's confusing that this is the test- port, and not the regular lion port. Really all of the tests should be using the test ports.
-        port = self.tool.port_factory.get('test-mac-snowleopard')
-        self._write(port._filesystem.join(port.layout_tests_dir(), 'platform/test-mac-snowleopard/failures/expected/image-expected.txt'), 'original snowleopard result')
-
-        old_exact_matches = builders._exact_matches
-        oc = OutputCapture()
-        try:
-            builders._exact_matches = {
-                "MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
-                "MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
-            }
-
-            options = MockOptions(optimize=True, builder="MOCK SnowLeopard", suffixes="txt",
-                move_overwritten_baselines_to=None, verbose=True, test="failures/expected/image.html",
-                results_directory=None)
-
-            oc.capture_output()
-            self.command.execute(options, [], self.tool)
-        finally:
-            out, _, _ = oc.restore_output()
-            builders._exact_matches = old_exact_matches
-
-        self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-mac-leopard/failures/expected/image-expected.txt')), 'original snowleopard result')
-        self.assertMultiLineEqual(out, '{"add": [], "remove-lines": [{"test": "failures/expected/image.html", "builder": "MOCK SnowLeopard"}]}\n')
-
-    def test_rebaseline_test_internal_with_copying_overwritten_baseline_first_to_multiple_locations(self):
-        self.tool.executive = MockExecutive2()
-
-        # FIXME: it's confusing that this is the test- port, and not the regular win port. Really all of the tests should be using the test ports.
-        port = self.tool.port_factory.get('test-win-win7')
-        self._write(port._filesystem.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt'), 'original win7 result')
-
-        old_exact_matches = builders._exact_matches
-        oc = OutputCapture()
-        try:
-            builders._exact_matches = {
-                "MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
-                "MOCK Linux": {"port_name": "test-linux-x86_64", "specifiers": set(["mock-specifier"])},
-                "MOCK Win7": {"port_name": "test-win-win7", "specifiers": set(["mock-specifier"])},
-            }
-
-            options = MockOptions(optimize=True, builder="MOCK Win7", suffixes="txt",
-                move_overwritten_baselines_to=None, verbose=True, test="failures/expected/image.html",
-                results_directory=None)
-
-            oc.capture_output()
-            self.command.execute(options, [], self.tool)
-        finally:
-            out, _, _ = oc.restore_output()
-            builders._exact_matches = old_exact_matches
-
-        self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-linux-x86_64/failures/expected/image-expected.txt')), 'original win7 result')
-        self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/chromium-mac-leopard/userscripts/another-test-expected.txt')))
-        self.assertMultiLineEqual(out, '{"add": [], "remove-lines": [{"test": "failures/expected/image.html", "builder": "MOCK Win7"}]}\n')
-
-    def test_rebaseline_test_internal_with_no_overwrite_existing_baseline(self):
-        self.tool.executive = MockExecutive2()
-
-        # FIXME: it's confusing that this is the test- port, and not the regular win port. Really all of the tests should be using the test ports.
-        port = self.tool.port_factory.get('test-win-win7')
-        self._write(port._filesystem.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt'), 'original win7 result')
-
-        old_exact_matches = builders._exact_matches
-        oc = OutputCapture()
-        try:
-            builders._exact_matches = {
-                "MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
-                "MOCK Linux": {"port_name": "test-linux-x86_64", "specifiers": set(["mock-specifier"])},
-                "MOCK Win7": {"port_name": "test-win-win7", "specifiers": set(["mock-specifier"])},
-            }
-
-            options = MockOptions(optimize=True, builder="MOCK Win7", suffixes="txt",
-                move_overwritten_baselines_to=None, verbose=True, test="failures/expected/image.html",
-                results_directory=None)
-
-            oc.capture_output()
-            self.command.execute(options, [], self.tool)
-        finally:
-            out, _, _ = oc.restore_output()
-            builders._exact_matches = old_exact_matches
-
-        self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-linux-x86_64/failures/expected/image-expected.txt')), 'original win7 result')
-        self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt')), 'MOCK Web result, convert 404 to None=True')
-        self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/chromium-mac-leopard/userscripts/another-test-expected.txt')))
-        self.assertMultiLineEqual(out, '{"add": [], "remove-lines": [{"test": "failures/expected/image.html", "builder": "MOCK Win7"}]}\n')
-
     def test_rebaseline_test_internal_with_port_that_lacks_buildbot(self):
         self.tool.executive = MockExecutive2()
 
@@ -271,8 +239,7 @@
             }
 
             options = MockOptions(optimize=True, builder="MOCK Win7", suffixes="txt",
-                move_overwritten_baselines_to=None, verbose=True, test="failures/expected/image.html",
-                results_directory=None)
+                verbose=True, test="failures/expected/image.html", results_directory=None)
 
             oc.capture_output()
             self.command.execute(options, [], self.tool)
@@ -285,6 +252,25 @@
         self.assertMultiLineEqual(out, '{"add": [], "remove-lines": [{"test": "failures/expected/image.html", "builder": "MOCK Win7"}]}\n')
 
 
+class TestAbstractParallelRebaselineCommand(_BaseTestCase):
+    command_constructor = AbstractParallelRebaselineCommand
+
+    def test_builders_to_fetch_from(self):
+        old_exact_matches = builders._exact_matches
+        try:
+            builders._exact_matches = {
+                "MOCK XP": {"port_name": "test-win-xp"},
+                "MOCK Win7": {"port_name": "test-win-win7"},
+                "MOCK Win7 (dbg)(1)": {"port_name": "test-win-win7"},
+                "MOCK Win7 (dbg)(2)": {"port_name": "test-win-win7"},
+            }
+
+            builders_to_fetch = self.command._builders_to_fetch_from(["MOCK XP", "MOCK Win7 (dbg)(1)", "MOCK Win7 (dbg)(2)", "MOCK Win7"])
+            self.assertEqual(builders_to_fetch, ["MOCK XP", "MOCK Win7"])
+        finally:
+            builders._exact_matches = old_exact_matches
+
+
 class TestRebaselineJson(_BaseTestCase):
     command_constructor = RebaselineJson
 
@@ -293,9 +279,8 @@
         self.tool.executive = MockExecutive2()
         self.old_exact_matches = builders._exact_matches
         builders._exact_matches = {
-            "MOCK builder": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"]),
-                             "move_overwritten_baselines_to": ["test-mac-leopard"]},
-            "MOCK builder (Debug)": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier", "debug"])},
+            "MOCK builder": {"port_name": "test-mac-snowleopard"},
+            "MOCK builder (Debug)": {"port_name": "test-mac-snowleopard"},
         }
 
     def tearDown(self):
@@ -303,52 +288,46 @@
         super(TestRebaselineJson, self).tearDown()
 
     def test_rebaseline_all(self):
-        options = MockOptions(optimize=True, verbose=True, move_overwritten_baselines=False, results_directory=None)
+        options = MockOptions(optimize=True, verbose=True, results_directory=None)
         self._write("user-scripts/another-test.html", "Dummy test contents")
         self.command._rebaseline(options,  {"user-scripts/another-test.html": {"MOCK builder": ["txt", "png"]}})
 
         # Note that we have two run_in_parallel() calls followed by a run_command()
         self.assertEqual(self.tool.executive.calls,
-            [[['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'user-scripts/another-test.html', '--verbose']],
+            [[['echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'user-scripts/another-test.html', '--verbose']],
+             [['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'user-scripts/another-test.html', '--verbose']],
              ['echo', '--verbose', 'optimize-baselines', '--suffixes', 'txt,png', 'user-scripts/another-test.html']])
 
     def test_rebaseline_debug(self):
-        options = MockOptions(optimize=True, verbose=True, move_overwritten_baselines=False, results_directory=None)
+        options = MockOptions(optimize=True, verbose=True, results_directory=None)
         self._write("user-scripts/another-test.html", "Dummy test contents")
         self.command._rebaseline(options,  {"user-scripts/another-test.html": {"MOCK builder (Debug)": ["txt", "png"]}})
 
         # Note that we have two run_in_parallel() calls followed by a run_command()
         self.assertEqual(self.tool.executive.calls,
-            [[['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder (Debug)', '--test', 'user-scripts/another-test.html', '--verbose']],
-             ['echo', '--verbose', 'optimize-baselines', '--suffixes', 'txt,png', 'user-scripts/another-test.html']])
-
-    def test_move_overwritten(self):
-        options = MockOptions(optimize=True, verbose=True, move_overwritten_baselines=True, results_directory=None)
-        self._write("user-scripts/another-test.html", "Dummy test contents")
-        self.command._rebaseline(options,  {"user-scripts/another-test.html": {"MOCK builder": ["txt", "png"]}})
-
-        # Note that we have one run_in_parallel() call followed by a run_command()
-        self.assertEqual(self.tool.executive.calls,
-            [[['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'user-scripts/another-test.html', '--move-overwritten-baselines-to', 'test-mac-leopard', '--verbose']],
+            [[['echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder (Debug)', '--test', 'user-scripts/another-test.html', '--verbose']],
+             [['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder (Debug)', '--test', 'user-scripts/another-test.html', '--verbose']],
              ['echo', '--verbose', 'optimize-baselines', '--suffixes', 'txt,png', 'user-scripts/another-test.html']])
 
     def test_no_optimize(self):
-        options = MockOptions(optimize=False, verbose=True, move_overwritten_baselines=False, results_directory=None)
+        options = MockOptions(optimize=False, verbose=True, results_directory=None)
         self._write("user-scripts/another-test.html", "Dummy test contents")
         self.command._rebaseline(options,  {"user-scripts/another-test.html": {"MOCK builder (Debug)": ["txt", "png"]}})
 
         # Note that we have two run_in_parallel() calls and no run_command()
         self.assertEqual(self.tool.executive.calls,
-            [[['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder (Debug)', '--test', 'user-scripts/another-test.html', '--verbose']]])
+            [[['echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder (Debug)', '--test', 'user-scripts/another-test.html', '--verbose']],
+             [['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder (Debug)', '--test', 'user-scripts/another-test.html', '--verbose']]])
 
     def test_results_directory(self):
-        options = MockOptions(optimize=False, verbose=True, move_overwritten_baselines=False, results_directory='/tmp')
+        options = MockOptions(optimize=False, verbose=True, results_directory='/tmp')
         self._write("user-scripts/another-test.html", "Dummy test contents")
         self.command._rebaseline(options,  {"user-scripts/another-test.html": {"MOCK builder": ["txt", "png"]}})
 
         # Note that we have two run_in_parallel() calls and no run_command()
         self.assertEqual(self.tool.executive.calls,
-            [[['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'user-scripts/another-test.html', '--results-directory', '/tmp', '--verbose']]])
+            [[['echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'user-scripts/another-test.html', '--results-directory', '/tmp', '--verbose']],
+             [['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'user-scripts/another-test.html', '--results-directory', '/tmp', '--verbose']]])
 
 class TestRebaselineJsonUpdatesExpectationsFiles(_BaseTestCase):
     command_constructor = RebaselineJson
@@ -369,7 +348,7 @@
         self.tool.executive.run_command = mock_run_command
 
     def test_rebaseline_updates_expectations_file(self):
-        options = MockOptions(optimize=False, verbose=True, move_overwritten_baselines=False, results_directory=None)
+        options = MockOptions(optimize=False, verbose=True, results_directory=None)
 
         self._write(self.lion_expectations_path, "Bug(x) [ Mac ] userscripts/another-test.html [ ImageOnlyFailure ]\nbug(z) [ Linux ] userscripts/another-test.html [ ImageOnlyFailure ]\n")
         self._write("userscripts/another-test.html", "Dummy test contents")
@@ -380,7 +359,7 @@
         self.assertMultiLineEqual(new_expectations, "Bug(x) [ MountainLion SnowLeopard ] userscripts/another-test.html [ ImageOnlyFailure ]\nbug(z) [ Linux ] userscripts/another-test.html [ ImageOnlyFailure ]\n")
 
     def test_rebaseline_updates_expectations_file_all_platforms(self):
-        options = MockOptions(optimize=False, verbose=True, move_overwritten_baselines=False, results_directory=None)
+        options = MockOptions(optimize=False, verbose=True, results_directory=None)
 
         self._write(self.lion_expectations_path, "Bug(x) userscripts/another-test.html [ ImageOnlyFailure ]\n")
         self._write("userscripts/another-test.html", "Dummy test contents")
@@ -396,13 +375,8 @@
 
     command_constructor = Rebaseline  # AKA webkit-patch rebaseline
 
-    def test_tests_to_update(self):
-        build = Mock()
-        OutputCapture().assert_outputs(self, self.command._tests_to_update, [build])
-
     def test_rebaseline(self):
         self.command._builders_to_pull_from = lambda: [MockBuilder('MOCK builder')]
-        self.command._tests_to_update = lambda builder: ['mock/path/to/test.html']
 
         self._write("mock/path/to/test.html", "Dummy test contents")
 
@@ -413,17 +387,17 @@
             builders._exact_matches = {
                 "MOCK builder": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
             }
-            self.command.execute(MockOptions(optimize=False, builders=None, suffixes="txt,png", verbose=True, move_overwritten_baselines=False), [], self.tool)
+            self.command.execute(MockOptions(results_directory=False, optimize=False, builders=None, suffixes="txt,png", verbose=True), ['mock/path/to/test.html'], self.tool)
         finally:
             builders._exact_matches = old_exact_matches
 
         calls = filter(lambda x: x != ['qmake', '-v'] and x[0] != 'perl', self.tool.executive.calls)
         self.assertEqual(calls,
-            [[['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'mock/path/to/test.html', '--verbose']]])
+            [[['echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'mock/path/to/test.html', '--verbose']],
+             [['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'mock/path/to/test.html', '--verbose']]])
 
     def test_rebaseline_directory(self):
         self.command._builders_to_pull_from = lambda: [MockBuilder('MOCK builder')]
-        self.command._tests_to_update = lambda builder: ['userscripts']
 
         self._write("userscripts/first-test.html", "test data")
         self._write("userscripts/second-test.html", "test data")
@@ -433,13 +407,15 @@
             builders._exact_matches = {
                 "MOCK builder": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
             }
-            self.command.execute(MockOptions(optimize=False, builders=None, suffixes="txt,png", verbose=True, move_overwritten_baselines=False), [], self.tool)
+            self.command.execute(MockOptions(results_directory=False, optimize=False, builders=None, suffixes="txt,png", verbose=True), ['userscripts'], self.tool)
         finally:
             builders._exact_matches = old_exact_matches
 
         calls = filter(lambda x: x != ['qmake', '-v'] and x[0] != 'perl', self.tool.executive.calls)
         self.assertEqual(calls,
-            [[['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--verbose'],
+            [[['echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--verbose'],
+              ['echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/second-test.html', '--verbose']],
+             [['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--verbose'],
               ['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/second-test.html', '--verbose']]])
 
 
@@ -448,8 +424,7 @@
 
     def setUp(self):
         super(TestRebaselineExpectations, self).setUp()
-        self.options = MockOptions(optimize=False, builders=None, suffixes=['txt'], verbose=False, platform=None,
-                                   move_overwritten_baselines=False, results_directory=None)
+        self.options = MockOptions(optimize=False, builders=None, suffixes=['txt'], verbose=False, platform=None, results_directory=None)
 
     def test_rebaseline_expectations(self):
         self._zero_out_test_expectations()
@@ -463,7 +438,7 @@
 
         # FIXME: change this to use the test- ports.
         calls = filter(lambda x: x != ['qmake', '-v'], self.tool.executive.calls)
-        self.assertEqual(len(calls), 1)
+        self.assertEqual(len(calls), 2)
         self.assertEqual(len(calls[0]), 14)
 
     def test_rebaseline_expectations_noop(self):
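[annotation] The new unit tests above share one pattern whenever they need specific builders: swap the module-level builders._exact_matches table for a small mock dictionary inside a try block, capture output with OutputCapture, and restore the real table in the finally clause so later tests are unaffected. A minimal, self-contained sketch of that stub-and-restore pattern (the fake module and builder names are illustrative):

    import unittest

    class _FakeBuildersModule(object):
        # Stand-in for the webkitpy builders module in this sketch.
        _exact_matches = {"Real Builder": {"port_name": "real-port"}}

    builders = _FakeBuildersModule()

    class ExactMatchesStubTest(unittest.TestCase):
        def test_with_mock_builders(self):
            old_exact_matches = builders._exact_matches
            try:
                # Point the shared table at mock builders for this test only.
                builders._exact_matches = {"MOCK Win7": {"port_name": "test-win-win7"}}
                self.assertEqual(sorted(builders._exact_matches.keys()), ["MOCK Win7"])
            finally:
                # Always restore the original table so other tests see the real configuration.
                builders._exact_matches = old_exact_matches

    if __name__ == '__main__':
        unittest.main()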
diff --git a/Tools/Scripts/webkitpy/tool/commands/rebaselineserver.py b/Tools/Scripts/webkitpy/tool/commands/rebaselineserver.py
index 09c6d0b..ffc03b8 100644
--- a/Tools/Scripts/webkitpy/tool/commands/rebaselineserver.py
+++ b/Tools/Scripts/webkitpy/tool/commands/rebaselineserver.py
@@ -31,7 +31,7 @@
 images and text) and allows one-click rebaselining of tests."""
 
 from webkitpy.common import system
-from webkitpy.common.net.resultsjsonparser import for_each_test, JSONTestResult
+from webkitpy.common.net.layouttestresults import for_each_test, JSONTestResult
 from webkitpy.layout_tests.layout_package import json_results_generator
 from webkitpy.tool.commands.abstractlocalservercommand import AbstractLocalServerCommand
 from webkitpy.tool.servers.rebaselineserver import get_test_baselines, RebaselineHTTPServer, STATE_NEEDS_REBASELINE
diff --git a/Tools/Scripts/webkitpy/tool/servers/gardeningserver.py b/Tools/Scripts/webkitpy/tool/servers/gardeningserver.py
index 0a5713c..8bc38cd 100644
--- a/Tools/Scripts/webkitpy/tool/servers/gardeningserver.py
+++ b/Tools/Scripts/webkitpy/tool/servers/gardeningserver.py
@@ -101,8 +101,6 @@
 
     def rebaselineall(self):
         command = ['rebaseline-json']
-        if self.server.options.move_overwritten_baselines:
-            command.append('--move-overwritten-baselines')
         if self.server.options.results_directory:
             command.extend(['--results-directory', self.server.options.results_directory])
         if not self.server.options.optimize:
diff --git a/Tools/Scripts/webkitpy/tool/servers/rebaselineserver_unittest.py b/Tools/Scripts/webkitpy/tool/servers/rebaselineserver_unittest.py
index fd19c4d..bab9cf3 100644
--- a/Tools/Scripts/webkitpy/tool/servers/rebaselineserver_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/servers/rebaselineserver_unittest.py
@@ -29,7 +29,7 @@
 import json
 import unittest2 as unittest
 
-from webkitpy.common.net import resultsjsonparser_unittest
+from webkitpy.common.net import layouttestresults_unittest
 from webkitpy.common.host_mock import MockHost
 from webkitpy.layout_tests.layout_package.json_results_generator import strip_json_wrapper
 from webkitpy.layout_tests.port.base import Port
@@ -205,7 +205,7 @@
             ])
 
     def test_gather_baselines(self):
-        example_json = resultsjsonparser_unittest.ResultsJSONParserTest._example_full_results_json
+        example_json = layouttestresults_unittest.LayoutTestResultsTest.example_full_results_json
         results_json = json.loads(strip_json_wrapper(example_json))
         server = RebaselineServer()
         server._test_config = get_test_config()
diff --git a/Tools/TestResultServer/app.yaml b/Tools/TestResultServer/app.yaml
index 7d7ece8..eae2dab 100644
--- a/Tools/TestResultServer/app.yaml
+++ b/Tools/TestResultServer/app.yaml
@@ -4,6 +4,9 @@
 api_version: 1
 threadsafe: true
 
+builtins:
+- appstats: on
+
 handlers:
 - url: /robots.txt
   static_files: robots.txt
diff --git a/Tools/TestResultServer/appengine_config.py b/Tools/TestResultServer/appengine_config.py
new file mode 100755
index 0000000..0a1d35e
--- /dev/null
+++ b/Tools/TestResultServer/appengine_config.py
@@ -0,0 +1,33 @@
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
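+# App Engine WSGI hook: wrap the application with Appstats recording so the
+# appstats builtin enabled in app.yaml can profile datastore and other RPC calls.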
+def webapp_add_wsgi_middleware(app):
+    from google.appengine.ext.appstats import recording
+    app = recording.appstats_wsgi_middleware(app)
+    return app
diff --git a/Tools/TestResultServer/cron.yaml b/Tools/TestResultServer/cron.yaml
index 0968a24..cc95048 100644
--- a/Tools/TestResultServer/cron.yaml
+++ b/Tools/TestResultServer/cron.yaml
@@ -1,5 +1,5 @@
 cron:
 - description: refresh builders list job
   url: /updatebuilders
-  schedule: every 2 hours
+  schedule: every 30 minutes
   timezone: America/Los_Angeles
diff --git a/Tools/TestResultServer/model/datastorefile.py b/Tools/TestResultServer/model/datastorefile.py
index 1862f21..8dca34d 100755
--- a/Tools/TestResultServer/model/datastorefile.py
+++ b/Tools/TestResultServer/model/datastorefile.py
@@ -26,6 +26,7 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 import logging
+import math
 
 from google.appengine.ext import blobstore
@@ -35,6 +36,14 @@
 MAX_ENTRY_LEN = 1000 * 1000
 
 
+class ChunkData:
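+    """Bookkeeping for a single file chunk while it is written to the datastore asynchronously."""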
+    def __init__(self):
+        self.reused_key = None
+        self.data_entry = None
+        self.entry_future = None
+        self.index = None
+
+
 class DataEntry(db.Model):
     """Datastore entry that stores one segmant of file data
        (<1000*1000 bytes).
@@ -46,8 +55,13 @@
     def get(cls, key):
         return db.get(key)
 
-    def get_data(self, key):
-        return db.get(key)
+    @classmethod
+    def get_async(cls, key):
+        return db.get_async(key)
+
+    @classmethod
+    def delete_async(cls, key):
+        return db.delete_async(key)
 
 
 class DataStoreFile(db.Model):
@@ -65,7 +79,10 @@
 
     data = None
 
-    # FIXME: Remove this once all the bots have cycled after converting to the high-replication database.
+    def _get_chunk_indices(self, data_length):
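+        """Return the start offset of each MAX_ENTRY_LEN-sized chunk needed to hold data_length bytes."""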
+        nchunks = math.ceil(float(data_length) / MAX_ENTRY_LEN)
+        return xrange(0, int(nchunks) * MAX_ENTRY_LEN, MAX_ENTRY_LEN)
+
     def _convert_blob_keys(self, keys):
         converted_keys = []
         for key in keys:
@@ -79,11 +96,17 @@
     def delete_data(self, keys=None):
         if not keys:
             keys = self._convert_blob_keys(self.data_keys)
+        logging.info('Doing async delete of keys: %s', keys)
 
-        for key in keys:
-            data_entry = DataEntry.get(key)
-            if data_entry:
-                data_entry.delete()
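+        # Fetch all entries in parallel, then issue deletes for the ones that
+        # exist and block until every delete has completed.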
+        get_futures = [DataEntry.get_async(k) for k in keys]
+        delete_futures = []
+        for get_future in get_futures:
+            result = get_future.get_result()
+            if result:
+                delete_futures.append(DataEntry.delete_async(result.key()))
+
+        for delete_future in delete_futures:
+            delete_future.get_result()
 
     def save_data(self, data):
         if not data:
@@ -105,34 +128,42 @@
         keys = self._convert_blob_keys(self.new_data_keys)
         self.new_data_keys = []
 
-        # FIXME: is all this complexity with storing the file in chunks really needed anymore?
-        # Can we just store it in a single blob?
-        while start < len(data):
-            if keys:
-                key = keys[0]
-                data_entry = DataEntry.get(key)
-                if not data_entry:
-                    logging.warning("Found key, but no data entry: %s", key)
-                    data_entry = DataEntry()
-            else:
-                data_entry = DataEntry()
+        chunk_indices = self._get_chunk_indices(len(data))
+        logging.info('Saving file in %s chunks', len(chunk_indices))
 
-            data_entry.data = db.Blob(data[start: start + MAX_ENTRY_LEN])
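+        # Assign each chunk either an existing key to reuse (fetching its entry
+        # asynchronously) or a brand-new DataEntry.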
+        chunk_data = []
+        for chunk_index in chunk_indices:
+            chunk = ChunkData()
+            chunk.index = chunk_index
+            if keys:
+                chunk.reused_key = keys.pop()
+                chunk.entry_future = DataEntry.get_async(chunk.reused_key)
+            else:
+                chunk.data_entry = DataEntry()
+            chunk_data.append(chunk)
+
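+        # Resolve the pending gets for reused keys, then write every chunk to the datastore in parallel.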
+        put_futures = []
+        for chunk in chunk_data:
+            if chunk.entry_future:
+                data_entry = chunk.entry_future.get_result()
+                if not data_entry:
+                    logging.warning("Found key, but no data entry: %s", chunk.reused_key)
+                    data_entry = DataEntry()
+                chunk.data_entry = data_entry
+
+            chunk.data_entry.data = db.Blob(data[chunk.index: chunk.index + MAX_ENTRY_LEN])
+            put_futures.append(db.put_async(chunk.data_entry))
+
+        for future in put_futures:
+            key = None
             try:
-                data_entry.put()
+                key = future.get_result()
+                self.new_data_keys.append(key)
             except Exception, err:
                 logging.error("Failed to save data store entry: %s", err)
-                if keys:
-                    self.delete_data(keys)
+                self.delete_data(keys)
                 return False
 
-            logging.info("Data saved: %s.", data_entry.key())
-            self.new_data_keys.append(data_entry.key())
-            if keys:
-                keys.pop(0)
-
-            start = start + MAX_ENTRY_LEN
-
         if keys:
             self.delete_data(keys)
 
@@ -148,16 +179,16 @@
             logging.warning("No data to load.")
             return None
 
+        data_futures = [(k, DataEntry.get_async(k)) for k in self._convert_blob_keys(self.data_keys)]
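+        # The gets run in parallel; results are consumed below in data_keys order so the chunks reassemble correctly.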
+
         data = []
-        for key in self._convert_blob_keys(self.data_keys):
-            logging.info("Loading data for key: %s.", key)
-            data_entry = DataEntry.get(key)
-            if not data_entry:
+        for key, future in data_futures:
+            result = future.get_result()
+            if not result:
                 logging.error("No data found for key: %s.", key)
                 return None
+            data.append(result)
 
-            data.append(data_entry.data)
-
-        self.data = "".join(data)
+        self.data = "".join([d.data for d in data])
 
         return self.data
diff --git a/Tools/TestResultServer/model/datastorefile_test.py b/Tools/TestResultServer/model/datastorefile_test.py
new file mode 100644
index 0000000..088cd94
--- /dev/null
+++ b/Tools/TestResultServer/model/datastorefile_test.py
@@ -0,0 +1,103 @@
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+import datastorefile
+
+from google.appengine.ext import db
+from google.appengine.ext import testbed
+
+
+class DataStoreFileTest(unittest.TestCase):
+    def setUp(self):
+        self.testbed = testbed.Testbed()
+        self.testbed.activate()
+        self.testbed.init_datastore_v3_stub()
+
+        self.test_file = datastorefile.DataStoreFile()
+
+    def tearDown(self):
+        self.testbed.deactivate()
+
+    def testSaveLoadDeleteData(self):
+        test_data = 'x' * datastorefile.MAX_ENTRY_LEN * 3
+
+        self.assertTrue(self.test_file.save_data(test_data))
+        self.assertEqual(test_data, self.test_file.data)
+
+        self.assertEqual(test_data, self.test_file.load_data())
+        self.assertEqual(test_data, self.test_file.data)
+
+        self.test_file.delete_data()
+        self.assertFalse(self.test_file.load_data())
+
+    def testSaveData(self):
+        self.assertFalse(self.test_file.save_data(None))
+
+        too_big_data = 'x' * (datastorefile.MAX_DATA_ENTRY_PER_FILE * datastorefile.MAX_ENTRY_LEN + 1)
+        self.assertFalse(self.test_file.save_data(too_big_data))
+
+        test_data = 'x' * datastorefile.MAX_ENTRY_LEN * 5
+        self.assertTrue(self.test_file.save_data(test_data))
+        nchunks = datastorefile.DataEntry.all().count()
+        nkeys = len(self.test_file.data_keys) + len(self.test_file.new_data_keys)
+        self.assertEqual(nkeys, nchunks)
+
+    def testSaveDataKeyReuse(self):
+        test_data = 'x' * datastorefile.MAX_ENTRY_LEN * 5
+        self.assertTrue(self.test_file.save_data(test_data))
+        nchunks = datastorefile.DataEntry.all().count()
+        nkeys = len(self.test_file.data_keys) + len(self.test_file.new_data_keys)
+        self.assertEqual(nkeys, nchunks)
+
+        smaller_data = 'x' * datastorefile.MAX_ENTRY_LEN * 3
+        self.assertTrue(self.test_file.save_data(smaller_data))
+        nchunks = datastorefile.DataEntry.all().count()
+        nkeys_before = len(self.test_file.data_keys) + len(self.test_file.new_data_keys)
+        self.assertEqual(nkeys_before, nchunks)
+
+        self.assertTrue(self.test_file.save_data(smaller_data))
+        nchunks = datastorefile.DataEntry.all().count()
+        nkeys_after = len(self.test_file.data_keys) + len(self.test_file.new_data_keys)
+        self.assertEqual(nkeys_after, nchunks)
+        self.assertNotEqual(nkeys_before, nkeys_after)
+
+    def testGetChunkIndices(self):
+        data_length = datastorefile.MAX_ENTRY_LEN * 3
+        chunk_indices = self.test_file._get_chunk_indices(data_length)
+        self.assertEqual(len(chunk_indices), 3)
+        self.assertNotEqual(chunk_indices[0], chunk_indices[-1])
+
+        data_length += 1
+        chunk_indices = self.test_file._get_chunk_indices(data_length)
+        self.assertEqual(len(chunk_indices), 4)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/TestResultServer/model/jsonresults.py b/Tools/TestResultServer/model/jsonresults.py
index aa8cc6f..35b56c9 100755
--- a/Tools/TestResultServer/model/jsonresults.py
+++ b/Tools/TestResultServer/model/jsonresults.py
@@ -367,20 +367,12 @@
     def _convert_full_results_format_to_aggregate(cls, full_results_format):
         num_total_tests = 0
         num_failing_tests = 0
-        fixableCounts = {}
         failures_by_type = full_results_format[FAILURES_BY_TYPE_KEY]
 
         # FIXME: full_results format has "FAIL" entries, but that is no longer a possible result type.
         if 'FAIL' in failures_by_type:
             del failures_by_type['FAIL']
 
-        for failure_type in failures_by_type:
-            count = failures_by_type[failure_type]
-            num_total_tests += count
-            if failure_type != PASS_STRING:
-                num_failing_tests += count
-            fixableCounts[FAILURE_TO_CHAR[failure_type]] = count
-
         tests = {}
         cls._populate_tests_from_full_results(full_results_format[TESTS_KEY], tests)
 
diff --git a/Tools/TestResultServer/model/testfile.py b/Tools/TestResultServer/model/testfile.py
index bada12f..a3c3932 100644
--- a/Tools/TestResultServer/model/testfile.py
+++ b/Tools/TestResultServer/model/testfile.py
@@ -31,7 +31,7 @@
 
 from google.appengine.ext import db
 
-from model.datastorefile import DataStoreFile
+from datastorefile import DataStoreFile
 
 
 class TestFile(DataStoreFile):
diff --git a/Tools/TestResultServer/model/testfile_test.py b/Tools/TestResultServer/model/testfile_test.py
new file mode 100644
index 0000000..2745030
--- /dev/null
+++ b/Tools/TestResultServer/model/testfile_test.py
@@ -0,0 +1,138 @@
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import datastorefile
+import time
+import testfile
+import unittest
+
+from datetime import datetime
+
+from google.appengine.datastore import datastore_stub_util
+from google.appengine.ext import db
+from google.appengine.ext import testbed
+
+TEST_DATA = [
+    # master, builder, test_type, name, data; order matters.
+    ['ChromiumWebKit', 'WebKit Linux', 'layout-tests', 'webkit_linux_results.json', 'a'],
+    ['ChromiumWebKit', 'WebKit Win7', 'layout-tests', 'webkit_win7_results.json', 'b'],
+    ['ChromiumWin', 'Win7 (Dbg)', 'unittests', 'win7_dbg_unittests.json', 'c'],
+]
+
+
+class TestFileTest(unittest.TestCase):
+    def setUp(self):
+        self.testbed = testbed.Testbed()
+        self.testbed.activate()
+
+        self.policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(probability=1)
+        self.testbed.init_datastore_v3_stub(consistency_policy=self.policy)
+
+        test_file = testfile.TestFile()
+
+    def _getAllFiles(self):
+        return testfile.TestFile.get_files(None, None, None, None, limit=None)
+
+    def _assertFileMatchesData(self, expected_data, actual_file):
+        actual_fields = [actual_file.master, actual_file.builder, actual_file.test_type, actual_file.name, actual_file.data]
+        self.assertEqual(expected_data, actual_fields, 'Mismatch between expected fields in file and actual file.')
+
+    def _addFileAndAssert(self, file_data):
+        _, code = testfile.TestFile.add_file(*file_data)
+        self.assertEqual(200, code, 'Unable to create file with data: %s' % file_data)
+
+    def testSaveFile(self):
+        file_data = TEST_DATA[0][:]
+        self._addFileAndAssert(file_data)
+
+        files = self._getAllFiles()
+        self.assertEqual(1, len(files))
+        self._assertFileMatchesData(file_data, files[0])
+
+        _, code = testfile.TestFile.save_file(files[0], None)
+        self.assertEqual(500, code, 'Expected empty file not to have been saved.')
+
+        files = self._getAllFiles()
+        self.assertEqual(1, len(files), 'Expected exactly one file to be present.')
+        self._assertFileMatchesData(file_data, files[0])
+
+    def testAddAndGetFile(self):
+        for file_data in TEST_DATA:
+            self._addFileAndAssert(file_data)
+
+        files = self._getAllFiles()
+        self.assertEqual(len(TEST_DATA), len(files), 'Mismatch between number of test records and number of files in db.')
+
+        for f in files:
+            fields = [f.master, f.builder, f.test_type, f.name, f.data]
+            self.assertIn(fields, TEST_DATA)
+
+    def testOverwriteOrAddFile(self):
+        file_data = TEST_DATA[0][:]
+        _, code = testfile.TestFile.overwrite_or_add_file(*file_data)
+        self.assertEqual(200, code, 'Unable to create file with data: %s' % file_data)
+        files = self._getAllFiles()
+        self.assertEqual(1, len(files))
+
+        _, code = testfile.TestFile.overwrite_or_add_file(*file_data)
+        self.assertEqual(200, code, 'Unable to overwrite or create file with data: %s' % file_data)
+        files = self._getAllFiles()
+        self.assertEqual(1, len(files))
+
+        file_data = TEST_DATA[1][:]
+        _, code = testfile.TestFile.overwrite_or_add_file(*file_data)
+        self.assertEqual(200, code, 'Unable to overwrite or create file with different data: %s' % file_data)
+        files = self._getAllFiles()
+        self.assertEqual(2, len(files))
+
+    def testDeleteFile(self):
+        file_contents = 'x' * datastorefile.MAX_ENTRY_LEN * 2
+        file_data = ['ChromiumWebKit', 'WebKit Linux', 'layout-tests', 'results.json', file_contents]
+        self._addFileAndAssert(file_data)
+
+        ndeleted = testfile.TestFile.delete_file(None, 'ChromiumWebKit', 'WebKit Linux', 'layout-tests', 'results.json', None, None)
+        self.assertEqual(1, ndeleted, 'Expected exactly one file to have been deleted.')
+
+        nfiles = testfile.TestFile.all().count()
+        self.assertEqual(0, nfiles, 'Expected exactly zero files to be present in db.')
+
+    def testDeleteAll(self):
+        for file_data in TEST_DATA:
+            self._addFileAndAssert(file_data)
+
+        files = self._getAllFiles()
+        self.assertEqual(len(TEST_DATA), len(files))
+
+        files[0]._delete_all()
+
+        files = self._getAllFiles()
+        self.assertEqual(len(TEST_DATA) - 1, len(files))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Tools/gdb/webkit.py b/Tools/gdb/webkit.py
index f0d3207..f805c01 100644
--- a/Tools/gdb/webkit.py
+++ b/Tools/gdb/webkit.py
@@ -261,12 +261,12 @@
         self.val = val
 
     def children(self):
-        start = self.val['m_buffer']['m_buffer']
+        start = self.val['m_buffer']
         return self.Iterator(start, start + self.val['m_size'])
 
     def to_string(self):
         return ('%s of length %d, capacity %d'
-                % ('WTF::Vector', self.val['m_size'], self.val['m_buffer']['m_capacity']))
+                % ('WTF::Vector', self.val['m_size'], self.val['m_capacity']))
 
     def display_hint(self):
         return 'array'