Merge from Chromium at DEPS revision r202854

This commit was generated by merge_to_master.py.

Change-Id: I5b225b76b2157384357897051ee5866dd4bb23a8
diff --git a/Tools/DumpRenderTree/DumpRenderTree.gyp/DumpRenderTree.gyp b/Tools/DumpRenderTree/DumpRenderTree.gyp/DumpRenderTree.gyp
index 9825a51..c219cb1 100644
--- a/Tools/DumpRenderTree/DumpRenderTree.gyp/DumpRenderTree.gyp
+++ b/Tools/DumpRenderTree/DumpRenderTree.gyp/DumpRenderTree.gyp
@@ -83,15 +83,16 @@
             'include_dirs': [
                 '<(DEPTH)',
                 '<(source_dir)/WebKit/chromium/public',
-                '<(DEPTH)',
                 '../chromium/TestRunner/public',
                 '../chromium/TestRunner/src',
                 '../../../Source',
+                '../../..',
             ],
             'direct_dependent_settings': {
                 'include_dirs': [
                     '../chromium/TestRunner/public',
                     '../../../Source',
+                    '../../..',
                 ],
             },
             'sources': [
@@ -234,7 +235,6 @@
                 'DumpRenderTree_resources',
                 '<(source_dir)/devtools/devtools.gyp:devtools_frontend_resources',
                 '<(source_dir)/WebKit/chromium/WebKit.gyp:webkit',
-                '<(source_dir)/WebKit/chromium/WebKit.gyp:webkit_wtf_support',
                 '<(source_dir)/wtf/wtf.gyp:wtf',
                 '<(DEPTH)/base/base.gyp:test_support_base',
                 '<(DEPTH)/build/temp_gyp/googleurl.gyp:googleurl',
@@ -247,7 +247,6 @@
                 '<(DEPTH)',
                 '<(source_dir)/WebKit/chromium/public',
                 '<(tools_dir)/DumpRenderTree',
-                '<(DEPTH)',
             ],
             'defines': [
                 # Technically not a unit test but require functions available only to
diff --git a/Tools/DumpRenderTree/DumpRenderTree.gypi b/Tools/DumpRenderTree/DumpRenderTree.gypi
index 7ce8140..f22c858 100644
--- a/Tools/DumpRenderTree/DumpRenderTree.gypi
+++ b/Tools/DumpRenderTree/DumpRenderTree.gypi
@@ -51,6 +51,8 @@
             'chromium/TestRunner/src/GamepadController.h',
             'chromium/TestRunner/src/KeyCodeMapping.cpp',
             'chromium/TestRunner/src/KeyCodeMapping.h',
+            'chromium/TestRunner/src/MockColorChooser.cpp',
+            'chromium/TestRunner/src/MockColorChooser.h',
             'chromium/TestRunner/src/MockConstraints.cpp',
             'chromium/TestRunner/src/MockConstraints.h',
             'chromium/TestRunner/src/MockGrammarCheck.cpp',
diff --git a/Tools/DumpRenderTree/TestNetscapePlugIn/PluginObject.cpp b/Tools/DumpRenderTree/TestNetscapePlugIn/PluginObject.cpp
index 7563184..1e7464f 100644
--- a/Tools/DumpRenderTree/TestNetscapePlugIn/PluginObject.cpp
+++ b/Tools/DumpRenderTree/TestNetscapePlugIn/PluginObject.cpp
@@ -799,10 +799,10 @@
     if (!tempFile)
         return false;
 
-    if (!fwrite(contentsString.UTF8Characters, contentsString.UTF8Length, 1, tempFile))
-        return false;
-
+    size_t written = fwrite(contentsString.UTF8Characters, contentsString.UTF8Length, 1, tempFile);
     fclose(tempFile);
+    if (!written)
+        return false;
 
     NPError error = browser->posturl(obj->npp, url, target, pathString.UTF8Length, path, TRUE);
 
diff --git a/Tools/DumpRenderTree/chromium/DumpRenderTree.cpp b/Tools/DumpRenderTree/chromium/DumpRenderTree.cpp
index 8038040..9913676 100644
--- a/Tools/DumpRenderTree/chromium/DumpRenderTree.cpp
+++ b/Tools/DumpRenderTree/chromium/DumpRenderTree.cpp
@@ -62,6 +62,7 @@
 static const char optionEnableAcceleratedCompositingForVideo[] = "--enable-accelerated-video";
 static const char optionEnableAcceleratedFixedPosition[] = "--enable-accelerated-fixed-position";
 static const char optionEnableAcceleratedOverflowScroll[] = "--enable-accelerated-overflow-scroll";
+static const char optionEnableAcceleratedTransition[] = "--enable-accelerated-transition";
 static const char optionEnablePerTilePainting[] = "--enable-per-tile-painting";
 static const char optionEnableDeferredImageDecoding[] = "--enable-deferred-image-decoding";
 static const char optionDisableThreadedHTMLParser[] = "--disable-threaded-html-parser";
@@ -132,6 +133,7 @@
     bool acceleratedCompositingForVideoEnabled = false;
     bool acceleratedCompositingForFixedPositionEnabled = false;
     bool acceleratedCompositingForOverflowScrollEnabled = false;
+    bool acceleratedCompositingForTransitionEnabled = false;
     bool softwareCompositingEnabled = false;
     bool threadedCompositingEnabled = false;
     bool forceCompositingMode = false;
@@ -171,6 +173,8 @@
             acceleratedCompositingForFixedPositionEnabled = true;
         else if (argument == optionEnableAcceleratedOverflowScroll)
             acceleratedCompositingForOverflowScrollEnabled = true;
+        else if (argument == optionEnableAcceleratedTransition)
+            acceleratedCompositingForTransitionEnabled = true;
         else if (argument == optionEnableSoftwareCompositing)
             softwareCompositingEnabled = true;
         else if (argument == optionEnableThreadedCompositing)
@@ -219,6 +223,7 @@
         shell.setAcceleratedCompositingForVideoEnabled(acceleratedCompositingForVideoEnabled);
         shell.setAcceleratedCompositingForFixedPositionEnabled(acceleratedCompositingForFixedPositionEnabled);
         shell.setAcceleratedCompositingForOverflowScrollEnabled(acceleratedCompositingForOverflowScrollEnabled);
+        shell.setAcceleratedCompositingForTransitionEnabled(acceleratedCompositingForTransitionEnabled);
         shell.setSoftwareCompositingEnabled(softwareCompositingEnabled);
         shell.setThreadedCompositingEnabled(threadedCompositingEnabled);
         shell.setForceCompositingMode(forceCompositingMode);
diff --git a/Tools/DumpRenderTree/chromium/TestRunner/public/WebPreferences.h b/Tools/DumpRenderTree/chromium/TestRunner/public/WebPreferences.h
index 74eb547..8431642 100644
--- a/Tools/DumpRenderTree/chromium/TestRunner/public/WebPreferences.h
+++ b/Tools/DumpRenderTree/chromium/TestRunner/public/WebPreferences.h
@@ -31,10 +31,10 @@
 #ifndef WebPreferences_h
 #define WebPreferences_h
 
-#include "Platform/chromium/public/WebString.h"
-#include "Platform/chromium/public/WebURL.h"
 #include "WebKit/chromium/public/WebSettings.h"
 #include "WebTestCommon.h"
+#include "public/platform/WebString.h"
+#include "public/platform/WebURL.h"
 
 namespace WebKit {
 class WebView;
@@ -67,7 +67,6 @@
     bool experimentalCSSRegionsEnabled;
     bool experimentalCSSExclusionsEnabled;
     bool experimentalCSSGridLayoutEnabled;
-    bool experimentalWebSocketEnabled;
     bool javaEnabled;
     bool javaScriptCanAccessClipboard;
     bool javaScriptCanOpenWindowsAutomatically;
@@ -89,6 +88,7 @@
     bool acceleratedCompositingForVideoEnabled;
     bool acceleratedCompositingForFixedPositionEnabled;
     bool acceleratedCompositingForOverflowScrollEnabled;
+    bool acceleratedCompositingForTransitionEnabled;
     bool acceleratedCompositingEnabled;
     bool forceCompositingMode;
     bool threadedHTMLParser;
diff --git a/Tools/DumpRenderTree/chromium/TestRunner/public/WebTestDelegate.h b/Tools/DumpRenderTree/chromium/TestRunner/public/WebTestDelegate.h
index 1897f55..d86024a 100644
--- a/Tools/DumpRenderTree/chromium/TestRunner/public/WebTestDelegate.h
+++ b/Tools/DumpRenderTree/chromium/TestRunner/public/WebTestDelegate.h
@@ -31,9 +31,9 @@
 #ifndef WebTestDelegate_h
 #define WebTestDelegate_h
 
-#include "Platform/chromium/public/WebString.h"
-#include "Platform/chromium/public/WebURL.h"
-#include "Platform/chromium/public/WebVector.h"
+#include "public/platform/WebString.h"
+#include "public/platform/WebURL.h"
+#include "public/platform/WebVector.h"
 #include <string>
 
 #define WEBTESTRUNNER_NEW_HISTORY_CAPTURE
@@ -45,6 +45,7 @@
 class WebMediaPlayer;
 class WebMediaPlayerClient;
 struct WebRect;
+struct WebSize;
 struct WebURLError;
 }
 
@@ -99,6 +100,10 @@
     // Resizes the WebView to the given size.
     virtual void setClientWindowRect(const WebKit::WebRect&) = 0;
 
+    // Controls auto resize mode.
+    virtual void enableAutoResizeMode(const WebKit::WebSize& minSize, const WebKit::WebSize& maxSize) { }
+    virtual void disableAutoResizeMode(const WebKit::WebSize&) { }
+
     // Opens and closes the inspector.
     virtual void showDevTools() = 0;
     virtual void closeDevTools() = 0;
diff --git a/Tools/DumpRenderTree/chromium/TestRunner/public/WebTestProxy.h b/Tools/DumpRenderTree/chromium/TestRunner/public/WebTestProxy.h
index e8f0c80..b9b583c 100644
--- a/Tools/DumpRenderTree/chromium/TestRunner/public/WebTestProxy.h
+++ b/Tools/DumpRenderTree/chromium/TestRunner/public/WebTestProxy.h
@@ -31,9 +31,6 @@
 #ifndef WebTestProxy_h
 #define WebTestProxy_h
 
-#include "Platform/chromium/public/WebRect.h"
-#include "Platform/chromium/public/WebURLError.h"
-#include "Platform/chromium/public/WebURLRequest.h"
 #include "WebKit/chromium/public/WebAccessibilityNotification.h"
 #include "WebKit/chromium/public/WebDOMMessageEvent.h"
 #include "WebKit/chromium/public/WebDragOperation.h"
@@ -45,6 +42,9 @@
 #include "WebKit/chromium/public/WebTextAffinity.h"
 #include "WebKit/chromium/public/WebTextDirection.h"
 #include "WebTestCommon.h"
+#include "public/platform/WebRect.h"
+#include "public/platform/WebURLError.h"
+#include "public/platform/WebURLRequest.h"
 #include <map>
 #include <memory>
 #include <string>
@@ -52,6 +52,8 @@
 namespace WebKit {
 class WebAccessibilityObject;
 class WebCachedURLRequest;
+class WebColorChooser;
+class WebColorChooserClient;
 class WebDataSource;
 class WebDeviceOrientationClient;
 class WebDeviceOrientationClientMock;
@@ -82,6 +84,7 @@
 struct WebPoint;
 struct WebSize;
 struct WebWindowFeatures;
+typedef unsigned WebColor;
 }
 
 class SkCanvas;
@@ -105,6 +108,7 @@
     void reset();
 
     WebKit::WebSpellCheckClient *spellCheckClient() const;
+    WebKit::WebColorChooser* createColorChooser(WebKit::WebColorChooserClient*, const WebKit::WebColor&);
 
     std::string captureTree(bool debugRenderTree);
     SkCanvas* capturePixels();
@@ -114,6 +118,10 @@
     // FIXME: Make this private again.
     void scheduleComposite();
 
+    void didOpenChooser();
+    void didCloseChooser();
+    bool isChooserShown();
+
 #if WEBTESTRUNNER_IMPLEMENTATION
     void display();
     void displayInvalidatedRegion();
@@ -185,7 +193,6 @@
     void didRunInsecureContent(WebKit::WebFrame*, const WebKit::WebSecurityOrigin&, const WebKit::WebURL& insecureURL);
     void didDetectXSS(WebKit::WebFrame*, const WebKit::WebURL& insecureURL, bool didBlockEntirePage);
     void willRequestResource(WebKit::WebFrame*, const WebKit::WebCachedURLRequest&);
-    bool canHandleRequest(WebKit::WebFrame*, const WebKit::WebURLRequest&);
     WebKit::WebURLError cannotHandleRequestError(WebKit::WebFrame*, const WebKit::WebURLRequest&);
     void didCreateDataSource(WebKit::WebFrame*, WebKit::WebDataSource*);
     void willSendRequest(WebKit::WebFrame*, unsigned identifier, WebKit::WebURLRequest&, const WebKit::WebURLResponse& redirectResponse);
@@ -224,6 +231,7 @@
     std::map<unsigned, WebKit::WebURLRequest> m_requestMap;
 
     bool m_logConsoleOutput;
+    int m_chooserCount;
 
     std::auto_ptr<WebKit::WebGeolocationClientMock> m_geolocationClient;
     std::auto_ptr<WebKit::WebDeviceOrientationClientMock> m_deviceOrientationClient;
@@ -522,12 +530,6 @@
         WebTestProxyBase::willRequestResource(frame, request);
         Base::willRequestResource(frame, request);
     }
-    virtual bool canHandleRequest(WebKit::WebFrame* frame, const WebKit::WebURLRequest& request)
-    {
-        if (!WebTestProxyBase::canHandleRequest(frame, request))
-            return false;
-        return Base::canHandleRequest(frame, request);
-    }
     virtual WebKit::WebURLError cannotHandleRequestError(WebKit::WebFrame* frame, const WebKit::WebURLRequest& request)
     {
         return WebTestProxyBase::cannotHandleRequestError(frame, request);
@@ -604,6 +606,10 @@
             return true;
         return Base::willCheckAndDispatchMessageEvent(sourceFrame, targetFrame, target, event);
     }
+    virtual WebKit::WebColorChooser* createColorChooser(WebKit::WebColorChooserClient* client, const WebKit::WebColor& color)
+    {
+        return WebTestProxyBase::createColorChooser(client, color);
+    }
 };
 
 }
diff --git a/Tools/DumpRenderTree/chromium/TestRunner/src/CppBoundClass.cpp b/Tools/DumpRenderTree/chromium/TestRunner/src/CppBoundClass.cpp
index 49e6149..7211eea 100644
--- a/Tools/DumpRenderTree/chromium/TestRunner/src/CppBoundClass.cpp
+++ b/Tools/DumpRenderTree/chromium/TestRunner/src/CppBoundClass.cpp
@@ -353,7 +353,7 @@
     // BindToWindowObject will take its own reference to the NPObject, and clean
     // up after itself. It will also (indirectly) register the object with V8,
     // so we must remember this so we can unregister it when we're destroyed.
-    frame->bindToWindowObject(classname, NPVARIANT_TO_OBJECT(*getAsCppVariant()));
+    frame->bindToWindowObject(classname, NPVARIANT_TO_OBJECT(*getAsCppVariant()), 0);
     m_boundToFrame = true;
 }
 
diff --git a/Tools/DumpRenderTree/chromium/TestRunner/src/EventSender.cpp b/Tools/DumpRenderTree/chromium/TestRunner/src/EventSender.cpp
index 0686fef..cf579ca 100644
--- a/Tools/DumpRenderTree/chromium/TestRunner/src/EventSender.cpp
+++ b/Tools/DumpRenderTree/chromium/TestRunner/src/EventSender.cpp
@@ -304,6 +304,8 @@
     bindMethod("zoomPageOut", &EventSender::zoomPageOut);
     bindMethod("scalePageBy", &EventSender::scalePageBy);
 
+    bindProperty("forceLayoutOnEvents", &forceLayoutOnEvents);
+
     // When set to true (the default value), we batch mouse move and mouse up
     // events so we can simulate drag & drop.
     bindProperty("dragMode", &dragMode);
@@ -337,6 +339,7 @@
     currentDragEffectsAllowed = WebKit::WebDragOperationNone;
     pressedButton = WebMouseEvent::ButtonNone;
     dragMode.set(true);
+    forceLayoutOnEvents.set(true);
 #ifdef WIN32
     wmKeyDown.set(WM_KEYDOWN);
     wmKeyUp.set(WM_KEYUP);
@@ -424,6 +427,9 @@
     if (result) // Could be 0 if invoked asynchronously.
         result->setNull();
 
+    if (shouldForceLayoutOnEvents())
+        webview()->layout();
+
     int buttonNumber = getButtonNumberFromSingleArg(arguments);
     WEBKIT_ASSERT(buttonNumber != -1);
 
@@ -444,6 +450,9 @@
     if (result) // Could be 0 if invoked asynchronously.
         result->setNull();
 
+    if (shouldForceLayoutOnEvents())
+        webview()->layout();
+
     int buttonNumber = getButtonNumberFromSingleArg(arguments);
     WEBKIT_ASSERT(buttonNumber != -1);
 
@@ -502,6 +511,8 @@
 
     if (arguments.size() < 2 || !arguments[0].isNumber() || !arguments[1].isNumber())
         return;
+    if (shouldForceLayoutOnEvents())
+        webview()->layout();
 
     WebPoint mousePos(arguments[0].toInt32(), arguments[1].toInt32());
 
@@ -605,7 +616,7 @@
         if (!code) {
             WebString webCodeStr = WebString::fromUTF8(codeStr.data(), codeStr.size());
             WEBKIT_ASSERT(webCodeStr.length() == 1);
-            text = code = webCodeStr.data()[0];
+            text = code = webCodeStr.at(0);
             needsShiftKeyModifier = needsShiftModifier(code);
             if ((code & 0xFF) >= 'a' && (code & 0xFF) <= 'z')
                 code -= 'a' - 'A';
@@ -654,7 +665,8 @@
     eventUp.type = WebInputEvent::KeyUp;
     // EventSender.m forces a layout here, with at least one
     // test (fast/forms/focus-control-to-page.html) relying on this.
-    webview()->layout();
+    if (shouldForceLayoutOnEvents())
+        webview()->layout();
 
     // In the browser, if a keyboard event corresponds to an editor command,
     // the command will be dispatched to the renderer just before dispatching
@@ -700,7 +712,8 @@
         if (msg == WM_DEADCHAR || msg == WM_SYSDEADCHAR)
             return;
 
-        webview()->layout();
+        if (shouldForceLayoutOnEvents())
+            webview()->layout();
 
         unsigned long lparam = static_cast<unsigned long>(arguments[2].toDouble());
         webview()->handleInputEvent(WebInputEventFactory::keyboardEvent(0, msg, arguments[1].toInt32(), lparam));
@@ -854,7 +867,8 @@
 
 void EventSender::contextClick(const CppArgumentList& arguments, CppVariant* result)
 {
-    webview()->layout();
+    if (shouldForceLayoutOnEvents())
+        webview()->layout();
 
     updateClickCountForButton(WebMouseEvent::ButtonRight);
 
@@ -1035,7 +1049,8 @@
 void EventSender::sendCurrentTouchEvent(const WebInputEvent::Type type)
 {
     WEBKIT_ASSERT(static_cast<unsigned>(WebTouchEvent::touchesLengthCap) > touchPoints.size());
-    webview()->layout();
+    if (shouldForceLayoutOnEvents())
+        webview()->layout();
 
     WebTouchEvent touchEvent;
     touchEvent.type = type;
@@ -1072,6 +1087,12 @@
     if (arguments.size() < 2 || !arguments[0].isNumber() || !arguments[1].isNumber())
         return;
 
+    // Force a layout here just to make sure every position has been
+    // determined before we send events (as well as all the other methods
+    // that send an event do).
+    if (shouldForceLayoutOnEvents())
+        webview()->layout();
+
     int horizontal = arguments[0].toInt32();
     int vertical = arguments[1].toInt32();
     int paged = false;
@@ -1285,6 +1306,10 @@
     event.globalX = event.x;
     event.globalY = event.y;
     event.timeStampSeconds = getCurrentEventTimeSec(m_delegate);
+
+    if (shouldForceLayoutOnEvents())
+        webview()->layout();
+
     webview()->handleInputEvent(event);
 
     // Long press might start a drag drop session. Complete it if so.
@@ -1302,6 +1327,10 @@
     WebGestureEvent event;
     event.type = WebInputEvent::GestureFlingCancel;
     event.timeStampSeconds = getCurrentEventTimeSec(m_delegate);
+
+    if (shouldForceLayoutOnEvents())
+        webview()->layout();
+
     webview()->handleInputEvent(event);
 }
 
@@ -1326,6 +1355,10 @@
     event.data.flingStart.velocityX = static_cast<float>(arguments[2].toDouble());
     event.data.flingStart.velocityY = static_cast<float>(arguments[3].toDouble());
     event.timeStampSeconds = getCurrentEventTimeSec(m_delegate);
+
+    if (shouldForceLayoutOnEvents())
+        webview()->layout();
+
     webview()->handleInputEvent(event);
 }
 
diff --git a/Tools/DumpRenderTree/chromium/TestRunner/src/EventSender.h b/Tools/DumpRenderTree/chromium/TestRunner/src/EventSender.h
index 9b36b5f..009f595 100644
--- a/Tools/DumpRenderTree/chromium/TestRunner/src/EventSender.h
+++ b/Tools/DumpRenderTree/chromium/TestRunner/src/EventSender.h
@@ -122,6 +122,10 @@
     void gestureTwoFingerTap(const CppArgumentList&, CppVariant*);
     void gestureEvent(WebKit::WebInputEvent::Type, const CppArgumentList&);
 
+    // Setting this to false makes EventSender not force layout() calls.
+    // This makes it possible to test the standard WebCore event dispatch.
+    CppVariant forceLayoutOnEvents;
+
     // Unimplemented stubs
     void enableDOMUIEventLogging(const CppArgumentList&, CppVariant*);
     void fireKeyboardEventsToElement(const CppArgumentList&, CppVariant*);
@@ -147,6 +151,8 @@
     // Returns true if dragMode is true.
     bool isDragMode() { return dragMode.isBool() && dragMode.toBoolean(); }
 
+    bool shouldForceLayoutOnEvents() const { return forceLayoutOnEvents.isBool() && forceLayoutOnEvents.toBoolean(); }
+
     // Sometimes we queue up mouse move and mouse up events for drag drop
     // handling purposes. These methods dispatch the event.
     void doMouseMove(const WebKit::WebMouseEvent&);
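Note on the EventSender changes above: the forced webview()->layout() before each synthesized event is now conditional on the new forceLayoutOnEvents property, which is initialized to true so existing tests keep their behavior; a test can clear it to exercise WebCore's normal event-time layout path. A hypothetical layout-test snippet (the guards, coordinates, and dumpAsText call are illustrative, not part of this patch):

    // Disable EventSender's forced layout so event dispatch relies on
    // WebCore's own layout scheduling instead of explicit layout() calls.
    if (window.testRunner && window.eventSender) {
        testRunner.dumpAsText();
        eventSender.forceLayoutOnEvents = false; // default is true
        eventSender.mouseMoveTo(10, 10);
        eventSender.mouseDown();
        eventSender.mouseUp();
    }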
diff --git a/Tools/DumpRenderTree/chromium/TestRunner/src/MockColorChooser.cpp b/Tools/DumpRenderTree/chromium/TestRunner/src/MockColorChooser.cpp
new file mode 100644
index 0000000..0147fb7
--- /dev/null
+++ b/Tools/DumpRenderTree/chromium/TestRunner/src/MockColorChooser.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2013 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "MockColorChooser.h"
+
+#include "WebTestDelegate.h"
+#include "WebTestProxy.h"
+
+using namespace WebKit;
+using namespace std;
+
+namespace WebTestRunner {
+
+namespace {
+class HostMethodTask : public WebMethodTask<MockColorChooser> {
+public:
+    typedef void (MockColorChooser::*CallbackMethodType)();
+    HostMethodTask(MockColorChooser* object, CallbackMethodType callback)
+        : WebMethodTask<MockColorChooser>(object)
+        , m_callback(callback)
+    { }
+
+    virtual void runIfValid() { (m_object->*m_callback)(); }
+
+private:
+    CallbackMethodType m_callback;
+};
+}
+
+MockColorChooser::MockColorChooser(WebKit::WebColorChooserClient* client, WebTestDelegate* delegate, WebTestProxyBase* proxy)
+    : m_client(client)
+    , m_delegate(delegate)
+    , m_proxy(proxy)
+{
+    m_proxy->didOpenChooser();
+}
+
+MockColorChooser::~MockColorChooser()
+{
+    m_proxy->didCloseChooser();
+}
+
+void MockColorChooser::setSelectedColor(const WebKit::WebColor)
+{
+}
+
+void MockColorChooser::endChooser()
+{
+    m_delegate->postDelayedTask(new HostMethodTask(this, &MockColorChooser::invokeDidEndChooser), 0);
+}
+
+void MockColorChooser::invokeDidEndChooser()
+{
+    m_client->didEndChooser();
+}
+
+}
diff --git a/Tools/DumpRenderTree/chromium/TestRunner/src/MockColorChooser.h b/Tools/DumpRenderTree/chromium/TestRunner/src/MockColorChooser.h
new file mode 100644
index 0000000..fd1125d
--- /dev/null
+++ b/Tools/DumpRenderTree/chromium/TestRunner/src/MockColorChooser.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2013 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MockColorChooser_h
+#define MockColorChooser_h
+
+#include "TestCommon.h"
+#include "WebColorChooser.h"
+#include "WebColorChooserClient.h"
+#include "WebTask.h"
+
+namespace WebTestRunner {
+
+class WebTestDelegate;
+class WebTestProxyBase;
+class MockColorChooser : public WebKit::WebColorChooser {
+public:
+    MockColorChooser(WebKit::WebColorChooserClient*, WebTestDelegate*, WebTestProxyBase*);
+    virtual ~MockColorChooser();
+
+    // WebKit::WebColorChooser implementation.
+    virtual void setSelectedColor(const WebKit::WebColor) OVERRIDE;
+    virtual void endChooser() OVERRIDE;
+
+    void invokeDidEndChooser();
+    WebTaskList* taskList() { return &m_taskList; }
+private:
+    WebKit::WebColorChooserClient* m_client;
+    WebTestDelegate* m_delegate;
+    WebTestProxyBase* m_proxy;
+    WebTaskList m_taskList;
+};
+
+}
+
+#endif // MockColorChooser_h
diff --git a/Tools/DumpRenderTree/chromium/TestRunner/src/MockGrammarCheck.cpp b/Tools/DumpRenderTree/chromium/TestRunner/src/MockGrammarCheck.cpp
index 3255012..1d3f628 100644
--- a/Tools/DumpRenderTree/chromium/TestRunner/src/MockGrammarCheck.cpp
+++ b/Tools/DumpRenderTree/chromium/TestRunner/src/MockGrammarCheck.cpp
@@ -31,10 +31,10 @@
 #include "config.h"
 #include "MockGrammarCheck.h"
 
-#include "Platform/chromium/public/WebCString.h"
-#include "Platform/chromium/public/WebString.h"
 #include "TestCommon.h"
 #include "WebTextCheckingResult.h"
+#include "public/platform/WebCString.h"
+#include "public/platform/WebString.h"
 #include <algorithm>
 
 using namespace WebKit;
diff --git a/Tools/DumpRenderTree/chromium/TestRunner/src/MockSpellCheck.h b/Tools/DumpRenderTree/chromium/TestRunner/src/MockSpellCheck.h
index 849fb19..e0de937 100644
--- a/Tools/DumpRenderTree/chromium/TestRunner/src/MockSpellCheck.h
+++ b/Tools/DumpRenderTree/chromium/TestRunner/src/MockSpellCheck.h
@@ -31,8 +31,8 @@
 #ifndef MockSpellCheck_h
 #define MockSpellCheck_h
 
-#include "Platform/chromium/public/WebString.h"
-#include "Platform/chromium/public/WebVector.h"
+#include "public/platform/WebString.h"
+#include "public/platform/WebVector.h"
 #include <vector>
 
 namespace WebTestRunner {
diff --git a/Tools/DumpRenderTree/chromium/TestRunner/src/SpellCheckClient.cpp b/Tools/DumpRenderTree/chromium/TestRunner/src/SpellCheckClient.cpp
index 6880053..5ef3294 100644
--- a/Tools/DumpRenderTree/chromium/TestRunner/src/SpellCheckClient.cpp
+++ b/Tools/DumpRenderTree/chromium/TestRunner/src/SpellCheckClient.cpp
@@ -85,12 +85,11 @@
     vector<WebTextCheckingResult> results;
     if (mask & WebTextCheckingTypeSpelling) {
         size_t offset = 0;
-        size_t length = text.length();
-        const WebUChar* data = text.data();
-        while (offset < length) {
+        string16 data = text;
+        while (offset < data.length()) {
             int misspelledPosition = 0;
             int misspelledLength = 0;
-            m_spellcheck.spellCheckWord(WebString(&data[offset], length - offset), &misspelledPosition, &misspelledLength);
+            m_spellcheck.spellCheckWord(data.substr(offset), &misspelledPosition, &misspelledLength);
             if (!misspelledLength)
                 break;
             WebTextCheckingResult result;
diff --git a/Tools/DumpRenderTree/chromium/TestRunner/src/TestRunner.cpp b/Tools/DumpRenderTree/chromium/TestRunner/src/TestRunner.cpp
index 9d38dd9..69ffce3 100644
--- a/Tools/DumpRenderTree/chromium/TestRunner/src/TestRunner.cpp
+++ b/Tools/DumpRenderTree/chromium/TestRunner/src/TestRunner.cpp
@@ -289,6 +289,7 @@
     bindMethod("wasMockSpeechRecognitionAborted", &TestRunner::wasMockSpeechRecognitionAborted);
     bindMethod("display", &TestRunner::display);
     bindMethod("displayInvalidatedRegion", &TestRunner::displayInvalidatedRegion);
+    bindMethod("isChooserShown", &TestRunner::isChooserShown);
 
     // Properties.
     bindProperty("globalFlag", &m_globalFlag);
@@ -359,7 +360,6 @@
         m_webView->setSelectionColors(0xff1e90ff, 0xff000000, 0xffc8c8c8, 0xff323232);
 #endif
         m_webView->removeAllUserContent();
-        m_webView->disableAutoResizeMode();
     }
     m_topLoadingFrame = 0;
     m_waitUntilDone = false;
@@ -378,6 +378,7 @@
         m_delegate->setDeviceScaleFactor(1);
         m_delegate->setAcceptAllCookies(false);
         m_delegate->setLocale("");
+        m_delegate->disableAutoResizeMode(WebSize());
     }
 
     m_dumpEditingCallbacks = false;
@@ -713,7 +714,7 @@
     return m_pointerLocked;
 }
 
-void TestRunner::setToolTipText(WebKit::WebString text)
+void TestRunner::setToolTipText(const WebKit::WebString& text)
 {
     m_tooltipText.set(text.utf8());
 }
@@ -1469,7 +1470,7 @@
     int maxHeight = cppVariantToInt32(arguments[3]);
     WebKit::WebSize maxSize(maxWidth, maxHeight);
 
-    m_webView->enableAutoResizeMode(minSize, maxSize);
+    m_delegate->enableAutoResizeMode(minSize, maxSize);
     result->set(true);
 }
 
@@ -1483,9 +1484,7 @@
     int newHeight = cppVariantToInt32(arguments[1]);
     WebKit::WebSize newSize(newWidth, newHeight);
 
-    m_delegate->setClientWindowRect(WebRect(0, 0, newSize.width, newSize.height));
-    m_webView->disableAutoResizeMode();
-    m_webView->resize(newSize);
+    m_delegate->disableAutoResizeMode(newSize);
     result->set(true);
 }
 
@@ -1619,8 +1618,6 @@
         prefs->experimentalCSSRegionsEnabled = cppVariantToBool(value);
     else if (key == "WebKitCSSGridLayoutEnabled")
         prefs->experimentalCSSGridLayoutEnabled = cppVariantToBool(value);
-    else if (key == "WebKitExperimentalWebSocketEnabled")
-        prefs->experimentalWebSocketEnabled = cppVariantToBool(value);
     else if (key == "WebKitHyperlinkAuditingEnabled")
         prefs->hyperlinkAuditingEnabled = cppVariantToBool(value);
     else if (key == "WebKitEnableCaretBrowsing")
@@ -1664,6 +1661,11 @@
     result->setNull();
 }
 
+void TestRunner::isChooserShown(const CppArgumentList&, CppVariant* result)
+{
+    result->set(m_proxy->isChooserShown());
+}
+
 void TestRunner::evaluateInWebInspector(const CppArgumentList& arguments, CppVariant* result)
 {
     result->setNull();
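Note on the auto-resize changes above: TestRunner::enableAutoResizeMode() and disableAutoResizeMode() now go through the WebTestDelegate rather than touching the WebView directly, and disabling auto-resize also takes an explicit size (WebViewHost resets the window rect and resizes the view, as shown further below). The JavaScript-visible signatures implied by the argument parsing above are unchanged; a hypothetical usage sketch with illustrative sizes:

    if (window.testRunner) {
        testRunner.dumpAsText();
        // Let the view auto-resize between 200x200 and 400x400 to fit content.
        testRunner.enableAutoResizeMode(200, 200, 400, 400);
        // ... exercise content that grows or shrinks the document ...
        // Turning auto-resize off snaps the view back to an explicit size.
        testRunner.disableAutoResizeMode(800, 600);
    }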
diff --git a/Tools/DumpRenderTree/chromium/TestRunner/src/TestRunner.h b/Tools/DumpRenderTree/chromium/TestRunner/src/TestRunner.h
index 98e4c09..0d5cecf 100644
--- a/Tools/DumpRenderTree/chromium/TestRunner/src/TestRunner.h
+++ b/Tools/DumpRenderTree/chromium/TestRunner/src/TestRunner.h
@@ -126,7 +126,7 @@
     bool requestPointerLock();
     void requestPointerUnlock();
     bool isPointerLocked();
-    void setToolTipText(WebKit::WebString);
+    void setToolTipText(const WebKit::WebString&);
 
     // A single item in the work queue.
     class WorkItem {
@@ -427,6 +427,9 @@
     void showWebInspector(const CppArgumentList&, CppVariant*);
     void closeWebInspector(const CppArgumentList&, CppVariant*);
 
+    // Inspect chooser state
+    void isChooserShown(const CppArgumentList&, CppVariant*);
+
     // Allows layout tests to exec scripts at WebInspector side.
     void evaluateInWebInspector(const CppArgumentList&, CppVariant*);
 
diff --git a/Tools/DumpRenderTree/chromium/TestRunner/src/WebPreferences.cpp b/Tools/DumpRenderTree/chromium/TestRunner/src/WebPreferences.cpp
index 335f935..0d95e4a 100644
--- a/Tools/DumpRenderTree/chromium/TestRunner/src/WebPreferences.cpp
+++ b/Tools/DumpRenderTree/chromium/TestRunner/src/WebPreferences.cpp
@@ -85,7 +85,6 @@
     experimentalCSSExclusionsEnabled = true;
     experimentalCSSRegionsEnabled = true;
     experimentalCSSGridLayoutEnabled = false;
-    experimentalWebSocketEnabled = false;
     javaEnabled = false;
     javaScriptCanAccessClipboard = true;
     javaScriptCanOpenWindowsAutomatically = true;
@@ -116,6 +115,7 @@
     acceleratedCompositingForVideoEnabled = false;
     acceleratedCompositingForFixedPositionEnabled = false;
     acceleratedCompositingForOverflowScrollEnabled = false;
+    acceleratedCompositingForTransitionEnabled = false;
     acceleratedCompositingEnabled = false;
     accelerated2dCanvasEnabled = false;
     forceCompositingMode = false;
@@ -157,7 +157,6 @@
     WebRuntimeFeatures::enableCSSExclusions(experimentalCSSExclusionsEnabled);
     settings->setExperimentalCSSGridLayoutEnabled(experimentalCSSGridLayoutEnabled);
     settings->setExperimentalCSSCustomFilterEnabled(cssCustomFilterEnabled);
-    settings->setExperimentalWebSocketEnabled(experimentalWebSocketEnabled);
     settings->setJavaEnabled(javaEnabled);
     settings->setJavaScriptCanAccessClipboard(javaScriptCanAccessClipboard);
     settings->setJavaScriptCanOpenWindowsAutomatically(javaScriptCanOpenWindowsAutomatically);
@@ -182,6 +181,7 @@
     settings->setAcceleratedCompositingForVideoEnabled(acceleratedCompositingForVideoEnabled);
     settings->setAcceleratedCompositingForFixedPositionEnabled(acceleratedCompositingForFixedPositionEnabled);
     settings->setAcceleratedCompositingForOverflowScrollEnabled(acceleratedCompositingForOverflowScrollEnabled);
+    settings->setAcceleratedCompositingForTransitionEnabled(acceleratedCompositingForTransitionEnabled);
     settings->setFixedPositionCreatesStackingContext(acceleratedCompositingForFixedPositionEnabled);
     settings->setForceCompositingMode(forceCompositingMode);
     settings->setThreadedHTMLParser(threadedHTMLParser);
diff --git a/Tools/DumpRenderTree/chromium/TestRunner/src/WebTestProxy.cpp b/Tools/DumpRenderTree/chromium/TestRunner/src/WebTestProxy.cpp
index 99d4ae9..d11a18c 100644
--- a/Tools/DumpRenderTree/chromium/TestRunner/src/WebTestProxy.cpp
+++ b/Tools/DumpRenderTree/chromium/TestRunner/src/WebTestProxy.cpp
@@ -33,6 +33,7 @@
 
 #include "AccessibilityControllerChromium.h"
 #include "EventSender.h"
+#include "MockColorChooser.h"
 #include "MockWebSpeechInputController.h"
 #include "MockWebSpeechRecognizer.h"
 #include "SpellCheckClient.h"
@@ -439,6 +440,7 @@
     : m_testInterfaces(0)
     , m_delegate(0)
     , m_spellcheck(new SpellCheckClient)
+    , m_chooserCount(0)
 {
     reset();
 }
@@ -486,6 +488,12 @@
     return m_spellcheck.get();
 }
 
+WebColorChooser* WebTestProxyBase::createColorChooser(WebColorChooserClient* client, const WebKit::WebColor& color)
+{
+    // This instance is deleted by WebCore::ColorInputType
+    return new MockColorChooser(client, m_delegate, this);
+}
+
 string WebTestProxyBase::captureTree(bool debugRenderTree)
 {
     WebScriptController::flushConsoleMessages();
@@ -1073,6 +1081,21 @@
     m_testInterfaces->testRunner()->setToolTipText(text);
 }
 
+void WebTestProxyBase::didOpenChooser()
+{
+    m_chooserCount++;
+}
+
+void WebTestProxyBase::didCloseChooser()
+{
+    m_chooserCount--;
+}
+
+bool WebTestProxyBase::isChooserShown()
+{
+    return 0 < m_chooserCount;
+}
+
 void WebTestProxyBase::willPerformClientRedirect(WebFrame* frame, const WebURL&, const WebURL& to, double, double)
 {
     if (m_testInterfaces->testRunner()->shouldDumpFrameLoadCallbacks()) {
@@ -1232,27 +1255,11 @@
 {
     if (m_testInterfaces->testRunner()->shouldDumpResourceRequestCallbacks()) {
         printFrameDescription(m_delegate, frame);
-        WebElement element = request.initiatorElement();
-        if (!element.isNull()) {
-            m_delegate->printMessage(" - element with ");
-            if (element.hasAttribute("id"))
-                m_delegate->printMessage(string("id '") + element.getAttribute("id").utf8().data() + "'");
-            else
-                m_delegate->printMessage("no id");
-        } else
-            m_delegate->printMessage(string(" - ") + request.initiatorName().utf8().data());
+        m_delegate->printMessage(string(" - ") + request.initiatorName().utf8().data());
         m_delegate->printMessage(string(" requested '") + URLDescription(request.urlRequest().url()).c_str() + "'\n");
     }
 }
 
-bool WebTestProxyBase::canHandleRequest(WebFrame*, const WebURLRequest& request)
-{
-    GURL url = request.url();
-    // Just reject the scheme used in
-    // LayoutTests/http/tests/misc/redirect-to-external-url.html
-    return !url.SchemeIs("spaceballs");
-}
-
 WebURLError WebTestProxyBase::cannotHandleRequestError(WebFrame*, const WebURLRequest& request)
 {
     WebURLError error;
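Note on the color-chooser plumbing above: WebTestProxyBase::createColorChooser() hands out a MockColorChooser whose constructor and destructor bump m_chooserCount via didOpenChooser()/didCloseChooser(), and testRunner.isChooserShown() (bound in TestRunner.cpp) reports whether that count is positive. A hypothetical layout-test sketch, assuming the chooser is opened by clicking an <input type=color>; the element id, coordinates, and title check are illustrative only:

    if (window.testRunner && window.eventSender) {
        testRunner.dumpAsText();
        var input = document.getElementById('color'); // assumes <input type="color" id="color">
        eventSender.mouseMoveTo(input.offsetLeft + 5, input.offsetTop + 5);
        eventSender.mouseDown();
        eventSender.mouseUp(); // expected to open the MockColorChooser
        document.title = testRunner.isChooserShown() ? 'chooser open' : 'chooser closed';
    }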
diff --git a/Tools/DumpRenderTree/chromium/TestShell.cpp b/Tools/DumpRenderTree/chromium/TestShell.cpp
index d67be24..f67dbc9 100644
--- a/Tools/DumpRenderTree/chromium/TestShell.cpp
+++ b/Tools/DumpRenderTree/chromium/TestShell.cpp
@@ -109,6 +109,7 @@
     , m_acceleratedCompositingForVideoEnabled(false)
     , m_acceleratedCompositingForFixedPositionEnabled(false)
     , m_acceleratedCompositingForOverflowScrollEnabled(false)
+    , m_acceleratedCompositingForTransitionEnabled(false)
     , m_softwareCompositingEnabled(false)
     , m_threadedCompositingEnabled(false)
     , m_forceCompositingMode(false)
@@ -139,8 +140,6 @@
     webkit_support::SetThemeEngine(m_testInterfaces->themeEngine());
 #endif
 
-    WTF::initializeThreading();
-
     if (m_threadedCompositingEnabled)
         m_webCompositorThread = adoptPtr(WebKit::Platform::current()->createThread("Compositor"));
     webkit_support::SetThreadedCompositorEnabled(m_threadedCompositingEnabled);
@@ -213,6 +212,7 @@
     m_prefs.acceleratedCompositingForVideoEnabled = m_acceleratedCompositingForVideoEnabled;
     m_prefs.acceleratedCompositingForFixedPositionEnabled = m_acceleratedCompositingForFixedPositionEnabled;
     m_prefs.acceleratedCompositingForOverflowScrollEnabled = m_acceleratedCompositingForOverflowScrollEnabled;
+    m_prefs.acceleratedCompositingForTransitionEnabled = m_acceleratedCompositingForTransitionEnabled;
     m_prefs.forceCompositingMode = m_forceCompositingMode;
     m_prefs.accelerated2dCanvasEnabled = m_accelerated2dCanvasEnabled;
     m_prefs.perTilePaintingEnabled = m_perTilePaintingEnabled;
diff --git a/Tools/DumpRenderTree/chromium/TestShell.h b/Tools/DumpRenderTree/chromium/TestShell.h
index 010c320..2263624 100644
--- a/Tools/DumpRenderTree/chromium/TestShell.h
+++ b/Tools/DumpRenderTree/chromium/TestShell.h
@@ -116,6 +116,7 @@
     void setAcceleratedCompositingForVideoEnabled(bool enabled) { m_acceleratedCompositingForVideoEnabled = enabled; }
     void setAcceleratedCompositingForFixedPositionEnabled(bool enabled) { m_acceleratedCompositingForFixedPositionEnabled = enabled; }
     void setAcceleratedCompositingForOverflowScrollEnabled(bool enabled) { m_acceleratedCompositingForOverflowScrollEnabled = enabled; }
+    void setAcceleratedCompositingForTransitionEnabled(bool enabled) { m_acceleratedCompositingForTransitionEnabled = enabled; }
     bool softwareCompositingEnabled() { return m_softwareCompositingEnabled; }
     void setSoftwareCompositingEnabled(bool enabled) { m_softwareCompositingEnabled = enabled; }
     void setThreadedCompositingEnabled(bool enabled) { m_threadedCompositingEnabled = enabled; }
@@ -209,6 +210,7 @@
     bool m_acceleratedCompositingForVideoEnabled;
     bool m_acceleratedCompositingForFixedPositionEnabled;
     bool m_acceleratedCompositingForOverflowScrollEnabled;
+    bool m_acceleratedCompositingForTransitionEnabled;
     bool m_softwareCompositingEnabled;
     bool m_threadedCompositingEnabled;
     bool m_forceCompositingMode;
diff --git a/Tools/DumpRenderTree/chromium/TestShellX11.cpp b/Tools/DumpRenderTree/chromium/TestShellX11.cpp
index 4b25aa3..93aaf49 100644
--- a/Tools/DumpRenderTree/chromium/TestShellX11.cpp
+++ b/Tools/DumpRenderTree/chromium/TestShellX11.cpp
@@ -32,6 +32,7 @@
 #include "TestShell.h"
 
 #include <fontconfig/fontconfig.h>
+#include <unistd.h>
 
 #if USE(GTK)
 #include <gtk/gtk.h>
diff --git a/Tools/DumpRenderTree/chromium/WebViewHost.cpp b/Tools/DumpRenderTree/chromium/WebViewHost.cpp
index 7cc1ab5..9195175 100644
--- a/Tools/DumpRenderTree/chromium/WebViewHost.cpp
+++ b/Tools/DumpRenderTree/chromium/WebViewHost.cpp
@@ -447,11 +447,6 @@
     return defaultPolicy;
 }
 
-bool WebViewHost::canHandleRequest(WebFrame*, const WebURLRequest& request)
-{
-    return true;
-}
-
 WebURLError WebViewHost::cancelledError(WebFrame*, const WebURLRequest& request)
 {
     return webkit_support::CreateCancelledError(request);
@@ -845,6 +840,20 @@
     setWindowRect(rect);
 }
 
+void WebViewHost::enableAutoResizeMode(const WebSize& minSize, const WebSize& maxSize)
+{
+    webView()->enableAutoResizeMode(minSize, maxSize);
+}
+
+void WebViewHost::disableAutoResizeMode(const WebKit::WebSize& newSize)
+{
+    if (!newSize.isEmpty())
+        setWindowRect(WebRect(0, 0, newSize.width, newSize.height));
+    webView()->disableAutoResizeMode();
+    if (!newSize.isEmpty())
+        webView()->resize(newSize);
+}
+
 bool WebViewHost::navigate(const TestNavigationEntry& entry, bool reload)
 {
     // Get the right target frame for the entry.
diff --git a/Tools/DumpRenderTree/chromium/WebViewHost.h b/Tools/DumpRenderTree/chromium/WebViewHost.h
index 80b225d..d0b8e9a 100644
--- a/Tools/DumpRenderTree/chromium/WebViewHost.h
+++ b/Tools/DumpRenderTree/chromium/WebViewHost.h
@@ -101,6 +101,8 @@
     virtual void applyPreferences() OVERRIDE;
     virtual std::string makeURLErrorDescription(const WebKit::WebURLError&) OVERRIDE;
     virtual void setClientWindowRect(const WebKit::WebRect&) OVERRIDE;
+    virtual void enableAutoResizeMode(const WebKit::WebSize&, const WebKit::WebSize&) OVERRIDE;
+    virtual void disableAutoResizeMode(const WebKit::WebSize&) OVERRIDE;
     virtual void showDevTools() OVERRIDE;
     virtual void closeDevTools() OVERRIDE;
     virtual void evaluateInWebInspector(long, const std::string&) OVERRIDE;
@@ -135,6 +137,7 @@
     virtual WebKit::WebWidget* createPopupMenu(WebKit::WebPopupType);
     virtual WebKit::WebWidget* createPopupMenu(const WebKit::WebPopupMenuInfo&);
     virtual WebKit::WebStorageNamespace* createSessionStorageNamespace(unsigned quota);
+
     virtual void didAddMessageToConsole(const WebKit::WebConsoleMessage&, const WebKit::WebString& sourceName, unsigned sourceLine);
     virtual void didStartLoading();
     virtual void didStopLoading();
@@ -182,7 +185,6 @@
     virtual WebKit::WebNavigationPolicy decidePolicyForNavigation(
         WebKit::WebFrame*, const WebKit::WebURLRequest&,
         WebKit::WebNavigationType, WebKit::WebNavigationPolicy, bool isRedirect);
-    virtual bool canHandleRequest(WebKit::WebFrame*, const WebKit::WebURLRequest&);
     virtual WebKit::WebURLError cancelledError(WebKit::WebFrame*, const WebKit::WebURLRequest&);
     virtual void unableToImplementPolicyWithError(WebKit::WebFrame*, const WebKit::WebURLError&);
     virtual void didCreateDataSource(WebKit::WebFrame*, WebKit::WebDataSource*);
diff --git a/Tools/GardeningServer/scripts/base.js b/Tools/GardeningServer/scripts/base.js
index d77e899..fb05c76 100644
--- a/Tools/GardeningServer/scripts/base.js
+++ b/Tools/GardeningServer/scripts/base.js
@@ -176,6 +176,12 @@
 
 base.parseJSONP = function(jsonp)
 {
+    if (!jsonp)
+        return {};
+
+    if (!jsonp.match(/^[^{[]*\(/))
+        return JSON.parse(jsonp);
+
     var startIndex = jsonp.indexOf('(') + 1;
     var endIndex = jsonp.lastIndexOf(')');
     if (startIndex == 0 || endIndex == -1)
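Note on the base.js change above: base.parseJSONP() now tolerates empty input and raw JSON in addition to JSONP-wrapped payloads such as ADD_RESULTS({...});. The hunk ends before the function's wrapper-stripping tail, so the standalone sketch below combines the added dispatch with an assumed substring step consistent with the visible startIndex/endIndex logic; it is an illustration, not the file's actual code:

    // Sketch of the updated parsing rules (the tail is assumed, see note above).
    function parseJSONPSketch(jsonp)
    {
        if (!jsonp)
            return {};
        // No callback wrapper before the first '{' or '[': treat as raw JSON.
        if (!jsonp.match(/^[^{[]*\(/))
            return JSON.parse(jsonp);
        // Otherwise strip the wrapper, e.g. 'ADD_RESULTS({"dummy":"data"});'.
        var startIndex = jsonp.indexOf('(') + 1;
        var endIndex = jsonp.lastIndexOf(')');
        if (startIndex == 0 || endIndex == -1)
            return {};
        return JSON.parse(jsonp.substr(startIndex, endIndex - startIndex));
    }

This matches the new cases in base_unittests.js below: '{"dummy":"data"}' and 'ADD_RESULTS({"dummy":"data"});' parse to the same object, including keys that themselves contain parentheses.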
diff --git a/Tools/GardeningServer/scripts/base_unittests.js b/Tools/GardeningServer/scripts/base_unittests.js
index 2106e68..d5d182c 100644
--- a/Tools/GardeningServer/scripts/base_unittests.js
+++ b/Tools/GardeningServer/scripts/base_unittests.js
@@ -466,9 +466,13 @@
     ok(!base.getURLParameter('non-existant'));
 });
 
-test("parseJSONP", 2, function() {
+test("parseJSONP", 6, function() {
     deepEqual(base.parseJSONP(""), {});
     deepEqual(base.parseJSONP('p({"key": "value"})'), {"key": "value"});
+    deepEqual(base.parseJSONP('ADD_RESULTS({"dummy":"data"});'), {"dummy":"data"});
+    deepEqual(base.parseJSONP('{"dummy":"data"}'), {"dummy":"data"});
+    deepEqual(base.parseJSONP('ADD_RESULTS({"builder(1)":"data"});'), {"builder(1)":"data"});
+    deepEqual(base.parseJSONP('{"builder(1)":"data"}'), {"builder(1)":"data"});
 });
 
 })();
diff --git a/Tools/GardeningServer/scripts/results.js b/Tools/GardeningServer/scripts/results.js
index 4e21d57..a9346b7 100644
--- a/Tools/GardeningServer/scripts/results.js
+++ b/Tools/GardeningServer/scripts/results.js
@@ -27,7 +27,7 @@
 
 (function() {
 
-var kResultsName = 'full_results.json';
+var kResultsName = 'failing_results.json';
 
 var kBuildLinkRegexp = /a href="\d+\/"/g;
 var kBuildNumberRegexp = /\d+/;
@@ -222,7 +222,6 @@
         this._isUnexpected = resultNode.is_unexpected;
         this._actual = resultNode ? results.failureTypeList(resultNode.actual) : [];
         this._expected = resultNode ? this._addImpliedExpectations(results.failureTypeList(resultNode.expected)) : [];
-        this._wontfix = resultNode ? resultNode.wontfix : false;
     },
     _addImpliedExpectations: function(resultsList)
     {
@@ -250,7 +249,7 @@
     },
     wontfix: function()
     {
-        return this._wontfix;
+        return this._expected.indexOf('WONTFIX') != -1;
     },
     hasUnexpectedFailures: function()
     {
diff --git a/Tools/GardeningServer/scripts/results_unittests.js b/Tools/GardeningServer/scripts/results_unittests.js
index cb96ee1..a04cb19 100644
--- a/Tools/GardeningServer/scripts/results_unittests.js
+++ b/Tools/GardeningServer/scripts/results_unittests.js
@@ -37,15 +37,8 @@
                 "actual": "IMAGE"
             },
             "expected-wontfix": {
-                "expected": "IMAGE",
-                "actual": "IMAGE",
-                "wontfix": true
-            },
-            "unexpected-wontfix": {
-                "expected": "IMAGE",
-                "actual": "TEXT",
-                "is_unexpected": true,
-                "wontfix": true
+                "expected": "WONTFIX",
+                "actual": "SKIP",
             },
             "flaky-scrollbar.html": {
                 "expected": "PASS",
@@ -521,7 +514,7 @@
     {
         simulator.scheduleCallback(function() {
             probedURLs.push(url);
-            callback(base.endsWith(url, 'results/layout-test-results/full_results.json'));
+            callback(base.endsWith(url, 'results/layout-test-results/failing_results.json'));
         });
     };
 
@@ -535,8 +528,8 @@
     });
 
     deepEqual(probedURLs, [
-        "http://build.chromium.org/f/chromium/layout_test_results/MockBuilder1/results/layout-test-results/full_results.json",
-        "http://build.chromium.org/f/chromium/layout_test_results/MockBuilder2/results/layout-test-results/full_results.json"
+        "http://build.chromium.org/f/chromium/layout_test_results/MockBuilder1/results/layout-test-results/failing_results.json",
+        "http://build.chromium.org/f/chromium/layout_test_results/MockBuilder2/results/layout-test-results/failing_results.json"
     ]);
 
 });
diff --git a/Tools/Scripts/check-blink-deps b/Tools/Scripts/check-blink-deps
new file mode 100755
index 0000000..db59ac7
--- /dev/null
+++ b/Tools/Scripts/check-blink-deps
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A utility script for running Chromium's dependency checker script on Blink."""
+
+import os
+import subprocess
+import sys
+
+def show_help():
+    print 'Usage: %s [dir=Source]' % os.path.basename(sys.argv[0])
+
+def main():
+    start_dir = None
+    if len(sys.argv) > 1:
+        start_dir = sys.argv[1]
+
+    if start_dir == '--help':
+        show_help()
+        return
+
+    root_dir = os.path.realpath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
+    if not start_dir:
+        start_dir = os.path.join(root_dir, 'Source')
+
+    check_deps = os.path.realpath(os.path.join(root_dir, os.pardir, os.pardir, 'tools', 'checkdeps', 'checkdeps.py'))
+    subprocess.call([sys.executable, check_deps, '--root', root_dir, start_dir])
+
+if '__main__' == __name__:
+    main()
diff --git a/Tools/Scripts/import-w3c-tests b/Tools/Scripts/import-w3c-tests
new file mode 100755
index 0000000..bb72096
--- /dev/null
+++ b/Tools/Scripts/import-w3c-tests
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above
+#    copyright notice, this list of conditions and the following
+#    disclaimer.
+# 2. Redistributions in binary form must reproduce the above
+#    copyright notice, this list of conditions and the following
+#    disclaimer in the documentation and/or other materials
+#    provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+
+import sys
+
+from webkitpy.w3c import test_importer
+
+
+sys.exit(test_importer.main(sys.argv[1:], sys.stdout, sys.stderr))
diff --git a/Tools/Scripts/print-json-test-results b/Tools/Scripts/print-json-test-results
new file mode 100755
index 0000000..f252884
--- /dev/null
+++ b/Tools/Scripts/print-json-test-results
@@ -0,0 +1,81 @@
+#!/usr/bin/python
+import json
+import optparse
+import os
+import sys
+
+
+def main(argv):
+    parser = optparse.OptionParser(usage='%prog [path-to-results.json]')
+    parser.add_option('--failures', action='store_true',
+                      help='show failing tests')
+    parser.add_option('--flakes', action='store_true',
+                      help='show flaky tests')
+    parser.add_option('--expected', action='store_true',
+                      help='include expected results along with unexpected')
+    parser.add_option('--passes', action='store_true',
+                      help='show passing tests')
+    options, args = parser.parse_args(argv)
+
+    if args and args[0] != '-':
+        if os.path.exists(args[0]):
+            with open(args[0], 'r') as fp:
+                txt = fp.read()
+        else:
+            print >> sys.stderr, "file not found: %s" % args[0]
+            sys.exit(1)
+    else:
+        txt = sys.stdin.read()
+
+    if txt.startswith('ADD_RESULTS(') and txt.endswith(');'):
+        txt = txt[12:-2]  # ignore optional JSONP wrapper
+    results = json.loads(txt)
+
+    passes, failures, flakes = decode_results(results, options.expected)
+
+    tests_to_print = []
+    if options.passes:
+        tests_to_print += passes.keys()
+    if options.failures:
+        tests_to_print += failures.keys()
+    if options.flakes:
+        tests_to_print += flakes.keys()
+    print "\n".join(sorted(tests_to_print))
+
+
+def decode_results(results, include_expected=False):
+    tests = convert_trie_to_flat_paths(results['tests'])
+    failures = {}
+    flakes = {}
+    passes = {}
+    for (test, result) in tests.iteritems():
+        if include_expected or result.get('is_unexpected'):
+            actual_result = result['actual']
+            if ' PASS' in actual_result:
+                flakes[test] = actual_result
+            elif actual_result == 'PASS':
+                passes[test] = result
+            else:
+                failures[test] = actual_result
+
+    return (passes, failures, flakes)
+
+
+def convert_trie_to_flat_paths(trie, prefix=None):
+    # Cloned from webkitpy.layout_tests.layout_package.json_results_generator
+    # so that this code can stand alone.
+    result = {}
+    for name, data in trie.iteritems():
+        if prefix:
+            name = prefix + "/" + name
+
+        if len(data) and not "actual" in data and not "expected" in data:
+            result.update(convert_trie_to_flat_paths(data, name))
+        else:
+            result[name] = data
+
+    return result
+
+
+if __name__ ==  '__main__':
+    main(sys.argv[1:])
diff --git a/Tools/Scripts/webkitpy/bindings/main.py b/Tools/Scripts/webkitpy/bindings/main.py
index d3115fb..48536c2 100644
--- a/Tools/Scripts/webkitpy/bindings/main.py
+++ b/Tools/Scripts/webkitpy/bindings/main.py
@@ -69,12 +69,12 @@
             os.write(idl_files_list[0], os.path.join(input_directory, input_file) + "\n")
         os.close(idl_files_list[0])
 
-        cmd = ['perl', '-w',
-               '-Ibindings/scripts',
-               'bindings/scripts/preprocess-idls.pl',
-               '--idlFilesList', idl_files_list[1],
-               '--supplementalDependencyFile', supplemental_dependency_file,
-               '--windowConstructorsFile', window_constructors_file]
+        cmd = ['python',
+               'bindings/scripts/preprocess_idls.py',
+               '--idl-files-list', idl_files_list[1],
+               '--supplemental-dependency-file', supplemental_dependency_file,
+               '--window-constructors-file', window_constructors_file,
+               '--write-file-only-if-changed', '0']
 
         exit_code = 0
         try:
diff --git a/Tools/Scripts/webkitpy/common/net/resultsjsonparser.py b/Tools/Scripts/webkitpy/common/net/resultsjsonparser.py
index 581f967..51a6fe0 100644
--- a/Tools/Scripts/webkitpy/common/net/resultsjsonparser.py
+++ b/Tools/Scripts/webkitpy/common/net/resultsjsonparser.py
@@ -99,8 +99,7 @@
 
     def _failure_types_from_actual_result(self, actual):
         # FIXME: There doesn't seem to be a full list of all possible values of
-        # 'actual' anywhere.  However JSONLayoutResultsGenerator.FAILURE_TO_CHAR
-        # is a useful reference as that's for "old" style results.json files
+        # 'actual' anywhere.
         #
         # FIXME: TEXT, IMAGE_PLUS_TEXT, and AUDIO are obsolete but we keep them for
         # now so that we can parse old results.json files.
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py
index 6ee6655..e3a4e3d 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py
@@ -183,13 +183,9 @@
             "Exiting early after %d crashes and %d timeouts." % (run_results.unexpected_crashes, run_results.unexpected_timeouts))
 
     def _update_summary_with_result(self, run_results, result):
-        if result.type == test_expectations.SKIP:
-            exp_str = got_str = 'SKIP'
-            expected = True
-        else:
-            expected = self._expectations.matches_an_expected_result(result.test_name, result.type, self._options.pixel_tests or result.reftest_type)
-            exp_str = self._expectations.get_expectations_string(result.test_name)
-            got_str = self._expectations.expectation_to_string(result.type)
+        expected = self._expectations.matches_an_expected_result(result.test_name, result.type, self._options.pixel_tests or result.reftest_type)
+        exp_str = self._expectations.get_expectations_string(result.test_name)
+        got_str = self._expectations.expectation_to_string(result.type)
 
         run_results.add(result, expected, self._test_is_slow(result.test_name))
 
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py b/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
index 953b753..58c07b0 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
@@ -40,10 +40,10 @@
 import sys
 import time
 
+from webkitpy.common.net.file_uploader import FileUploader
 from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinder
 from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner
 from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
-from webkitpy.layout_tests.layout_package import json_layout_results_generator
 from webkitpy.layout_tests.layout_package import json_results_generator
 from webkitpy.layout_tests.models import test_expectations
 from webkitpy.layout_tests.models import test_failures
@@ -117,7 +117,7 @@
             random.shuffle(tests_to_run)
 
         tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
-        self._expectations.add_skipped_tests(tests_in_other_chunks)
+        self._expectations.add_extra_skipped_tests(tests_in_other_chunks)
         tests_to_skip.update(tests_in_other_chunks)
 
         return tests_to_run, tests_to_skip
@@ -230,12 +230,13 @@
             self._look_for_new_crash_logs(retry_results, start_time)
 
         _log.debug("summarizing results")
-        summarized_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry)
-        self._printer.print_results(end_time - start_time, initial_results, summarized_results)
+        summarized_full_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry)
+        summarized_failing_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, only_include_failing=True)
+        self._printer.print_results(end_time - start_time, initial_results, summarized_failing_results)
 
         if not self._options.dry_run:
-            self._port.print_leaks_summary()
-            self._upload_json_files(summarized_results, initial_results)
+            self._write_json_files(summarized_full_results, summarized_failing_results, initial_results)
+            self._upload_json_files()
 
             results_path = self._filesystem.join(self._results_directory, "results.html")
             self._copy_results_html_file(results_path)
@@ -243,8 +244,8 @@
                                                (self._options.full_results_html and initial_results.total_failures)):
                 self._port.show_results_html_file(results_path)
 
-        return test_run_results.RunDetails(self._port.exit_code_from_summarized_results(summarized_results),
-                                           summarized_results, initial_results, retry_results, enabled_pixel_tests_in_retry)
+        return test_run_results.RunDetails(self._port.exit_code_from_summarized_results(summarized_failing_results),
+                                           summarized_full_results, summarized_failing_results, initial_results, retry_results, enabled_pixel_tests_in_retry)
 
     def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, num_workers, retrying):
         needs_http = self._port.requires_http_server() or any(self._is_http_test(test) for test in tests_to_run)
@@ -325,14 +326,7 @@
                     (result.type != test_expectations.MISSING) and
                     (result.type != test_expectations.CRASH or include_crashes))]
 
-    def _upload_json_files(self, summarized_results, initial_results):
-        """Writes the results of the test run as JSON files into the results
-        dir and upload the files to the appengine server.
-
-        Args:
-          summarized_results: dict of results
-          initial_results: full summary object
-        """
+    def _write_json_files(self, summarized_full_results, summarized_failing_results, initial_results):
         _log.debug("Writing JSON files in %s." % self._results_directory)
 
         # FIXME: Upload stats.json to the server and delete times_ms.
@@ -345,31 +339,44 @@
         self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))
 
         full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
-        # We write full_results.json out as jsonp because we need to load it from a file url and Chromium doesn't allow that.
-        json_results_generator.write_json(self._filesystem, summarized_results, full_results_path, callback="ADD_RESULTS")
+        json_results_generator.write_json(self._filesystem, summarized_full_results, full_results_path)
 
-        generator = json_layout_results_generator.JSONLayoutResultsGenerator(
-            self._port, self._options.builder_name, self._options.build_name,
-            self._options.build_number, self._results_directory,
-            BUILDER_BASE_URL,
-            self._expectations, initial_results,
-            self._options.test_results_server,
-            "layout-tests",
-            self._options.master_name)
+        failing_results_path = self._filesystem.join(self._results_directory, "failing_results.json")
+        # We write failing_results.json out as jsonp because we need to load it from a file URL for results.html and Chromium doesn't allow that.
+        json_results_generator.write_json(self._filesystem, summarized_failing_results, failing_results_path, callback="ADD_RESULTS")
 
         _log.debug("Finished writing JSON files.")
 
+    def _upload_json_files(self):
+        if not self._options.test_results_server:
+            return
 
-        json_files = ["incremental_results.json", "full_results.json", "times_ms.json"]
+        if not self._options.master_name:
+            _log.error("--test-results-server was set, but --master-name was not.  Not uploading JSON files.")
+            return
 
-        generator.upload_json_files(json_files)
+        _log.debug("Uploading JSON files for builder: %s", self._options.builder_name)
+        attrs = [("builder", self._options.builder_name),
+                 ("testtype", "layout-tests"),
+                 ("master", self._options.master_name)]
 
-        incremental_results_path = self._filesystem.join(self._results_directory, "incremental_results.json")
+        files = [(file, self._filesystem.join(self._results_directory, file)) for file in ["failing_results.json", "full_results.json", "times_ms.json"]]
 
-        # Remove these files from the results directory so they don't take up too much space on the buildbot.
-        # The tools use the version we uploaded to the results server anyway.
-        self._filesystem.remove(times_json_path)
-        self._filesystem.remove(incremental_results_path)
+        url = "http://%s/testfile/upload" % self._options.test_results_server
+        # Set an upload timeout in case the appengine server is having problems.
+        # 120 seconds should be more than enough to upload test results.
+        uploader = FileUploader(url, 120)
+        try:
+            response = uploader.upload_as_multipart_form_data(self._filesystem, files, attrs)
+            if response:
+                if response.code == 200:
+                    _log.debug("JSON uploaded.")
+                else:
+                    _log.debug("JSON upload failed, %d: '%s'" % (response.code, response.read()))
+            else:
+                _log.error("JSON upload failed; no response returned")
+        except Exception, err:
+            _log.error("Upload failed: %s" % err)
 
     def _copy_results_html_file(self, destination_path):
         base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
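
A minimal sketch (the write_json helper below is a hypothetical stand-in for json_results_generator.write_json) of the two output shapes produced above: full_results.json is written as plain JSON, while failing_results.json is wrapped in an ADD_RESULTS(...) JSONP callback so results.html can load it from a file:// URL:

    import json

    def write_json(path, payload, callback=None):
        # Hypothetical stand-in; wraps the payload in a JSONP callback when asked to.
        text = json.dumps(payload)
        if callback:
            text = '%s(%s);' % (callback, text)
        with open(path, 'w') as output_file:
            output_file.write(text)

    summary = {'version': 3, 'num_passes': 41, 'num_regressions': 2}
    write_json('full_results.json', summary)                             # {"version": 3, ...}
    write_json('failing_results.json', summary, callback='ADD_RESULTS')  # ADD_RESULTS({...});
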
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/bot_test_expectations.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/bot_test_expectations.py
index 7e39129..fc7ed5b 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/bot_test_expectations.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/bot_test_expectations.py
@@ -36,88 +36,153 @@
 import urllib2
 
 from webkitpy.layout_tests.port import builders
+from webkitpy.layout_tests.models.test_expectations import TestExpectationLine
+
 
 _log = logging.getLogger(__name__)
 
 
-class BotTestExpectations(object):
+# results.json v4 format:
+# {
+#  'version': 4,
+#  'builder name': {
+#     'fixableCounts': {},
+#     'blinkRevision': [],
+#     'tests': {
+#       'directory': {  # Each path component is a dictionary.
+#          'testname.html': {
+#             'expected': 'FAIL',  # expectation name
+#             'results': [],  # Run-length encoded results.
+#             'times': [],
+#             'bugs': [],  # bug urls
+#          }
+#       }
+#     },
+#     'buildNumbers': [],
+#     'secondsSinceEpoch': [],
+#     'fixableCount': [],
+#     'allFixableCount': [],
+#     'chromeRevision': [],
+#     'failure_map': {}  # Map from letter code to expectation name.
+#  }
+# }
+class ResultsJSON(object):
+    TESTS_KEY = 'tests'
+    FAILURE_MAP_KEY = 'failure_map'
+    RESULTS_KEY = 'results'
+    BUGS_KEY = 'bugs'
     RLE_LENGTH = 0
     RLE_VALUE = 1
-    RESULTS_KEY = 'results'
-    TESTS_KEY = 'tests'
+
+    # results.json was originally designed to support
+    # multiple builders in one json file, so the builder_name
+    # is needed to figure out which builder this json file
+    # refers to (and thus where the results are stored)
+    def __init__(self, builder_name, json_dict):
+        self.builder_name = builder_name
+        self._json = json_dict
+
+    def _walk_trie(self, trie, parent_path):
+        for name, value in trie.items():
+            full_path = os.path.join(parent_path, name)
+
+            # FIXME: If we ever have a test directory self.RESULTS_KEY
+            # ("results"), this logic will break!
+            if self.RESULTS_KEY not in value:
+                for path, results in self._walk_trie(value, full_path):
+                    yield path, results
+            else:
+                yield full_path, value
+
+    def walk_results(self, full_path=''):
+        tests_trie = self._json[self.builder_name][self.TESTS_KEY]
+        return self._walk_trie(tests_trie, parent_path='')
+
+    def expectation_for_type(self, type_char):
+        return self._json[self.builder_name][self.FAILURE_MAP_KEY][type_char]
+
+    # Knowing how to parse the run-length-encoded values in results.json
+    # is a detail of this class.
+    def occurances_and_type_from_result_item(self, item):
+        return item[self.RLE_LENGTH], item[self.RLE_VALUE]
+
+
+class BotTestExpecationsFactory(object):
     RESULTS_URL_PREFIX = 'http://test-results.appspot.com/testfile?master=ChromiumWebkit&testtype=layout-tests&name=results-small.json&builder='
 
-    # FIXME: This map should be generated from data in json_layout_results_generator.py and test_expectations.py.
-    MAP_ENCODED_RESULT_STRING_TO_EXPECTATIONS_VALUE = {
-        'P': 'Pass',
-        'N': '', # No data
-        'X': '', # Skip
-        'T': 'Timeout',
-        'F': 'Failure', # text-only
-        'C': 'Crash',
-        'I': 'ImageOnlyFailure',
-        'Z': 'Failure', # image+text
-        'O': 'Missing',
-    };
-
-    def __init__(self, only_ignore_very_flaky):
-        self._only_ignore_very_flaky = only_ignore_very_flaky
-
-    def expectations_string(self, port_name):
+    def _results_json_for_port(self, port_name):
         builder_name = builders.builder_name_for_port_name(port_name)
         if not builder_name:
-            return ""
-
-        url = self.RESULTS_URL_PREFIX + urllib.quote(builder_name)
+            return None
+        results_url = self.RESULTS_URL_PREFIX + urllib.quote(builder_name)
         try:
             _log.debug('Fetching flakiness data from appengine.')
-            data = urllib2.urlopen(url)
-            parsed_data = json.load(data)[builder_name]
-            result = self._generate_expectations_string(parsed_data)
-            return result
+            return ResultsJSON(builder_name, json.load(urllib2.urlopen(results_url)))
         except urllib2.URLError as error:
-            _log.warning('Could not retrieve flakiness data from the bot.')
+            _log.warning('Could not retrieve flakiness data from the bot.  url: %s', results_url)
             _log.warning(error)
-            return ""
 
-    def _generate_expectations_string(self, test_data):
-        out = []
-        self._walk_tests_trie(test_data[self.TESTS_KEY], out)
-        return "\n".join(out)
+    def expectations_for_port(self, port_name):
+        results_json = self._results_json_for_port(port_name)
+        if not results_json:
+            return None
+        return BotTestExpectations(results_json)
 
-    def _actual_results_for_test(self, run_length_encoded_results):
-        resultsMap = {}
 
-        seenResults = {};
-        for result in run_length_encoded_results:
-            numResults = result[self.RLE_LENGTH];
-            result_string = result[self.RLE_VALUE];
+class BotTestExpectations(object):
+    # FIXME: Get this from the json instead of hard-coding it.
+    RESULT_TYPES_TO_IGNORE = ['N', 'X', 'Y']
 
-            if result_string == 'N' or result_string == 'X':
+    def __init__(self, results_json):
+        self.results_json = results_json
+
+    def _line_from_test_and_flaky_types_and_bug_urls(self, test_path, flaky_types, bug_urls):
+        line = TestExpectationLine()
+        line.original_string = test_path
+        line.name = test_path
+        line.filename = test_path
+        line.modifiers = bug_urls if bug_urls else ""
+        line.expectations = sorted(map(self.results_json.expectation_for_type, flaky_types))
+        return line
+
+    def flakes_by_path(self, only_ignore_very_flaky):
+        flakes_by_path = {}
+        for test_path, entry in self.results_json.walk_results():
+            results_array = entry[self.results_json.RESULTS_KEY]
+            flaky_types = self._flaky_types_in_results(results_array, only_ignore_very_flaky)
+            if len(flaky_types) <= 1:
+                continue
+            flakes_by_path[test_path] = sorted(map(self.results_json.expectation_for_type, flaky_types))
+        return flakes_by_path
+
+    def expectation_lines(self):
+        lines = []
+        for test_path, entry in self.results_json.walk_results():
+            results_array = entry[self.results_json.RESULTS_KEY]
+            flaky_types = self._flaky_types_in_results(results_array, False)
+            if len(flaky_types) > 1:
+                bug_urls = entry.get(self.results_json.BUGS_KEY)
+                line = self._line_from_test_and_flaky_types_and_bug_urls(test_path, flaky_types, bug_urls)
+                lines.append(line)
+        return lines
+
+    def _flaky_types_in_results(self, run_length_encoded_results, only_ignore_very_flaky=False):
+        results_map = {}
+        seen_results = {}
+
+        for result_item in run_length_encoded_results:
+            _, result_type = self.results_json.occurances_and_type_from_result_item(result_item)
+            if result_type in self.RESULT_TYPES_TO_IGNORE:
                 continue
 
-            if self._only_ignore_very_flaky and result_string not in seenResults:
+            if only_ignore_very_flaky and result_type not in seen_results:
                 # Only consider a short-lived result if we've seen it more than once.
                 # Otherwise, we include lots of false-positives due to tests that fail
                 # for a couple runs and then start passing.
                 # FIXME: Maybe we should make this more liberal and consider it a flake
                 # even if we only see that failure once.
-                seenResults[result_string] = True
+                seen_results[result_type] = True
                 continue
 
-            expectation = self.MAP_ENCODED_RESULT_STRING_TO_EXPECTATIONS_VALUE[result_string]
-            resultsMap[expectation] = True;
+            results_map[result_type] = True
 
-        return resultsMap.keys()
-
-    def _walk_tests_trie(self, trie, out, path_so_far=""):
-        for name in trie:
-            new_path = os.path.join(path_so_far, name)
-            if self.RESULTS_KEY not in trie[name]:
-                self._walk_tests_trie(trie[name], out, new_path)
-                continue
-
-            results = trie[name][self.RESULTS_KEY]
-            actual_results = self._actual_results_for_test(results)
-            if len(actual_results) > 1:
-                out.append('Bug(auto) %s [ %s ]' % (new_path, " ".join(actual_results)))
+        return results_map.keys()
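
A small standalone sketch (hypothetical data) of the run-length-encoded 'results' entries that ResultsJSON hands to BotTestExpectations: each item is [run_length, type_letter], and the letters are translated through the per-builder 'failure_map':

    # Hypothetical failure_map subset and RLE results for a single test.
    failure_map = {'P': 'PASS', 'F': 'TEXT', 'C': 'CRASH'}
    results = [[1, 'P'], [2, 'F'], [5, 'P']]  # one PASS run, two TEXT failures, five PASS runs

    seen_types = set()
    for run_length, type_letter in results:
        seen_types.add(type_letter)

    # More than one distinct result type makes the test look flaky, so
    # flakes_by_path() would report its expectations for this path.
    assert len(seen_types) > 1
    assert sorted(failure_map[letter] for letter in seen_types) == ['PASS', 'TEXT']
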
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/bot_test_expectations_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/bot_test_expectations_unittest.py
index d98ac84..4e21563 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/bot_test_expectations_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/bot_test_expectations_unittest.py
@@ -29,58 +29,90 @@
 import unittest2 as unittest
 
 from webkitpy.layout_tests.layout_package import bot_test_expectations
+from webkitpy.layout_tests.models import test_expectations
+
 
 class BotTestExpectationsTest(unittest.TestCase):
+    # FIXME: Find a way to import this map from Tools/TestResultServer/model/jsonresults.py.
+    FAILURE_MAP = {"A": "AUDIO", "C": "CRASH", "F": "TEXT", "I": "IMAGE", "O": "MISSING",
+        "N": "NO DATA", "P": "PASS", "T": "TIMEOUT", "Y": "NOTRUN", "X": "SKIP", "Z": "IMAGE+TEXT"}
+
+    # All result strings in this file expect the newest result
+    # on the left: "PFF" means the test just passed after two failures.
+
+    def _assert_is_flaky(self, results_string, should_be_flaky):
+        results_json = self._results_json_from_test_data({})
+        expectations = bot_test_expectations.BotTestExpectations(results_json)
+        length_encoded = self._results_from_string(results_string)['results']
+        num_actual_results = len(expectations._flaky_types_in_results(length_encoded, only_ignore_very_flaky=True))
+        if should_be_flaky:
+            self.assertGreater(num_actual_results, 1)
+        else:
+            self.assertEqual(num_actual_results, 1)
+
+    def test_basic_flaky(self):
+        self._assert_is_flaky('PFF', False)  # Used to fail, but now passes.
+        self._assert_is_flaky('FFP', False)  # Just started failing.
+        self._assert_is_flaky('PFPF', True)  # Seen both failures and passes.
+        # self._assert_is_flaky('PPPF', True)  # Should be counted as flaky but isn't yet.
+        self._assert_is_flaky('FPPP', False)  # Just started failing, not flaky.
+        self._assert_is_flaky('PFFP', True)  # Failed twice in a row, still flaky.
+        # Failing 3+ times in a row is unlikely to be flaky, but rather a transient failure on trunk.
+        # self._assert_is_flaky('PFFFP', False)
+        # self._assert_is_flaky('PFFFFP', False)
+
+    def _results_json_from_test_data(self, test_data):
+        test_data[bot_test_expectations.ResultsJSON.FAILURE_MAP_KEY] = self.FAILURE_MAP
+        json_dict = {
+            'builder': test_data,
+        }
+        return bot_test_expectations.ResultsJSON('builder', json_dict)
+
+    def _results_from_string(self, results_string):
+        results_list = []
+        last_char = None
+        for char in results_string:
+            if char != last_char:
+                results_list.insert(0, [1, char])
+            else:
+                results_list[0][0] += 1
+        return {'results': results_list}
+
+    def _assert_expectations(self, test_data, expectations_string, only_ignore_very_flaky):
+        results_json = self._results_json_from_test_data(test_data)
+        expectations = bot_test_expectations.BotTestExpectations(results_json)
+        self.assertEqual(expectations.flakes_by_path(only_ignore_very_flaky), expectations_string)
 
     def test_basic(self):
-        expectations = bot_test_expectations.BotTestExpectations(only_ignore_very_flaky=True)
         test_data = {
             'tests': {
                 'foo': {
-                    'veryflaky.html': {
-                        'results': [[1, 'F'], [1, 'P'], [1, 'F'], [1, 'P']]
-                    },
-                    'maybeflaky.html': {
-                        'results': [[3, 'P'], [1, 'F'], [3, 'P']]
-                    },
-                    'notflakypass.html': {
-                        'results': [[4, 'P']]
-                    },
-                    'notflakyfail.html': {
-                        'results': [[4, 'F']]
-                    },
+                    'veryflaky.html': self._results_from_string('FPFP'),
+                    'maybeflaky.html': self._results_from_string('PPFP'),
+                    'notflakypass.html': self._results_from_string('PPPP'),
+                    'notflakyfail.html': self._results_from_string('FFFF'),
                 }
             }
         }
-        output = expectations._generate_expectations_string(test_data)
-        expected_output = """Bug(auto) foo/veryflaky.html [ Failure Pass ]"""
-        self.assertMultiLineEqual(output, expected_output)
+        self._assert_expectations(test_data, {
+            'foo/veryflaky.html': sorted(["TEXT", "PASS"]),
+        }, only_ignore_very_flaky=True)
 
-        expectations = bot_test_expectations.BotTestExpectations(only_ignore_very_flaky=False)
-        output = expectations._generate_expectations_string(test_data)
-        expected_output = """Bug(auto) foo/veryflaky.html [ Failure Pass ]
-Bug(auto) foo/maybeflaky.html [ Failure Pass ]"""
-        self.assertMultiLineEqual(output, expected_output)
+        self._assert_expectations(test_data, {
+            'foo/veryflaky.html': sorted(["TEXT", "PASS"]),
+            'foo/maybeflaky.html': sorted(["TEXT", "PASS"]),
+        }, only_ignore_very_flaky=False)
 
     def test_all_failure_types(self):
-        expectations = bot_test_expectations.BotTestExpectations(only_ignore_very_flaky=True)
         test_data = {
             'tests': {
                 'foo': {
-                    'allfailures.html': {
-                        'results': [[1, 'F'], [1, 'P'], [1, 'F'], [1, 'P'],
-                            [1, 'C'], [1, 'N'], [1, 'C'], [1, 'N'],
-                            [1, 'T'], [1, 'X'], [1, 'T'], [1, 'X'],
-                            [1, 'I'], [1, 'Z'], [1, 'I'], [1, 'Z'],
-                            [1, 'O'], [1, 'C'], [1, 'O'], [1, 'C']]
-                    },
-                    'imageplustextflake.html': {
-                        'results': [[1, 'Z'], [1, 'P'], [1, 'Z'], [1, 'P']]
-                    },
+                    'allfailures.html': self._results_from_string('FPFPCNCNTXTXIZIZOCYOCY'),
+                    'imageplustextflake.html': self._results_from_string('ZPZPPPPPPPPPPPPPPPPP'),
                 }
             }
         }
-        output = expectations._generate_expectations_string(test_data)
-        expected_output = """Bug(auto) foo/imageplustextflake.html [ Failure Pass ]
-Bug(auto) foo/allfailures.html [ Crash Missing ImageOnlyFailure Failure Timeout Pass ]"""
-        self.assertMultiLineEqual(output, expected_output)
+        self._assert_expectations(test_data, {
+            'foo/imageplustextflake.html': sorted(["IMAGE+TEXT", "PASS"]),
+            'foo/allfailures.html': sorted(["TEXT", "PASS", "IMAGE+TEXT", "TIMEOUT", "CRASH", "IMAGE", "MISSING"]),
+        }, only_ignore_very_flaky=True)
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
deleted file mode 100644
index a635f6e..0000000
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
+++ /dev/null
@@ -1,173 +0,0 @@
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import logging
-
-from webkitpy.layout_tests.layout_package import json_results_generator
-from webkitpy.layout_tests.models import test_expectations
-from webkitpy.layout_tests.models import test_failures
-
-class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGeneratorBase):
-    """A JSON results generator for layout tests."""
-
-    LAYOUT_TESTS_PATH = "LayoutTests"
-
-    # Additional JSON fields.
-    WONTFIX = "wontfixCounts"
-
-    FAILURE_TO_CHAR = {test_expectations.PASS: json_results_generator.JSONResultsGeneratorBase.PASS_RESULT,
-                       test_expectations.SKIP: json_results_generator.JSONResultsGeneratorBase.SKIP_RESULT,
-                       test_expectations.CRASH: "C",
-                       test_expectations.TIMEOUT: "T",
-                       test_expectations.IMAGE: "I",
-                       test_expectations.TEXT: "F",
-                       test_expectations.AUDIO: "A",
-                       test_expectations.MISSING: "O",
-                       test_expectations.IMAGE_PLUS_TEXT: "Z"}
-
-    def __init__(self, port, builder_name, build_name, build_number,
-        results_file_base_path, builder_base_url,
-        expectations, run_results,
-        test_results_server=None, test_type="", master_name=""):
-        """Modifies the results.json file. Grabs it off the archive directory
-        if it is not found locally.
-
-        Args:
-          run_results: TestRunResults object storing the details of the test run.
-        """
-        super(JSONLayoutResultsGenerator, self).__init__(
-            port, builder_name, build_name, build_number, results_file_base_path,
-            builder_base_url, {}, port.repository_paths(),
-            test_results_server, test_type, master_name)
-
-        self._expectations = expectations
-
-        self._run_results = run_results
-        self._failures = dict((test_name, run_results.results_by_name[test_name].type) for test_name in run_results.failures_by_name)
-        self._test_timings = run_results.results_by_name
-
-        self.generate_json_output()
-
-    def _get_path_relative_to_layout_test_root(self, test):
-        """Returns the path of the test relative to the layout test root.
-        For example, for:
-          src/third_party/WebKit/LayoutTests/fast/forms/foo.html
-        We would return
-          fast/forms/foo.html
-        """
-        index = test.find(self.LAYOUT_TESTS_PATH)
-        if index is not -1:
-            index += len(self.LAYOUT_TESTS_PATH)
-
-        if index is -1:
-            # Already a relative path.
-            relativePath = test
-        else:
-            relativePath = test[index + 1:]
-
-        # Make sure all paths are unix-style.
-        return relativePath.replace('\\', '/')
-
-    # override
-    def _get_test_timing(self, test_name):
-        if test_name in self._test_timings:
-            # Floor for now to get time in seconds.
-            return int(self._test_timings[test_name].test_run_time)
-        return 0
-
-    # override
-    def _get_failed_test_names(self):
-        return set(self._failures.keys())
-
-    # override
-    def _get_modifier_char(self, test_name):
-        if test_name not in self._run_results.results_by_name:
-            return self.NO_DATA_RESULT
-
-        if test_name in self._failures:
-            return self.FAILURE_TO_CHAR[self._failures[test_name]]
-
-        return self.PASS_RESULT
-
-    # override
-    def _get_result_char(self, test_name):
-        return self._get_modifier_char(test_name)
-
-    # override
-    def _insert_failure_summaries(self, results_for_builder):
-        run_results = self._run_results
-
-        self._insert_item_into_raw_list(results_for_builder,
-            len((set(run_results.failures_by_name.keys()) |
-                run_results.tests_by_expectation[test_expectations.SKIP]) &
-                run_results.tests_by_timeline[test_expectations.NOW]),
-            self.FIXABLE_COUNT)
-        self._insert_item_into_raw_list(results_for_builder,
-            self._get_failure_summary_entry(test_expectations.NOW),
-            self.FIXABLE)
-        self._insert_item_into_raw_list(results_for_builder,
-            len(self._expectations.get_tests_with_timeline(
-                test_expectations.NOW)), self.ALL_FIXABLE_COUNT)
-        self._insert_item_into_raw_list(results_for_builder,
-            self._get_failure_summary_entry(test_expectations.WONTFIX),
-            self.WONTFIX)
-
-    # override
-    def _normalize_results_json(self, test, test_name, tests):
-        super(JSONLayoutResultsGenerator, self)._normalize_results_json(
-            test, test_name, tests)
-
-        # Remove tests that don't exist anymore.
-        full_path = self._filesystem.join(self._port.layout_tests_dir(), test_name)
-        full_path = self._filesystem.normpath(full_path)
-        if not self._filesystem.exists(full_path):
-            del tests[test_name]
-
-    def _get_failure_summary_entry(self, timeline):
-        """Creates a summary object to insert into the JSON.
-
-        Args:
-          timeline  current test_expectations timeline to build entry for
-                    (e.g., test_expectations.NOW, etc.)
-        """
-        entry = {}
-        run_results = self._run_results
-        timeline_tests = run_results.tests_by_timeline[timeline]
-        entry[self.SKIP_RESULT] = len(
-            run_results.tests_by_expectation[test_expectations.SKIP] &
-            timeline_tests)
-        entry[self.PASS_RESULT] = len(
-            run_results.tests_by_expectation[test_expectations.PASS] &
-            timeline_tests)
-        for failure_type in run_results.tests_by_expectation.keys():
-            if failure_type not in self.FAILURE_TO_CHAR:
-                continue
-            count = len(run_results.tests_by_expectation[failure_type] &
-                        timeline_tests)
-            entry[self.FAILURE_TO_CHAR[failure_type]] = count
-        return entry
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
index a18bc99..d2ce514 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
@@ -37,8 +37,15 @@
 from webkitpy.common.checkout.scm.detection import SCMDetector
 from webkitpy.common.net.file_uploader import FileUploader
 
-# A JSON results generator for generic tests.
-# FIXME: move this code out of the layout_package directory.
+# FIXME: These are the leftovers from when we used to generate JSON here.
+# What's still used by webkitpy is just a group of functions used by a
+# hodge-podge of different classes. Those functions should be moved to where
+# they are used, and this file should go away entirely.
+#
+# Unfortunately, a big chunk of this file is used by
+# chromium/src/build/android/pylib/utils/flakiness_dashboard_results_uploader.py
+# so we can't delete it until that code is migrated over.
+# See crbug.com/242206
 
 _log = logging.getLogger(__name__)
 
diff --git a/Tools/Scripts/webkitpy/layout_tests/lint_test_expectations_unittest.py b/Tools/Scripts/webkitpy/layout_tests/lint_test_expectations_unittest.py
index 4728029..df51d5d 100644
--- a/Tools/Scripts/webkitpy/layout_tests/lint_test_expectations_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/lint_test_expectations_unittest.py
@@ -47,6 +47,9 @@
         self.host.ports_parsed.append(self.name)
         return {self.path: ''}
 
+    def bot_expectations(self):
+        return {}
+
     def skipped_layout_tests(self, _):
         return set([])
 
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
index fd35b44..3d9e3cf 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
@@ -48,6 +48,10 @@
 # FIXME: Perhaps these two routines should be part of the Port instead?
 BASELINE_SUFFIX_LIST = ('png', 'wav', 'txt')
 
+WEBKIT_BUG_PREFIX = 'webkit.org/b/'
+CHROMIUM_BUG_PREFIX = 'crbug.com/'
+V8_BUG_PREFIX = 'code.google.com/p/v8/issues/detail?id='
+NAMED_BUG_PREFIX = 'Bug('
 
 class ParseError(Exception):
     def __init__(self, warnings):
@@ -64,9 +68,6 @@
 class TestExpectationParser(object):
     """Provides parsing facilities for lines in the test_expectation.txt file."""
 
-    DUMMY_BUG_MODIFIER = "bug_dummy"
-    BUG_MODIFIER_PREFIX = 'bug'
-    BUG_MODIFIER_REGEX = 'bug\d+'
     REBASELINE_MODIFIER = 'rebaseline'
     PASS_EXPECTATION = 'pass'
     SKIP_MODIFIER = 'skip'
@@ -93,21 +94,30 @@
             expectation_lines.append(test_expectation)
         return expectation_lines
 
+    def _create_expectation_line(self, test_name, expectations, file_name):
+        expectation_line = TestExpectationLine()
+        expectation_line.original_string = test_name
+        expectation_line.name = test_name
+        expectation_line.filename = file_name
+        expectation_line.line_number = 0
+        expectation_line.expectations = expectations
+        return expectation_line
+
+    def expectation_line_for_test(self, test_name, expectations):
+        expectation_line = self._create_expectation_line(test_name, expectations, '<Bot TestExpectations>')
+        self._parse_line(expectation_line)
+        return expectation_line
+
+
     def expectation_for_skipped_test(self, test_name):
         if not self._port.test_exists(test_name):
             _log.warning('The following test %s from the Skipped list doesn\'t exist' % test_name)
-        expectation_line = TestExpectationLine()
-        expectation_line.original_string = test_name
-        expectation_line.modifiers = [TestExpectationParser.DUMMY_BUG_MODIFIER, TestExpectationParser.SKIP_MODIFIER]
+        expectation_line = self._create_expectation_line(test_name, [TestExpectationParser.PASS_EXPECTATION], '<Skipped file>')
         # FIXME: It's not clear what the expectations for a skipped test should be; the expectations
         # might be different for different entries in a Skipped file, or from the command line, or from
         # only running parts of the tests. It's also not clear if it matters much.
-        expectation_line.modifiers.append(TestExpectationParser.WONTFIX_MODIFIER)
-        expectation_line.name = test_name
-        # FIXME: we should pass in a more descriptive string here.
-        expectation_line.filename = '<Skipped file>'
-        expectation_line.line_number = 0
-        expectation_line.expectations = [TestExpectationParser.PASS_EXPECTATION]
+        expectation_line.modifiers = [TestExpectationParser.SKIP_MODIFIER, TestExpectationParser.WONTFIX_MODIFIER]
+        expectation_line.is_skipped_outside_expectations_file = True
         self._parse_line(expectation_line)
         return expectation_line
 
@@ -140,19 +150,19 @@
         if self.SLOW_MODIFIER in modifiers and self.TIMEOUT_EXPECTATION in expectations:
             expectation_line.warnings.append('A test can not be both SLOW and TIMEOUT. If it times out indefinitely, then it should be just TIMEOUT.')
 
-        for modifier in modifiers:
-            if modifier in TestExpectations.MODIFIERS:
-                expectation_line.parsed_modifiers.append(modifier)
-                if modifier == self.WONTFIX_MODIFIER:
-                    has_wontfix = True
-            elif modifier.startswith(self.BUG_MODIFIER_PREFIX):
+        for modifier in expectation_line.modifiers:
+            if modifier.startswith(WEBKIT_BUG_PREFIX) or modifier.startswith(CHROMIUM_BUG_PREFIX) or modifier.startswith(V8_BUG_PREFIX) or modifier.startswith(NAMED_BUG_PREFIX):
                 has_bugid = True
-                if re.match(self.BUG_MODIFIER_REGEX, modifier):
-                    expectation_line.warnings.append('BUG\d+ is not allowed, must be one of BUGCR\d+, BUGWK\d+, BUGV8_\d+, or a non-numeric bug identifier.')
-                else:
-                    expectation_line.parsed_bug_modifiers.append(modifier)
+                expectation_line.parsed_bug_modifiers.append(modifier)
             else:
-                parsed_specifiers.add(modifier)
+                # FIXME: Store the unmodified modifier.
+                modifier = modifier.lower()
+                if modifier in TestExpectations.MODIFIERS:
+                    expectation_line.parsed_modifiers.append(modifier)
+                    if modifier == self.WONTFIX_MODIFIER:
+                        has_wontfix = True
+                else:
+                    parsed_specifiers.add(modifier)
 
         if not expectation_line.parsed_bug_modifiers and not has_wontfix and not has_bugid and self._port.warn_if_bug_missing_in_test_expectations():
             expectation_line.warnings.append(self.MISSING_BUG_WARNING)
@@ -275,36 +285,29 @@
         expectations = []
         warnings = []
 
-        WEBKIT_BUG_PREFIX = 'webkit.org/b/'
-        CHROMIUM_BUG_PREFIX = 'crbug.com/'
-        V8_BUG_PREFIX = 'code.google.com/p/v8/issues/detail?id='
-
         tokens = remaining_string.split()
         state = 'start'
         for token in tokens:
             if (token.startswith(WEBKIT_BUG_PREFIX) or
                 token.startswith(CHROMIUM_BUG_PREFIX) or
                 token.startswith(V8_BUG_PREFIX) or
-                token.startswith('Bug(')):
+                token.startswith(NAMED_BUG_PREFIX)):
                 if state != 'start':
                     warnings.append('"%s" is not at the start of the line.' % token)
                     break
                 if token.startswith(WEBKIT_BUG_PREFIX):
-                    bugs.append(token.replace(WEBKIT_BUG_PREFIX, 'BUGWK'))
+                    bugs.append(token)
                 elif token.startswith(CHROMIUM_BUG_PREFIX):
-                    bugs.append(token.replace(CHROMIUM_BUG_PREFIX, 'BUGCR'))
+                    bugs.append(token)
                 elif token.startswith(V8_BUG_PREFIX):
-                    bugs.append(token.replace(V8_BUG_PREFIX, 'BUGV8_'))
+                    bugs.append(token)
                 else:
                     match = re.match('Bug\((\w+)\)$', token)
                     if not match:
                         warnings.append('unrecognized bug identifier "%s"' % token)
                         break
                     else:
-                        bugs.append('BUG' + match.group(1).upper())
-            elif token.startswith('BUG'):
-                warnings.append('unrecognized old-style bug identifier "%s"' % token)
-                break
+                        bugs.append(token)
             elif token == '[':
                 if state == 'start':
                     state = 'configuration'
@@ -389,6 +392,24 @@
         self.comment = None
         self.matching_tests = []
         self.warnings = []
+        self.is_skipped_outside_expectations_file = False
+
+    def __eq__(self, other):
+        return (self.original_string == other.original_string
+            and self.filename == other.filename
+            and self.line_number == other.line_number
+            and self.name == other.name
+            and self.path == other.path
+            and self.modifiers == other.modifiers
+            and self.parsed_modifiers == other.parsed_modifiers
+            and self.parsed_bug_modifiers == other.parsed_bug_modifiers
+            and self.matching_configurations == other.matching_configurations
+            and self.expectations == other.expectations
+            and self.parsed_expectations == other.parsed_expectations
+            and self.comment == other.comment
+            and self.matching_tests == other.matching_tests
+            and self.warnings == other.warnings
+            and self.is_skipped_outside_expectations_file == other.is_skipped_outside_expectations_file)
 
     def is_invalid(self):
         return self.warnings and self.warnings != [TestExpectationParser.MISSING_BUG_WARNING]
@@ -448,23 +469,28 @@
         return ' '.join(result)
 
     @staticmethod
+    def _filter_redundant_expectations(expectations):
+        if set(expectations) == set(['Pass', 'Skip']):
+            return ['Skip']
+        if set(expectations) == set(['Pass', 'Slow']):
+            return ['Slow']
+        return expectations
+
+    @staticmethod
     def _format_line(modifiers, name, expectations, comment, include_modifiers=True, include_expectations=True, include_comment=True):
         bugs = []
         new_modifiers = []
         new_expectations = []
         for modifier in modifiers:
-            modifier = modifier.upper()
-            if modifier.startswith('BUGWK'):
-                bugs.append('webkit.org/b/' + modifier.replace('BUGWK', ''))
-            elif modifier.startswith('BUGCR'):
-                bugs.append('crbug.com/' + modifier.replace('BUGCR', ''))
-            elif modifier.startswith('BUG'):
-                # FIXME: we should preserve case once we can drop the old syntax.
-                bugs.append('Bug(' + modifier[3:].lower() + ')')
-            elif modifier in ('SLOW', 'SKIP', 'REBASELINE', 'WONTFIX'):
-                new_expectations.append(TestExpectationParser._inverted_expectation_tokens.get(modifier))
+            if modifier.startswith(WEBKIT_BUG_PREFIX) or modifier.startswith(CHROMIUM_BUG_PREFIX) or modifier.startswith(V8_BUG_PREFIX) or modifier.startswith(NAMED_BUG_PREFIX):
+                bugs.append(modifier)
             else:
-                new_modifiers.append(TestExpectationParser._inverted_configuration_tokens.get(modifier, modifier))
+                # FIXME: Make this all work with the mixed-cased modifiers (e.g. WontFix, Slow, etc).
+                modifier = modifier.upper()
+                if modifier in ('SLOW', 'SKIP', 'REBASELINE', 'WONTFIX'):
+                    new_expectations.append(TestExpectationParser._inverted_expectation_tokens.get(modifier))
+                else:
+                    new_modifiers.append(TestExpectationParser._inverted_configuration_tokens.get(modifier, modifier))
 
         for expectation in expectations:
             expectation = expectation.upper()
@@ -477,7 +503,8 @@
             if new_modifiers:
                 result += '[ %s ] ' % ' '.join(new_modifiers)
         result += name
-        if include_expectations and new_expectations and set(new_expectations) != set(['Skip', 'Pass']):
+        if include_expectations and new_expectations:
+            new_expectations = TestExpectationLine._filter_redundant_expectations(new_expectations)
             result += ' [ %s ]' % ' '.join(sorted(set(new_expectations)))
         if include_comment and comment is not None:
             result += " #%s" % comment
@@ -572,6 +599,15 @@
     def get_expectations_string(self, test):
         """Returns the expectatons for the given test as an uppercase string.
         If there are no expectations for the test, then "PASS" is returned."""
+        if self.get_expectation_line(test).is_skipped_outside_expectations_file:
+            return 'NOTRUN'
+
+        if self.has_modifier(test, WONTFIX):
+            return TestExpectationParser.WONTFIX_MODIFIER.upper()
+
+        if self.has_modifier(test, SKIP):
+            return TestExpectationParser.SKIP_MODIFIER.upper()
+
         expectations = self.get_expectations(test)
         retval = []
 
@@ -585,6 +621,9 @@
         for item in TestExpectations.EXPECTATIONS.items():
             if item[1] == expectation:
                 return item[0].upper()
+        for item in TestExpectations.MODIFIERS.items():
+            if item[1] == expectation:
+                return item[0].upper()
         raise ValueError(expectation)
 
     def remove_expectation_line(self, test):
@@ -593,14 +632,14 @@
         self._clear_expectations_for_test(test)
         del self._test_to_expectation_line[test]
 
-    def add_expectation_line(self, expectation_line, in_skipped=False):
+    def add_expectation_line(self, expectation_line, override_existing_matches=False):
         """Returns a list of warnings encountered while matching modifiers."""
 
         if expectation_line.is_invalid():
             return
 
         for test in expectation_line.matching_tests:
-            if not in_skipped and self._already_seen_better_match(test, expectation_line):
+            if not override_existing_matches and self._already_seen_better_match(test, expectation_line):
                 continue
 
             self._clear_expectations_for_test(test)
@@ -875,7 +914,8 @@
             expectations_dict_index += 1
 
         # FIXME: move ignore_tests into port.skipped_layout_tests()
-        self.add_skipped_tests(port.skipped_layout_tests(tests).union(set(port.get_option('ignore_tests', []))))
+        self.add_extra_skipped_tests(port.skipped_layout_tests(tests).union(set(port.get_option('ignore_tests', []))))
+        self.add_flaky_expectations_from_bot()
 
         self._has_warnings = False
         self._report_warnings()
@@ -997,7 +1037,7 @@
             if self._model_all_expectations or self._test_config in expectation_line.matching_configurations:
                 self._model.add_expectation_line(expectation_line)
 
-    def add_skipped_tests(self, tests_to_skip):
+    def add_extra_skipped_tests(self, tests_to_skip):
         if not tests_to_skip:
             return
         for test in self._expectations:
@@ -1006,7 +1046,15 @@
 
         for test_name in tests_to_skip:
             expectation_line = self._parser.expectation_for_skipped_test(test_name)
-            self._model.add_expectation_line(expectation_line, in_skipped=True)
+            self._model.add_expectation_line(expectation_line, override_existing_matches=True)
+
+    def add_flaky_expectations_from_bot(self):
+        # FIXME: Right now, this will show the expectations entry in the flakiness dashboard rows for each test
+        # to be whatever the bot thinks they should be. Is this a good thing?
+        bot_expectations = self._port.bot_expectations()
+        for test_name in bot_expectations:
+            expectation_line = self._parser.expectation_line_for_test(test_name, bot_expectations[test_name])
+            self._model.add_expectation_line(expectation_line, override_existing_matches=True)
 
     def add_expectation_line(self, expectation_line):
         self._model.add_expectation_line(expectation_line)
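
A brief standalone sketch (hypothetical helper names) of the two behavioral changes above: bug identifiers are now kept verbatim in their URL-style form rather than rewritten to BUGWK/BUGCR tokens, and redundant Pass expectations are collapsed when a line is serialized:

    # Mirrors the prefix constants and _filter_redundant_expectations() above.
    BUG_PREFIXES = ('webkit.org/b/', 'crbug.com/',
                    'code.google.com/p/v8/issues/detail?id=', 'Bug(')

    def is_bug_token(token):
        return token.startswith(BUG_PREFIXES)

    def filter_redundant_expectations(expectations):
        if set(expectations) == set(['Pass', 'Skip']):
            return ['Skip']
        if set(expectations) == set(['Pass', 'Slow']):
            return ['Slow']
        return expectations

    assert is_bug_token('crbug.com/12345')
    assert is_bug_token('Bug(dpranke)')
    assert not is_bug_token('BUG1234')  # old-style identifiers are no longer recognized
    assert filter_redundant_expectations(['Pass', 'Skip']) == ['Skip']
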
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
index 8e6ffdd..5c5972f 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
@@ -243,6 +243,24 @@
                                                      'failures/expected/text.html') in
                          self._exp.get_tests_with_result_type(SKIP))
 
+    def test_bot_test_expectations(self):
+        test_name = 'failures/expected/text.html'
+
+        expectations_dict = OrderedDict()
+        expectations_dict['expectations'] = "Bug(x) %s [ ImageOnlyFailure ]\n" % test_name
+        self._port.expectations_dict = lambda: expectations_dict
+
+        expectations = TestExpectations(self._port, self.get_basic_tests())
+        self.assertEqual(expectations.get_expectations(self.get_test(test_name)), set([IMAGE]))
+
+        def bot_expectations():
+            return {test_name: ['PASS', 'IMAGE']}
+        self._port.bot_expectations = bot_expectations
+        self._port._options.ignore_flaky = 'very-flaky'
+
+        expectations = TestExpectations(self._port, self.get_basic_tests())
+        self.assertEqual(expectations.get_expectations(self.get_test(test_name)), set([PASS, IMAGE]))
+
 
 class SkippedTests(Base):
     def check(self, expectations, overrides, skips, lint=False):
@@ -257,9 +275,9 @@
         expectations_to_lint = expectations_dict if lint else None
         exp = TestExpectations(port, ['failures/expected/text.html'], expectations_to_lint=expectations_to_lint)
 
-        # Check that the expectation is for BUG_DUMMY SKIP : ... [ Pass ]
+        # Check that the expectation is for SKIP : ... [ Pass ]
         self.assertEqual(exp.get_modifiers('failures/expected/text.html'),
-                          [TestExpectationParser.DUMMY_BUG_MODIFIER, TestExpectationParser.SKIP_MODIFIER, TestExpectationParser.WONTFIX_MODIFIER])
+                          [TestExpectationParser.SKIP_MODIFIER, TestExpectationParser.WONTFIX_MODIFIER])
         self.assertEqual(exp.get_expectations('failures/expected/text.html'), set([PASS]))
 
     def test_skipped_tests_work(self):
@@ -296,6 +314,12 @@
         _, _, logs = capture.restore_output()
         self.assertEqual('The following test foo/bar/baz.html from the Skipped list doesn\'t exist\n', logs)
 
+    def test_expectations_string(self):
+        self.parse_exp(self.get_basic_expectations())
+        notrun = 'failures/expected/text.html'
+        self._exp.add_extra_skipped_tests([notrun])
+        self.assertEqual('NOTRUN', self._exp.get_expectations_string(notrun))
+
 
 class ExpectationSyntaxTests(Base):
     def test_unrecognized_expectation(self):
@@ -326,10 +350,10 @@
         self.assert_tokenize_exp('foo.html', modifiers=['SKIP'], expectations=['PASS'])
 
     def test_bare_name_and_bugs(self):
-        self.assert_tokenize_exp('webkit.org/b/12345 foo.html', modifiers=['BUGWK12345', 'SKIP'], expectations=['PASS'])
-        self.assert_tokenize_exp('crbug.com/12345 foo.html', modifiers=['BUGCR12345', 'SKIP'], expectations=['PASS'])
-        self.assert_tokenize_exp('Bug(dpranke) foo.html', modifiers=['BUGDPRANKE', 'SKIP'], expectations=['PASS'])
-        self.assert_tokenize_exp('crbug.com/12345 crbug.com/34567 foo.html', modifiers=['BUGCR12345', 'BUGCR34567', 'SKIP'], expectations=['PASS'])
+        self.assert_tokenize_exp('webkit.org/b/12345 foo.html', modifiers=['webkit.org/b/12345', 'SKIP'], expectations=['PASS'])
+        self.assert_tokenize_exp('crbug.com/12345 foo.html', modifiers=['crbug.com/12345', 'SKIP'], expectations=['PASS'])
+        self.assert_tokenize_exp('Bug(dpranke) foo.html', modifiers=['Bug(dpranke)', 'SKIP'], expectations=['PASS'])
+        self.assert_tokenize_exp('crbug.com/12345 crbug.com/34567 foo.html', modifiers=['crbug.com/12345', 'crbug.com/34567', 'SKIP'], expectations=['PASS'])
 
     def test_comments(self):
         self.assert_tokenize_exp("# comment", name=None, comment="# comment")
@@ -374,10 +398,10 @@
 
     def test_bad_bugid(self):
         try:
-            self.parse_exp('BUG1234 failures/expected/text.html [ Failure ]', is_lint_mode=True)
+            self.parse_exp('crbug/1234 failures/expected/text.html [ Failure ]', is_lint_mode=True)
             self.fail('should have raised an error about a bad bug identifier')
         except ParseError, exp:
-            self.assertEqual(len(exp.warnings), 1)
+            self.assertEqual(len(exp.warnings), 2)
 
     def test_missing_bugid(self):
         self.parse_exp('failures/expected/text.html [ Failure ]')
@@ -563,6 +587,30 @@
         self.assertEqual(len(self._exp.get_rebaselining_failures()), 0)
 
 
+class TestExpectationsParserTests(unittest.TestCase):
+    def __init__(self, testFunc):
+        host = MockHost()
+        test_port = host.port_factory.get('test-win-xp', None)
+        self._converter = TestConfigurationConverter(test_port.all_test_configurations(), test_port.configuration_specifier_macros())
+        unittest.TestCase.__init__(self, testFunc)
+        self._parser = TestExpectationParser(host.port_factory.get('test-win-xp', None), [], allow_rebaseline_modifier=False)
+
+    def test_expectation_line_for_test(self):
+        # This is kind of a silly test, but it at least ensures that we don't throw an error.
+        test_name = 'foo/test.html'
+        expectations = set(["PASS", "IMAGE"])
+
+        expectation_line = TestExpectationLine()
+        expectation_line.original_string = test_name
+        expectation_line.name = test_name
+        expectation_line.filename = '<Bot TestExpectations>'
+        expectation_line.line_number = 0
+        expectation_line.expectations = expectations
+        self._parser._parse_line(expectation_line)
+
+        self.assertEqual(self._parser.expectation_line_for_test(test_name, expectations), expectation_line)
+
+
 class TestExpectationSerializationTests(unittest.TestCase):
     def __init__(self, testFunc):
         host = MockHost()
@@ -620,7 +668,7 @@
 
     def test_parsed_to_string(self):
         expectation_line = TestExpectationLine()
-        expectation_line.parsed_bug_modifiers = ['BUGX']
+        expectation_line.parsed_bug_modifiers = ['Bug(x)']
         expectation_line.name = 'test/name/for/realz.html'
         expectation_line.parsed_expectations = set([IMAGE])
         self.assertEqual(expectation_line.to_string(self._converter), None)
@@ -663,7 +711,6 @@
 
     def test_string_roundtrip(self):
         self.assert_round_trip('')
-        self.assert_round_trip('FOO')
         self.assert_round_trip('[')
         self.assert_round_trip('FOO [')
         self.assert_round_trip('FOO ] bar')
@@ -700,7 +747,7 @@
         def add_line(matching_configurations, reconstitute):
             expectation_line = TestExpectationLine()
             expectation_line.original_string = "Nay"
-            expectation_line.parsed_bug_modifiers = ['BUGX']
+            expectation_line.parsed_bug_modifiers = ['Bug(x)']
             expectation_line.name = 'Yay'
             expectation_line.parsed_expectations = set([IMAGE])
             expectation_line.matching_configurations = matching_configurations
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py b/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py
index 6e14eb6..5d78c60 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py
@@ -89,9 +89,10 @@
 
 
 class RunDetails(object):
-    def __init__(self, exit_code, summarized_results=None, initial_results=None, retry_results=None, enabled_pixel_tests_in_retry=False):
+    def __init__(self, exit_code, summarized_full_results=None, summarized_failing_results=None, initial_results=None, retry_results=None, enabled_pixel_tests_in_retry=False):
         self.exit_code = exit_code
-        self.summarized_results = summarized_results
+        self.summarized_full_results = summarized_full_results
+        self.summarized_failing_results = summarized_failing_results
         self.initial_results = initial_results
         self.retry_results = retry_results
         self.enabled_pixel_tests_in_retry = enabled_pixel_tests_in_retry
@@ -119,7 +120,7 @@
     return test_dict
 
 
-def summarize_results(port_obj, expectations, initial_results, retry_results, enabled_pixel_tests_in_retry):
+def summarize_results(port_obj, expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, only_include_failing=False):
     """Returns a dictionary containing a summary of the test runs, with the following fields:
         'version': a version indicator
         'fixable': The number of fixable tests (NOW - PASS)
@@ -166,24 +167,12 @@
         result_type = result.type
         actual = [keywords[result_type]]
 
-        test_dict = {}
-
-        rounded_run_time = round(result.test_run_time, 1)
-        if rounded_run_time:
-            test_dict['time'] = rounded_run_time
-
-        if result.has_stderr:
-            test_dict['has_stderr'] = True
-
-        if result.reftest_type:
-            test_dict.update(reftest_type=list(result.reftest_type))
-
-        if expectations.has_modifier(test_name, test_expectations.WONTFIX):
-            test_dict['wontfix'] = True
+        if only_include_failing and result.type == test_expectations.SKIP:
+            continue
 
         if result_type == test_expectations.PASS:
             num_passes += 1
-            if expected == 'PASS' and result.test_run_time < 1 and not result.has_stderr:
+            if not result.has_stderr and only_include_failing:
                 continue
         elif result_type == test_expectations.CRASH:
             if test_name in initial_results.unexpected_results_by_name:
@@ -208,6 +197,22 @@
             else:
                 num_regressions += 1
 
+        test_dict = {}
+
+        rounded_run_time = round(result.test_run_time, 1)
+        if rounded_run_time:
+            test_dict['time'] = rounded_run_time
+
+        if result.has_stderr:
+            test_dict['has_stderr'] = True
+
+        bugs = expectations.model().get_expectation_line(test_name).parsed_bug_modifiers
+        if bugs:
+            test_dict['bugs'] = bugs
+
+        if result.reftest_type:
+            test_dict.update(reftest_type=list(result.reftest_type))
+
         test_dict['expected'] = expected
         test_dict['actual'] = " ".join(actual)
 
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py b/Tools/Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py
index 126a5ec..de50534 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py
@@ -46,17 +46,19 @@
     return test_results.TestResult(test_name, failures=failures, test_run_time=run_time)
 
 
-def run_results(port):
+def run_results(port, extra_skipped_tests=[]):
     tests = ['passes/text.html', 'failures/expected/timeout.html', 'failures/expected/crash.html', 'failures/expected/hang.html',
              'failures/expected/audio.html', 'passes/skipped/skip.html']
     expectations = test_expectations.TestExpectations(port, tests)
+    if extra_skipped_tests:
+        expectations.add_extra_skipped_tests(extra_skipped_tests)
     return test_run_results.TestRunResults(expectations, len(tests))
 
 
-def summarized_results(port, expected, passing, flaky):
+def summarized_results(port, expected, passing, flaky, only_include_failing=False, extra_skipped_tests=[]):
     test_is_slow = False
 
-    initial_results = run_results(port)
+    initial_results = run_results(port, extra_skipped_tests)
     if expected:
         initial_results.add(get_result('passes/text.html', test_expectations.PASS), expected, test_is_slow)
         initial_results.add(get_result('failures/expected/audio.html', test_expectations.AUDIO), expected, test_is_slow)
@@ -81,14 +83,14 @@
         initial_results.add(get_result('failures/expected/hang.html', test_expectations.TIMEOUT), expected, test_is_slow)
 
     if flaky:
-        retry_results = run_results(port)
+        retry_results = run_results(port, extra_skipped_tests)
         retry_results.add(get_result('passes/text.html'), True, test_is_slow)
         retry_results.add(get_result('failures/expected/timeout.html'), True, test_is_slow)
         retry_results.add(get_result('failures/expected/crash.html'), True, test_is_slow)
     else:
         retry_results = None
 
-    return test_run_results.summarize_results(port, initial_results.expectations, initial_results, retry_results, enabled_pixel_tests_in_retry=False)
+    return test_run_results.summarize_results(port, initial_results.expectations, initial_results, retry_results, enabled_pixel_tests_in_retry=False, only_include_failing=only_include_failing)
 
 
 class InterpretTestFailuresTest(unittest.TestCase):
@@ -143,10 +145,21 @@
         summary = summarized_results(self.port, expected=False, passing=False, flaky=False)
         self.assertNotEquals(summary['blink_revision'], '')
 
+    def test_bug_entry(self):
+        self.port._options.builder_name = 'dummy builder'
+        summary = summarized_results(self.port, expected=False, passing=True, flaky=False)
+        self.assertEquals(summary['tests']['passes']['skipped']['skip.html']['bugs'], ['Bug(test)'])
+
+    def test_extra_skipped_tests(self):
+        self.port._options.builder_name = 'dummy builder'
+        summary = summarized_results(self.port, expected=False, passing=True, flaky=False, extra_skipped_tests=['passes/text.html'])
+        self.assertEquals(summary['tests']['passes']['text.html']['expected'], 'NOTRUN')
+        self.assertNotIn('bugs', summary['tests']['passes']['text.html'])
+
     def test_summarized_results_wontfix(self):
         self.port._options.builder_name = 'dummy builder'
         summary = summarized_results(self.port, expected=False, passing=False, flaky=False)
-        self.assertTrue(summary['tests']['failures']['expected']['hang.html']['wontfix'])
+        self.assertEquals(summary['tests']['failures']['expected']['hang.html']['expected'], 'WONTFIX')
         self.assertTrue(summary['tests']['passes']['text.html']['is_unexpected'])
 
     def test_summarized_results_expected_pass(self):
@@ -155,10 +168,23 @@
         self.assertTrue(summary['tests']['passes']['text.html'])
         self.assertTrue('is_unexpected' not in summary['tests']['passes']['text.html'])
 
+    def test_summarized_results_expected_only_include_failing(self):
+        self.port._options.builder_name = 'dummy builder'
+        summary = summarized_results(self.port, expected=True, passing=False, flaky=False, only_include_failing=True)
+        self.assertNotIn('passes', summary['tests'])
+        self.assertTrue(summary['tests']['failures']['expected']['audio.html'])
+        self.assertTrue(summary['tests']['failures']['expected']['timeout.html'])
+        self.assertTrue(summary['tests']['failures']['expected']['crash.html'])
+
     def test_summarized_results_skipped(self):
         self.port._options.builder_name = 'dummy builder'
         summary = summarized_results(self.port, expected=False, passing=True, flaky=False)
-        self.assertTrue(summary['tests']['passes']['skipped']['skip.html'])
+        self.assertEquals(summary['tests']['passes']['skipped']['skip.html']['expected'], 'SKIP')
+
+    def test_summarized_results_only_include_failing(self):
+        self.port._options.builder_name = 'dummy builder'
+        summary = summarized_results(self.port, expected=False, passing=True, flaky=False, only_include_failing=True)
+        self.assertTrue('passes' not in summary['tests'])
 
     def test_rounded_run_times(self):
         summary = summarized_results(self.port, expected=False, passing=False, flaky=False)
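
Between test_run_results.py and its unit tests above, the single summary is split into a full view and a failing-only view; a minimal sketch of how a caller could build both and package them into the renamed RunDetails fields (the wiring is an assumption, only the signatures come from the hunks):

from webkitpy.layout_tests.models import test_run_results

def build_run_details(port, expectations, initial_results, retry_results,
                      enabled_pixel_tests_in_retry, exit_code):
    # Same summarizer, called twice: once for every test, once restricted to failures.
    full = test_run_results.summarize_results(
        port, expectations, initial_results, retry_results,
        enabled_pixel_tests_in_retry)
    failing = test_run_results.summarize_results(
        port, expectations, initial_results, retry_results,
        enabled_pixel_tests_in_retry, only_include_failing=True)
    return test_run_results.RunDetails(
        exit_code,
        summarized_full_results=full,
        summarized_failing_results=failing,
        initial_results=initial_results,
        retry_results=retry_results,
        enabled_pixel_tests_in_retry=enabled_pixel_tests_in_retry)
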
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/base.py b/Tools/Scripts/webkitpy/layout_tests/port/base.py
index 82a18be..a8805a5 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/base.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/base.py
@@ -54,7 +54,7 @@
 from webkitpy.common.system.executive import ScriptError
 from webkitpy.common.system.systemhost import SystemHost
 from webkitpy.common.webkit_finder import WebKitFinder
-from webkitpy.layout_tests.layout_package.bot_test_expectations import BotTestExpectations
+from webkitpy.layout_tests.layout_package.bot_test_expectations import BotTestExpecationsFactory
 from webkitpy.layout_tests.models.test_configuration import TestConfiguration
 from webkitpy.layout_tests.port import config as port_config
 from webkitpy.layout_tests.port import driver
@@ -379,24 +379,12 @@
                                     actual_filename)
         return ''.join(diff)
 
-    def check_for_leaks(self, process_name, process_pid):
-        # Subclasses should check for leaks in the running process
-        # and print any necessary warnings if leaks are found.
-        # FIXME: We should consider moving much of this logic into
-        # Executive and make it platform-specific instead of port-specific.
-        pass
-
-    def print_leaks_summary(self):
-        # Subclasses can override this to print a summary of leaks found
-        # while running the layout tests.
-        pass
-
     def driver_name(self):
         if self.get_option('driver_name'):
             return self.get_option('driver_name')
-        if self.get_option('content_shell'):
-            return self.CONTENT_SHELL_NAME
-        return 'DumpRenderTree'
+        if self.get_option('dump_render_tree'):
+            return 'DumpRenderTree'
+        return self.CONTENT_SHELL_NAME
 
     def expected_baselines_by_extension(self, test_name):
         """Returns a dict mapping baseline suffix to relative path for each baseline in
@@ -577,33 +565,19 @@
         return reftest_list.get(self._filesystem.join(self.layout_tests_dir(), test_name), [])  # pylint: disable=E1103
 
     def tests(self, paths):
-        """Return the list of tests found. Both generic and platform-specific tests matching paths should be returned."""
-        expanded_paths = self._expanded_paths(paths)
-        tests = self._real_tests(expanded_paths)
-        tests.extend(self._virtual_tests(expanded_paths, self.populated_virtual_test_suites()))
+        """Return the list of tests found matching paths."""
+        tests = self._real_tests(paths)
+        tests.extend(self._virtual_tests(paths, self.populated_virtual_test_suites()))
         return tests
 
-    def _expanded_paths(self, paths):
-        expanded_paths = []
-        fs = self._filesystem
-        all_platform_dirs = [path for path in fs.glob(fs.join(self.layout_tests_dir(), 'platform', '*')) if fs.isdir(path)]
-        for path in paths:
-            expanded_paths.append(path)
-            if self.test_isdir(path) and not path.startswith('platform'):
-                for platform_dir in all_platform_dirs:
-                    if fs.isdir(fs.join(platform_dir, path)) and platform_dir in self.baseline_search_path():
-                        expanded_paths.append(self.relative_test_filename(fs.join(platform_dir, path)))
-
-        return expanded_paths
-
     def _real_tests(self, paths):
         # When collecting test cases, skip these directories
-        skipped_directories = set(['.svn', '_svn', 'resources', 'script-tests', 'reference', 'reftest'])
+        skipped_directories = set(['.svn', '_svn', 'platform', 'resources', 'script-tests', 'reference', 'reftest'])
         files = find_files.find(self._filesystem, self.layout_tests_dir(), paths, skipped_directories, Port.is_test_file, self.test_key)
         return [self.relative_test_filename(f) for f in files]
 
     # When collecting test cases, we include any file with these extensions.
-    _supported_file_extensions = set(['.html', '.xml', '.xhtml', '.pl',
+    _supported_file_extensions = set(['.html', '.xml', '.xhtml', '.xht', '.pl',
                                       '.htm', '.php', '.svg', '.mht'])
 
     @staticmethod
@@ -732,7 +706,7 @@
 
     def skipped_layout_tests(self, test_list):
         """Returns tests skipped outside of the TestExpectations files."""
-        return set(self._tests_for_other_platforms()).union(self._skipped_tests_for_unsupported_features(test_list))
+        return set(self._skipped_tests_for_unsupported_features(test_list))
 
     def _tests_from_skipped_file_contents(self, skipped_file_contents):
         tests_to_skip = []
@@ -906,7 +880,7 @@
 
     def create_driver(self, worker_number, no_timeout=False):
         """Return a newly created Driver subclass for starting/stopping the test driver."""
-        return driver.DriverProxy(self, worker_number, self._driver_class(), pixel_tests=self.get_option('pixel_tests'), no_timeout=no_timeout)
+        return self._driver_class()(self, worker_number, pixel_tests=self.get_option('pixel_tests'), no_timeout=no_timeout)
 
     def start_helper(self):
         """If a port needs to reconfigure graphics settings or do other
@@ -1038,12 +1012,6 @@
         # FIXME: rename this to test_expectations() once all the callers are updated to know about the ordered dict.
         expectations = OrderedDict()
 
-        ignore_flaky_tests = self.get_option('ignore_flaky_tests')
-        if ignore_flaky_tests == 'very-flaky' or ignore_flaky_tests == 'maybe-flaky':
-            ignore_only_very_flaky = self.get_option('ignore_flaky_tests') == 'very-flaky'
-            full_port_name = self.determine_full_port_name(self.host, self._options, self.port_name)
-            expectations['autogenerated'] = BotTestExpectations(ignore_only_very_flaky).expectations_string(full_port_name)
-
         for path in self.expectations_files():
             if self._filesystem.exists(path):
                 expectations[path] = self._filesystem.read_text_file(path)
@@ -1057,6 +1025,18 @@
                 _log.warning("additional_expectations path '%s' does not exist" % path)
         return expectations
 
+    def bot_expectations(self):
+        if not self.get_option('ignore_flaky_tests'):
+            return {}
+
+        full_port_name = self.determine_full_port_name(self.host, self._options, self.port_name)
+        ignore_only_very_flaky = self.get_option('ignore_flaky_tests') == 'very-flaky'
+        factory = BotTestExpecationsFactory()
+        expectations = factory.expectations_for_port(full_port_name)
+        if not expectations:
+            return {}
+        return expectations.flakes_by_path(ignore_only_very_flaky)
+
     def _port_specific_expectations_files(self):
         # Unlike baseline_search_path, we only want to search [WK2-PORT, PORT-VERSION, PORT] and any directories
         # included via --additional-platform-directory, not the full casade.
@@ -1368,61 +1348,8 @@
             return False
         if self._options.pixel_test_directories:
             return any(test_input.test_name.startswith(directory) for directory in self._options.pixel_test_directories)
-        return self._should_run_as_pixel_test(test_input)
-
-    def _should_run_as_pixel_test(self, test_input):
-        # Default behavior is to allow all test to run as pixel tests if --pixel-tests is on and
-        # --pixel-test-directory is not specified.
         return True
 
-    # FIXME: Eventually we should standarize port naming, and make this method smart enough
-    # to use for all port configurations (including architectures, graphics types, etc).
-    def _port_flag_for_scripts(self):
-        # This is overrriden by ports which need a flag passed to scripts to distinguish the use of that port.
-        # For example --qt on linux, since a user might have both Gtk and Qt libraries installed.
-        # FIXME: Chromium should override this once ChromiumPort is a WebKitPort.
-        return None
-
-    # This is modeled after webkitdirs.pm argumentsForConfiguration() from old-run-webkit-tests
-    def _arguments_for_configuration(self):
-        config_args = []
-        config_args.append(self._config.flag_for_configuration(self.get_option('configuration')))
-        # FIXME: We may need to add support for passing --32-bit like old-run-webkit-tests had.
-        port_flag = self._port_flag_for_scripts()
-        if port_flag:
-            config_args.append(port_flag)
-        return config_args
-
-    def _run_script(self, script_name, args=None, include_configuration_arguments=True, decode_output=True, env=None):
-        run_script_command = [self.path_to_script(script_name)]
-        if include_configuration_arguments:
-            run_script_command.extend(self._arguments_for_configuration())
-        if args:
-            run_script_command.extend(args)
-        output = self._executive.run_command(run_script_command, cwd=self.webkit_base(), decode_output=decode_output, env=env)
-        _log.debug('Output of %s:\n%s' % (run_script_command, output))
-        return output
-
-    def _tests_for_other_platforms(self):
-        # By default we will skip any directory under LayoutTests/platform
-        # that isn't in our baseline search path (this mirrors what
-        # old-run-webkit-tests does in findTestsToRun()).
-        # Note this returns LayoutTests/platform/*, not platform/*/*.
-        entries = self._filesystem.glob(self._webkit_baseline_path('*'))
-        dirs_to_skip = []
-        for entry in entries:
-            if self._filesystem.isdir(entry) and entry not in self.baseline_search_path():
-                basename = self._filesystem.basename(entry)
-                dirs_to_skip.append('platform/%s' % basename)
-        return dirs_to_skip
-
-    def _runtime_feature_list(self):
-        """If a port makes certain features available only through runtime flags, it can override this routine to indicate which ones are available."""
-        return None
-
-    def nm_command(self):
-        return 'nm'
-
     def _modules_to_search_for_symbols(self):
         path = self._path_to_webcore_library()
         if path:
@@ -1433,39 +1360,20 @@
         symbols = ''
         for path_to_module in self._modules_to_search_for_symbols():
             try:
-                symbols += self._executive.run_command([self.nm_command(), path_to_module], error_handler=self._executive.ignore_error)
+                symbols += self._executive.run_command(['nm', path_to_module], error_handler=self._executive.ignore_error)
             except OSError, e:
                 _log.warn("Failed to run nm: %s.  Can't determine supported features correctly." % e)
         return symbols
 
-    # Ports which use run-time feature detection should define this method and return
-    # a dictionary mapping from Feature Names to skipped directoires.  NRWT will
-    # run DumpRenderTree --print-supported-features and parse the output.
-    # If the Feature Names are not found in the output, the corresponding directories
-    # will be skipped.
-    def _missing_feature_to_skipped_tests(self):
-        """Return the supported feature dictionary. Keys are feature names and values
-        are the lists of directories to skip if the feature name is not matched."""
-        # FIXME: This list matches WebKitWin and should be moved onto the Win port.
-        return {
-            "Accelerated Compositing": ["compositing"],
-            "3D Rendering": ["animations/3d", "transforms/3d"],
-        }
-
     # Ports which use compile-time feature detection should define this method and return
     # a dictionary mapping from symbol substrings to possibly disabled test directories.
     # When the symbol substrings are not matched, the directories will be skipped.
     # If ports don't ever enable certain features, then those directories can just be
     # in the Skipped list instead of compile-time-checked here.
     def _missing_symbol_to_skipped_tests(self):
-        """Return the supported feature dictionary. The keys are symbol-substrings
-        and the values are the lists of directories to skip if that symbol is missing."""
         return {
-            "MathMLElement": ["mathml"],
-            "GraphicsLayer": ["compositing"],
-            "WebGLShader": ["fast/canvas/webgl", "compositing/webgl", "http/tests/canvas/webgl", "webgl"],
-            "MHTMLArchive": ["mhtml"],
-            "CSSVariableValue": ["fast/css/variables", "inspector/styles/variables"],
+            "ff_mp3_decoder": ["webaudio/codec-tests/mp3"],
+            "ff_aac_decoder": ["webaudio/codec-tests/aac"],
         }
 
     def _has_test_in_directories(self, directory_lists, test_list):
@@ -1479,33 +1387,16 @@
         return False
 
     def _skipped_tests_for_unsupported_features(self, test_list):
-        # Only check the runtime feature list of there are tests in the test_list that might get skipped.
-        # This is a performance optimization to avoid the subprocess call to DRT.
-        # If the port supports runtime feature detection, disable any tests
-        # for features missing from the runtime feature list.
-        # If _runtime_feature_list returns a non-None value, then prefer
-        # runtime feature detection over static feature detection.
-        if self._has_test_in_directories(self._missing_feature_to_skipped_tests().values(), test_list):
-            supported_feature_list = self._runtime_feature_list()
-            if supported_feature_list is not None:
-                return reduce(operator.add, [directories for feature, directories in self._missing_feature_to_skipped_tests().items() if feature not in supported_feature_list])
-
         # Only check the symbols if there are tests in the test_list that might get skipped.
         # This is a performance optimization to avoid calling nm.
-        # Runtime feature detection not supported, fallback to static dectection:
+        # Runtime feature detection is not supported, so fall back to static detection:
         # Disable any tests for symbols missing from the executable or libraries.
         if self._has_test_in_directories(self._missing_symbol_to_skipped_tests().values(), test_list):
             symbols_string = self._symbols_string()
             if symbols_string is not None:
                 return reduce(operator.add, [directories for symbol_substring, directories in self._missing_symbol_to_skipped_tests().items() if symbol_substring not in symbols_string], [])
-
         return []
 
-    def _wk2_port_name(self):
-        # By current convention, the WebKit2 name is always mac-wk2, win-wk2, not mac-leopard-wk2, etc,
-        # except for Qt because WebKit2 is only supported by Qt 5.0 (therefore: qt-5.0-wk2).
-        return "%s-wk2" % self.port_name
-
 
 class VirtualTestSuite(object):
     def __init__(self, name, base, args, tests=None):
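
The new bot_expectations() hook above returns flaky outcomes keyed by test path instead of a pre-rendered expectations string; a hedged sketch of how a caller might render that dict (the 'Bug(auto)' formatting is an assumption for illustration, only the method's return shape follows from the hunk):

def render_bot_expectations(port):
    # {} unless --ignore-flaky-tests was passed; otherwise e.g.
    # {'foo/bar.html': ['PASS', 'TIMEOUT']}.
    flakes = port.bot_expectations()
    lines = []
    for test_path, outcomes in sorted(flakes.items()):
        lines.append('Bug(auto) %s [ %s ]' % (test_path, ' '.join(outcomes)))
    return '\n'.join(lines)
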
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
index be8905b..497cf29 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
@@ -294,13 +294,6 @@
         except AssertionError:
             return self._build_path('layout-test-results')
 
-    def _missing_symbol_to_skipped_tests(self):
-        # FIXME: Should WebKitPort have these definitions also?
-        return {
-            "ff_mp3_decoder": ["webaudio/codec-tests/mp3"],
-            "ff_aac_decoder": ["webaudio/codec-tests/aac"],
-        }
-
     def setup_test_run(self):
         super(ChromiumPort, self).setup_test_run()
         # Delete the disk cache if any to ensure a clean test run.
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py
index 046317b..786f2c7 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py
@@ -167,7 +167,7 @@
     def command_line_file(self):
         return '/data/local/tmp/chrome-native-tests-command-line'
     def additional_command_line_flags(self):
-        return ['--create-stdin-fifo', '--separate-stderr-fifo']
+        return ['--create-stdin-fifo', '--separate-stderr-fifo', '--disable-impl-side-painting']
     def device_directory(self):
         return DEVICE_SOURCE_ROOT_DIR + 'drt/'
 
@@ -368,6 +368,13 @@
         # The driver doesn't respond to closing stdin, so we might as well stop the driver immediately.
         return 0.0
 
+    def driver_name(self):
+        if self.get_option('driver_name'):
+            return self.get_option('driver_name')
+        if self.get_option('content_shell'):
+            return self.CONTENT_SHELL_NAME
+        return 'DumpRenderTree'
+
     def default_child_processes(self):
         if self._devices:
             return len(self._devices)
@@ -417,8 +424,6 @@
         super(ChromiumAndroidPort, self).start_http_server(additional_dirs, number_of_servers)
 
     def create_driver(self, worker_number, no_timeout=False):
-        # We don't want the default DriverProxy which is not compatible with our driver.
-        # See comments in ChromiumAndroidDriver.start().
         return ChromiumAndroidDriver(self, worker_number, pixel_tests=self.get_option('pixel_tests'), driver_details=self._driver_details,
                                      # Force no timeout to avoid test driver timeouts before NRWT.
                                      no_timeout=True)
@@ -606,7 +611,6 @@
 class ChromiumAndroidDriver(driver.Driver):
     def __init__(self, port, worker_number, pixel_tests, driver_details, no_timeout=False):
         super(ChromiumAndroidDriver, self).__init__(port, worker_number, pixel_tests, no_timeout)
-        self._cmd_line = None
         self._in_fifo_path = driver_details.device_fifo_directory() + 'stdin.fifo'
         self._out_fifo_path = driver_details.device_fifo_directory() + 'test.fifo'
         self._err_fifo_path = driver_details.device_fifo_directory() + 'stderr.fifo'
@@ -865,12 +869,11 @@
         return super(ChromiumAndroidDriver, self).run_test(driver_input, stop_when_done)
 
     def start(self, pixel_tests, per_test_args):
-        # Only one driver instance is allowed because of the nature of Android activity.
-        # The single driver needs to restart content_shell when the command line changes.
-        cmd_line = self._android_driver_cmd_line(pixel_tests, per_test_args)
-        if cmd_line != self._cmd_line:
+        # We override the default start() so that we can call _android_driver_cmd_line()
+        # instead of cmd_line().
+        new_cmd_line = self._android_driver_cmd_line(pixel_tests, per_test_args)
+        if new_cmd_line != self._current_cmd_line:
             self.stop()
-            self._cmd_line = cmd_line
         super(ChromiumAndroidDriver, self).start(pixel_tests, per_test_args)
 
     def _start(self, pixel_tests, per_test_args):
@@ -893,7 +896,7 @@
         self._forwarder_process.start()
 
         self._android_commands.run(['logcat', '-c'])
-        self._android_commands.run(['shell', 'echo'] + self._cmd_line + ['>', self._driver_details.command_line_file()])
+        self._android_commands.run(['shell', 'echo'] + self._android_driver_cmd_line(pixel_tests, per_test_args) + ['>', self._driver_details.command_line_file()])
         start_result = self._android_commands.run(['shell', 'am', 'start', '-e', 'RunInSubThread', '-n', self._driver_details.activity_name()])
         if start_result.find('Exception') != -1:
             self._log_error('Failed to start the content_shell application. Exception:\n' + start_result)
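
Condensed, standalone sketch of the device launch the rewritten _start() performs above; AndroidCommands is treated here as an assumed thin adb wrapper exposing run(), matching how the hunk uses it, and the raise stands in for the real driver's _log_error path:

def launch_driver(android_commands, driver_details, cmd_line):
    # Clear stale log output, hand the freshly built command line to the
    # device-side file that content_shell reads, then start the activity.
    android_commands.run(['logcat', '-c'])
    android_commands.run(['shell', 'echo'] + cmd_line +
                         ['>', driver_details.command_line_file()])
    result = android_commands.run(['shell', 'am', 'start', '-e', 'RunInSubThread',
                                   '-n', driver_details.activity_name()])
    if result.find('Exception') != -1:
        raise RuntimeError('Failed to start the content_shell application:\n' + result)
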
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux_unittest.py
index 2af4fd8..01a7ed7 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux_unittest.py
@@ -104,7 +104,7 @@
         self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/sconsbuild/Release', '/mock-checkout/Source/WebKit/chromium/out/Release'], '/mock-checkout/Source/WebKit/chromium/sconsbuild/Release')
 
     def test_driver_name_option(self):
-        self.assertTrue(self.make_port()._path_to_driver().endswith('DumpRenderTree'))
+        self.assertTrue(self.make_port()._path_to_driver().endswith('content_shell'))
         self.assertTrue(self.make_port(options=MockOptions(driver_name='OtherDriver'))._path_to_driver().endswith('OtherDriver'))
 
     def test_path_to_image_diff(self):
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py
index 15476f8..da01a4c 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py
@@ -96,7 +96,7 @@
         self.assertEqual(port._build_path(), '/mock-checkout/xcodebuild/Release')
 
     def test_driver_name_option(self):
-        self.assertTrue(self.make_port()._path_to_driver().endswith('DumpRenderTree'))
+        self.assertTrue(self.make_port()._path_to_driver().endswith('Content Shell'))
         self.assertTrue(self.make_port(options=MockOptions(driver_name='OtherDriver'))._path_to_driver().endswith('OtherDriver'))
 
     def test_path_to_image_diff(self):
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_win_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_win_unittest.py
index 2c51598..03c8181 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_win_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_win_unittest.py
@@ -126,7 +126,7 @@
         self.assertEqual('win', self.make_port().operating_system())
 
     def test_driver_name_option(self):
-        self.assertTrue(self.make_port()._path_to_driver().endswith('DumpRenderTree.exe'))
+        self.assertTrue(self.make_port()._path_to_driver().endswith('content_shell.exe'))
         self.assertTrue(self.make_port(options=MockOptions(driver_name='OtherDriver'))._path_to_driver().endswith('OtherDriver.exe'))
 
     def test_path_to_image_diff(self):
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/driver.py b/Tools/Scripts/webkitpy/layout_tests/port/driver.py
index 5858f55..4fa4488 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/driver.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/driver.py
@@ -140,6 +140,7 @@
         self.error_from_test = str()
         self.err_seen_eof = False
         self._server_process = None
+        self._current_cmd_line = None
 
         self._measurements = {}
         if self._port.get_option("profile"):
@@ -161,6 +162,13 @@
 
         Returns a DriverOutput object.
         """
+        base = self._port.lookup_virtual_test_base(driver_input.test_name)
+        if base:
+            virtual_driver_input = copy.copy(driver_input)
+            virtual_driver_input.test_name = base
+            virtual_driver_input.args = self._port.lookup_virtual_test_args(driver_input.test_name)
+            return self.run_test(virtual_driver_input, stop_when_done)
+
         start_time = time.time()
         self.start(driver_input.should_run_pixel_test, driver_input.args)
         test_begin_time = time.time()
@@ -269,14 +277,8 @@
         return False
 
     def start(self, pixel_tests, per_test_args):
-        # FIXME: Callers shouldn't normally call this, since this routine
-        # may not be specifying the correct combination of pixel test and
-        # per_test args.
-        #
-        # The only reason we have this routine at all is so the perftestrunner
-        # can pause before running a test; it might be better to push that
-        # into run_test() directly.
-        if not self._server_process:
+        new_cmd_line = self.cmd_line(pixel_tests, per_test_args)
+        if not self._server_process or new_cmd_line != self._current_cmd_line:
             self._start(pixel_tests, per_test_args)
             self._run_post_start_tasks()
 
@@ -300,8 +302,10 @@
         environment = self._setup_environ_for_driver(environment)
         self._crashed_process_name = None
         self._crashed_pid = None
-        self._server_process = self._port._server_process_constructor(self._port, server_name, self.cmd_line(pixel_tests, per_test_args), environment)
+        cmd_line = self.cmd_line(pixel_tests, per_test_args)
+        self._server_process = self._port._server_process_constructor(self._port, server_name, cmd_line, environment)
         self._server_process.start()
+        self._current_cmd_line = cmd_line
 
     def _run_post_start_tasks(self):
         # Remote drivers may override this to delay post-start tasks until the server has ack'd.
@@ -323,6 +327,8 @@
             self._port._filesystem.rmtree(str(self._driver_tempdir))
             self._driver_tempdir = None
 
+        self._current_cmd_line = None
+
     def cmd_line(self, pixel_tests, per_test_args):
         cmd = self._command_wrapper(self._port.get_option('wrapper'))
         cmd.append(self._port._path_to_driver())
@@ -497,63 +503,3 @@
             self.decoded_content = base64.b64decode(self.content)
         else:
             self.decoded_content = self.content
-
-class DriverProxy(object):
-    """A wrapper for managing two Driver instances, one with pixel tests and
-    one without. This allows us to handle plain text tests and ref tests with a
-    single driver."""
-
-    def __init__(self, port, worker_number, driver_instance_constructor, pixel_tests, no_timeout):
-        self._port = port
-        self._worker_number = worker_number
-        self._driver_instance_constructor = driver_instance_constructor
-        self._no_timeout = no_timeout
-
-        # FIXME: We shouldn't need to create a driver until we actually run a test.
-        self._driver = self._make_driver(pixel_tests)
-        self._running_drivers = {}
-        self._running_drivers[self._cmd_line_as_key(pixel_tests, [])] = self._driver
-
-    def _make_driver(self, pixel_tests):
-        return self._driver_instance_constructor(self._port, self._worker_number, pixel_tests, self._no_timeout)
-
-    # FIXME: this should be a @classmethod (or implemented on Port instead).
-    def is_http_test(self, test_name):
-        return self._driver.is_http_test(test_name)
-
-    # FIXME: this should be a @classmethod (or implemented on Port instead).
-    def test_to_uri(self, test_name):
-        return self._driver.test_to_uri(test_name)
-
-    # FIXME: this should be a @classmethod (or implemented on Port instead).
-    def uri_to_test(self, uri):
-        return self._driver.uri_to_test(uri)
-
-    def run_test(self, driver_input, stop_when_done):
-        base = self._port.lookup_virtual_test_base(driver_input.test_name)
-        if base:
-            virtual_driver_input = copy.copy(driver_input)
-            virtual_driver_input.test_name = base
-            virtual_driver_input.args = self._port.lookup_virtual_test_args(driver_input.test_name)
-            return self.run_test(virtual_driver_input, stop_when_done)
-
-        pixel_tests_needed = driver_input.should_run_pixel_test
-        cmd_line_key = self._cmd_line_as_key(pixel_tests_needed, driver_input.args)
-        if not cmd_line_key in self._running_drivers:
-            self._running_drivers[cmd_line_key] = self._make_driver(pixel_tests_needed)
-
-        return self._running_drivers[cmd_line_key].run_test(driver_input, stop_when_done)
-
-    def has_crashed(self):
-        return any(driver.has_crashed() for driver in self._running_drivers.values())
-
-    def stop(self):
-        for driver in self._running_drivers.values():
-            driver.stop()
-
-    # FIXME: this should be a @classmethod (or implemented on Port instead).
-    def cmd_line(self, pixel_tests=None, per_test_args=None):
-        return self._driver.cmd_line(pixel_tests, per_test_args or [])
-
-    def _cmd_line_as_key(self, pixel_tests, per_test_args):
-        return ' '.join(self.cmd_line(pixel_tests, per_test_args))
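
With DriverProxy removed, a single Driver now performs the virtual-test redirection and restarts itself only when the command line changes. A runnable, simplified sketch of that contract follows; every class and body here is an illustrative stand-in, only the control flow mirrors the hunks above:

import copy

class FakeDriverInput(object):
    def __init__(self, test_name, args, should_run_pixel_test):
        self.test_name = test_name
        self.args = args
        self.should_run_pixel_test = should_run_pixel_test

class SketchPort(object):
    def lookup_virtual_test_base(self, test_name):
        # e.g. 'virtual/gpu/fast/canvas/foo.html' -> 'fast/canvas/foo.html'
        return test_name.split('/', 2)[2] if test_name.startswith('virtual/') else None

    def lookup_virtual_test_args(self, test_name):
        return ['--enable-accelerated-2d-canvas']  # illustrative flag only

class SketchDriver(object):
    def __init__(self, port):
        self._port = port
        self._server_process = None
        self._current_cmd_line = None

    def cmd_line(self, pixel_tests, per_test_args):
        return ['content_shell', '--dump-render-tree'] + list(per_test_args) + ['-']

    def start(self, pixel_tests, per_test_args):
        # Restart only if there is no process yet or the command line changed.
        new_cmd_line = self.cmd_line(pixel_tests, per_test_args)
        if not self._server_process or new_cmd_line != self._current_cmd_line:
            self._server_process = object()        # stands in for the real ServerProcess
            self._current_cmd_line = new_cmd_line

    def run_test(self, driver_input, stop_when_done):
        # Virtual tests are redirected to their base test with extra args,
        # the job DriverProxy.run_test() used to do.
        base = self._port.lookup_virtual_test_base(driver_input.test_name)
        if base:
            redirected = copy.copy(driver_input)
            redirected.test_name = base
            redirected.args = self._port.lookup_virtual_test_args(driver_input.test_name)
            return self.run_test(redirected, stop_when_done)
        self.start(driver_input.should_run_pixel_test, driver_input.args)
        return (driver_input.test_name, list(self._current_cmd_line))
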
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py
index 32173a8..d18c522 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py
@@ -168,7 +168,7 @@
         port = TestWebKitPort()
         port._config.build_directory = lambda configuration: '/mock-build'
         driver = Driver(port, 0, pixel_tests=True, no_timeout=True)
-        self.assertEqual(driver.cmd_line(True, []), ['/mock-build/DumpRenderTree', '--no-timeout', '-'])
+        self.assertEqual(driver.cmd_line(True, []), ['/mock-build/content_shell', '--no-timeout', '--dump-render-tree', '-'])
 
     def test_check_for_driver_crash(self):
         port = TestWebKitPort()
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/mock_drt.py b/Tools/Scripts/webkitpy/layout_tests/port/mock_drt.py
index 50a9a20..8ab92cf 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/mock_drt.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/mock_drt.py
@@ -49,7 +49,7 @@
     sys.path.append(script_dir)
 
 from webkitpy.common.system.systemhost import SystemHost
-from webkitpy.layout_tests.port.driver import DriverInput, DriverOutput, DriverProxy
+from webkitpy.layout_tests.port.driver import DriverInput, DriverOutput
 from webkitpy.layout_tests.port.factory import PortFactory
 
 _log = logging.getLogger(__name__)
@@ -74,10 +74,8 @@
     def check_sys_deps(self, needs_http):
         return True
 
-    def create_driver(self, worker_number, no_timeout=False):
-        # The magic of the MockDRTPort is that we create a driver that has a
-        # cmd_line() method monkey-patched to invoke this script instead of DRT.
-        return DriverProxy(self, worker_number, self._mocked_driver_maker, pixel_tests=self.get_option('pixel_tests'), no_timeout=no_timeout)
+    def _driver_class(self):
+        return self._mocked_driver_maker
 
     @staticmethod
     def _mocked_driver_maker(port, worker_number, pixel_tests, no_timeout=False):
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py b/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py
index 2573608..52d3e64 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py
@@ -64,9 +64,6 @@
     def _symbols_string(self):
         return self.symbols_string
 
-    def _tests_for_other_platforms(self):
-        return ["media", ]
-
     def _tests_for_disabled_features(self):
         return ["accessibility", ]
 
@@ -438,55 +435,28 @@
 
     def test_skipped_directories_for_symbols(self):
         # This first test confirms that the commonly found symbols result in the expected skipped directories.
-        symbols_string = " ".join(["GraphicsLayer", "WebCoreHas3DRendering", "isXHTMLMPDocument", "fooSymbol"])
+        symbols_string = " ".join(["fooSymbol"])
         expected_directories = set([
-            "mathml",  # Requires MathMLElement
-            "fast/canvas/webgl",  # Requires WebGLShader
-            "compositing/webgl",  # Requires WebGLShader
-            "http/tests/canvas/webgl",  # Requires WebGLShader
-            "webgl",  # Requires WebGLShader
-            "mhtml",  # Requires MHTMLArchive
-            "fast/css/variables",  # Requires CSS Variables
-            "inspector/styles/variables",  # Requires CSS Variables
+            "webaudio/codec-tests/mp3",
+            "webaudio/codec-tests/aac",
         ])
 
-        result_directories = set(TestWebKitPort(symbols_string=symbols_string)._skipped_tests_for_unsupported_features(test_list=['mathml/foo.html']))
+        result_directories = set(TestWebKitPort(symbols_string=symbols_string)._skipped_tests_for_unsupported_features(test_list=['webaudio/codec-tests/mp3/foo.html']))
         self.assertEqual(result_directories, expected_directories)
 
         # Test that the nm string parsing actually works:
         symbols_string = """
-000000000124f498 s __ZZN7WebCore13GraphicsLayer12replaceChildEPS0_S1_E19__PRETTY_FUNCTION__
-000000000124f500 s __ZZN7WebCore13GraphicsLayer13addChildAboveEPS0_S1_E19__PRETTY_FUNCTION__
-000000000124f670 s __ZZN7WebCore13GraphicsLayer13addChildBelowEPS0_S1_E19__PRETTY_FUNCTION__
+000000000124f498 s __ZZN7WebCore13ff_mp3_decoder12replaceChildEPS0_S1_E19__PRETTY_FUNCTION__
+000000000124f500 s __ZZN7WebCore13ff_mp3_decoder13addChildAboveEPS0_S1_E19__PRETTY_FUNCTION__
+000000000124f670 s __ZZN7WebCore13ff_mp3_decoder13addChildBelowEPS0_S1_E19__PRETTY_FUNCTION__
 """
-        # Note 'compositing' is not in the list of skipped directories (hence the parsing of GraphicsLayer worked):
+        # Note 'webaudio/codec-tests/mp3' is not in the list of skipped directories (hence the parsing of ff_mp3_decoder worked):
-        expected_directories = set(['mathml', 'compositing/webgl', 'fast/canvas/webgl', 'webgl', 'mhtml', 'http/tests/canvas/webgl', 'fast/css/variables', 'inspector/styles/variables'])
-        result_directories = set(TestWebKitPort(symbols_string=symbols_string)._skipped_tests_for_unsupported_features(test_list=['mathml/foo.html']))
+        expected_directories = set([
+            "webaudio/codec-tests/aac",
+        ])
+        result_directories = set(TestWebKitPort(symbols_string=symbols_string)._skipped_tests_for_unsupported_features(test_list=['webaudio/codec-tests/mp3/foo.html']))
         self.assertEqual(result_directories, expected_directories)
 
-    def test_skipped_directories_for_features(self):
-        supported_features = ["Accelerated Compositing", "Foo Feature"]
-        expected_directories = set(["animations/3d", "transforms/3d"])
-        port = TestWebKitPort(supported_features=supported_features)
-        port._runtime_feature_list = lambda: supported_features
-        result_directories = set(port._skipped_tests_for_unsupported_features(test_list=["animations/3d/foo.html"]))
-        self.assertEqual(result_directories, expected_directories)
-
-    def test_skipped_directories_for_features_no_matching_tests_in_test_list(self):
-        supported_features = ["Accelerated Compositing", "Foo Feature"]
-        expected_directories = set([])
-        result_directories = set(TestWebKitPort(supported_features=supported_features)._skipped_tests_for_unsupported_features(test_list=['foo.html']))
-        self.assertEqual(result_directories, expected_directories)
-
-    def test_skipped_tests_for_unsupported_features_empty_test_list(self):
-        supported_features = ["Accelerated Compositing", "Foo Feature"]
-        expected_directories = set([])
-        result_directories = set(TestWebKitPort(supported_features=supported_features)._skipped_tests_for_unsupported_features(test_list=None))
-        self.assertEqual(result_directories, expected_directories)
-
-    def test_skipped_layout_tests(self):
-        self.assertEqual(TestWebKitPort().skipped_layout_tests(test_list=[]), set(['media']))
-
     def test_expectations_files(self):
         port = TestWebKitPort()
 
@@ -505,7 +475,7 @@
     def test_root_option(self):
         port = TestWebKitPort()
         port._options = MockOptions(root='/foo')
-        self.assertEqual(port._path_to_driver(), "/foo/DumpRenderTree")
+        self.assertEqual(port._path_to_driver(), "/foo/content_shell")
 
     def test_test_expectations(self):
         # Check that we read the expectations file
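
The rewritten symbol tests above exercise the nm-based skipping that now lives in base.py; a small self-contained sketch of that check (the table matches the base.py hunk, the helper itself is a simplification):

MISSING_SYMBOL_TO_SKIPPED_TESTS = {
    'ff_mp3_decoder': ['webaudio/codec-tests/mp3'],
    'ff_aac_decoder': ['webaudio/codec-tests/aac'],
}

def skipped_directories(symbols_string):
    # Any directory whose required symbol substring is absent from the nm
    # output gets skipped.
    skipped = []
    for symbol_substring, directories in MISSING_SYMBOL_TO_SKIPPED_TESTS.items():
        if symbol_substring not in symbols_string:
            skipped.extend(directories)
    return skipped

# e.g. skipped_directories('... ff_mp3_decoder ...') == ['webaudio/codec-tests/aac']
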
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/server_process.py b/Tools/Scripts/webkitpy/layout_tests/port/server_process.py
index 7ce1e06..7983321 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/server_process.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/server_process.py
@@ -330,10 +330,6 @@
         if not self._proc:
             return (None, None)
 
-        # Only bother to check for leaks or stderr if the process is still running.
-        if self.poll() is None:
-            self._port.check_for_leaks(self.name(), self.pid())
-
         now = time.time()
         if self._proc.stdin:
             self._proc.stdin.close()
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py
index 97376aa..665fbfe 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py
@@ -47,9 +47,6 @@
     def results_directory(self):
         return "/mock-results"
 
-    def check_for_leaks(self, process_name, process_pid):
-        pass
-
     def process_kill_time(self):
         return 1
 
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/test.py b/Tools/Scripts/webkitpy/layout_tests/port/test.py
index dad7396..0c9e946 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/test.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/test.py
@@ -27,6 +27,7 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 import base64
+import copy
 import sys
 import time
 
@@ -100,8 +101,8 @@
 #
 # These numbers may need to be updated whenever we add or delete tests.
 #
-TOTAL_TESTS = 104
-TOTAL_SKIPS = 28
+TOTAL_TESTS = 102
+TOTAL_SKIPS = 25
 TOTAL_RETRIES = 14
 
 UNEXPECTED_PASSES = 6
@@ -231,14 +232,10 @@
 
     tests.add('websocket/tests/passes/text.html')
 
-    # For testing test are properly included from platform directories.
+    # For testing that we don't run tests under platform/. Note that these don't contribute to TOTAL_TESTS.
     tests.add('platform/test-mac-leopard/http/test.html')
     tests.add('platform/test-win-win7/http/test.html')
 
-    # For --no-http tests, test that platform specific HTTP tests are properly skipped.
-    tests.add('platform/test-snow-leopard/http/test.html')
-    tests.add('platform/test-snow-leopard/websocket/test.html')
-
     # For testing if perf tests are running in a locked shard.
     tests.add('perf/foo/test.html')
     tests.add('perf/foo/test-ref.html')
@@ -555,22 +552,29 @@
         pixel_tests_flag = '-p' if pixel_tests else ''
         return [self._port._path_to_driver()] + [pixel_tests_flag] + self._port.get_option('additional_drt_flag', []) + per_test_args
 
-    def run_test(self, test_input, stop_when_done):
+    def run_test(self, driver_input, stop_when_done):
+        base = self._port.lookup_virtual_test_base(driver_input.test_name)
+        if base:
+            virtual_driver_input = copy.copy(driver_input)
+            virtual_driver_input.test_name = base
+            virtual_driver_input.args = self._port.lookup_virtual_test_args(driver_input.test_name)
+            return self.run_test(virtual_driver_input, stop_when_done)
+
         if not self.started:
             self.started = True
             self.pid = TestDriver.next_pid
             TestDriver.next_pid += 1
 
         start_time = time.time()
-        test_name = test_input.test_name
-        test_args = test_input.args or []
+        test_name = driver_input.test_name
+        test_args = driver_input.args or []
         test = self._port._tests[test_name]
         if test.keyboard:
             raise KeyboardInterrupt
         if test.exception:
             raise ValueError('exception from ' + test_name)
         if test.hang:
-            time.sleep((float(test_input.timeout) * 4) / 1000.0 + 1.0)  # The 1.0 comes from thread_padding_sec in layout_test_runnery.
+            time.sleep((float(driver_input.timeout) * 4) / 1000.0 + 1.0)  # The 1.0 comes from thread_padding_sec in layout_test_runner.
 
         audio = None
         actual_text = test.actual_text
@@ -601,7 +605,7 @@
         if stop_when_done:
             self.stop()
 
-        if test.actual_checksum == test_input.image_hash:
+        if test.actual_checksum == driver_input.image_hash:
             image = None
         else:
             image = test.actual_image
diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
index 17b47cd..86fc2f6 100644
--- a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
+++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
@@ -173,6 +173,8 @@
             help="Alternative DumpRenderTree binary to use"),
         optparse.make_option("--content-shell", action="store_true",
             help="Use Content Shell instead of DumpRenderTree"),
+        optparse.make_option("--dump-render-tree", action="store_true",
+            help="Use DumpRenderTree instead of Content Shell"),
         optparse.make_option("--additional-platform-directory", action="append",
             default=[], help="Additional directory where to look for test "
                  "baselines (will take precendence over platform baselines). "
@@ -217,9 +219,8 @@
                  "running. (Example: --wrapper='valgrind --smc-check=all')"),
         optparse.make_option("-i", "--ignore-tests", action="append", default=[],
             help="directories or test to ignore (may specify multiple times)"),
-        optparse.make_option("--ignore-flaky-tests", action="store", default="default",
+        optparse.make_option("--ignore-flaky-tests", action="store",
             help=("Control whether tests that are flaky on the bots get ignored."
-                "'default' == Don't use the bot data."
                 "'very-flaky' == Ignore any tests that flaked more than once on the bot."
                 "'maybe-flaky' == Ignore any tests that flaked once on the bot.")),
         optparse.make_option("--test-list", action="append",
diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
index 7a82bb3..98bc911 100644
--- a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
@@ -206,15 +206,18 @@
         self.assertTrue(one_line_summary in logging_stream.buflist)
 
         # Ensure the results were summarized properly.
-        self.assertEqual(details.summarized_results['num_regressions'], details.exit_code)
+        self.assertEqual(details.summarized_failing_results['num_regressions'], details.exit_code)
 
         # Ensure the image diff percentage is in the results.
-        self.assertEqual(details.summarized_results['tests']['failures']['expected']['image.html']['image_diff_percent'], 1)
+        self.assertEqual(details.summarized_failing_results['tests']['failures']['expected']['image.html']['image_diff_percent'], 1)
 
         # Ensure the results were written out and displayed.
+        failing_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/failing_results.json')
+        json_to_eval = failing_results_text.replace("ADD_RESULTS(", "").replace(");", "")
+        self.assertEqual(json.loads(json_to_eval), details.summarized_failing_results)
+
         full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
-        json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
-        self.assertEqual(json.loads(json_to_eval), details.summarized_results)
+        self.assertEqual(json.loads(full_results_text), details.summarized_full_results)
 
         self.assertEqual(host.user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])
 
@@ -814,9 +817,14 @@
         self.assertTrue(RunTest.has_test_of_type(batch_tests_run_http, 'http'))
         self.assertTrue(RunTest.has_test_of_type(batch_tests_run_http, 'websocket'))
 
-    def test_platform_tests_are_found(self):
+    def test_platform_directories_ignored_when_searching_for_tests(self):
+        tests_run = get_tests_run(['--platform', 'test-mac-leopard'])
+        self.assertFalse('platform/test-mac-leopard/http/test.html' in tests_run)
+        self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)
+
+    def test_platform_directories_not_searched_for_additional_tests(self):
         tests_run = get_tests_run(['--platform', 'test-mac-leopard', 'http'])
-        self.assertTrue('platform/test-mac-leopard/http/test.html' in tests_run)
+        self.assertFalse('platform/test-mac-leopard/http/test.html' in tests_run)
         self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)
 
     def test_output_diffs(self):
@@ -880,7 +888,7 @@
         _, _, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
         file_list = host.filesystem.written_files.keys()
 
-        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
+        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/failing_results.json')
         json = parse_full_results(json_string)
         self.assertTrue("multiple-match-success.html" not in json["tests"]["reftests"]["foo"])
         self.assertTrue("multiple-mismatch-success.html" not in json["tests"]["reftests"]["foo"])
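
A small sketch of reading the two result files the updated integration test distinguishes: failing_results.json keeps the ADD_RESULTS(...) JSONP wrapper while full_results.json is now plain JSON. The helper and its filesystem argument (any object with read_text_file(), like the MockHost filesystem used in the test) are assumptions:

import json

def load_results(filesystem, results_dir='/tmp/layout-test-results'):
    # failing_results.json is JSONP-wrapped for the results viewer page.
    failing_text = filesystem.read_text_file(results_dir + '/failing_results.json')
    failing = json.loads(failing_text.replace('ADD_RESULTS(', '').replace(');', ''))
    # full_results.json is written as plain JSON.
    full = json.loads(filesystem.read_text_file(results_dir + '/full_results.json'))
    return full, failing
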
diff --git a/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py b/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py
index e7dabab..2cb5d6f 100644
--- a/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py
+++ b/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py
@@ -49,7 +49,7 @@
     def print_results(self, run_details):
         if self.debug_logging:
             self.print_run_results(run_details.initial_results)
-        self.print_unexpected_results(run_details.summarized_results, run_details.enabled_pixel_tests_in_retry)
+        self.print_unexpected_results(run_details.summarized_failing_results, run_details.enabled_pixel_tests_in_retry)
 
     def _print(self, msg):
         self.stream.write(msg + '\n')
diff --git a/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results_unittest.py b/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results_unittest.py
index 8951a63..5b7a7a6 100644
--- a/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results_unittest.py
@@ -95,7 +95,8 @@
         port = MockHost().port_factory.get('test')
         printer, out = self.get_printer()
         initial_results = test_run_results_unittest.run_results(port)
-        summary = test_run_results_unittest.summarized_results(port, expected=False, passing=True, flaky=False)
-        details = test_run_results.RunDetails(summary['num_regressions'], summary, initial_results, None)
+        full_summary = test_run_results_unittest.summarized_results(port, expected=False, passing=True, flaky=False)
+        failing_summary = test_run_results_unittest.summarized_results(port, expected=False, passing=True, flaky=False, only_include_failing=True)
+        details = test_run_results.RunDetails(failing_summary['num_regressions'], full_summary, failing_summary, initial_results, None)
         printer.print_results(details)
         self.assertNotEmpty(out)
diff --git a/Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py b/Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py
deleted file mode 100644
index 5b5a5af..0000000
--- a/Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# Copyright (c) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from webkitpy.tool.bot.patchanalysistask import PatchAnalysisTask, PatchAnalysisTaskDelegate
-
-
-class CommitQueueTaskDelegate(PatchAnalysisTaskDelegate):
-    def parent_command(self):
-        return "commit-queue"
-
-    def did_pass_testing_ews(self, patch):
-        raise NotImplementedError("subclasses must implement")
-
-
-class CommitQueueTask(PatchAnalysisTask):
-    def validate(self):
-        # Bugs might get closed, or patches might be obsoleted or r-'d while the
-        # commit-queue is processing.
-        self._patch = self._delegate.refetch_patch(self._patch)
-        if self._patch.is_obsolete():
-            return False
-        if self._patch.bug().is_closed():
-            return False
-        if not self._patch.committer():
-            return False
-        if self._patch.review() == "-":
-            return False
-        return True
-
-    def _validate_changelog(self):
-        return self._run_command([
-            "validate-changelog",
-            "--non-interactive",
-            self._patch.id(),
-        ],
-        "ChangeLog validated",
-        "ChangeLog did not pass validation")
-
-    def _did_pass_tests_recently(self):
-        if self._delegate.did_pass_testing_ews(self._patch):
-            return True
-        return self._test_patch()
-
-    def run(self):
-        if not self.validate():
-            return False
-        if not self._clean():
-            return False
-        if not self._update():
-            return False
-        if not self._apply():
-            return self.report_failure()
-        if not self._validate_changelog():
-            return self.report_failure()
-        if not self._patch.is_rollout():
-            if not self._build():
-                if not self._build_without_patch():
-                    return False
-                return self.report_failure()
-            if not self._did_pass_tests_recently():
-                return False
-        # Make sure the patch is still valid before landing (e.g., make sure
-        # no one has set commit-queue- since we started working on the patch.)
-        if not self.validate():
-            return False
-        # FIXME: We should understand why the land failure occurred and retry if possible.
-        if not self._land():
-            return self.report_failure()
-        return True
diff --git a/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py b/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py
deleted file mode 100644
index ba9254b..0000000
--- a/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py
+++ /dev/null
@@ -1,582 +0,0 @@
-# Copyright (c) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from datetime import datetime
-import logging
-import unittest2 as unittest
-
-from webkitpy.common.net import bugzilla
-from webkitpy.common.net.layouttestresults import LayoutTestResults
-from webkitpy.common.system.executive import ScriptError
-from webkitpy.common.system.outputcapture import OutputCapture
-from webkitpy.layout_tests.models import test_results
-from webkitpy.layout_tests.models import test_failures
-from webkitpy.thirdparty.mock import Mock
-from webkitpy.tool.bot.commitqueuetask import *
-from webkitpy.tool.bot.expectedfailures import ExpectedFailures
-from webkitpy.tool.mocktool import MockTool
-
-_log = logging.getLogger(__name__)
-
-
-class MockCommitQueue(CommitQueueTaskDelegate):
-    def __init__(self, error_plan):
-        self._error_plan = error_plan
-        self._failure_status_id = 0
-
-    def run_command(self, command):
-        _log.info("run_webkit_patch: %s" % command)
-        if self._error_plan:
-            error = self._error_plan.pop(0)
-            if error:
-                raise error
-
-    def command_passed(self, success_message, patch):
-        _log.info("command_passed: success_message='%s' patch='%s'" % (
-            success_message, patch.id()))
-
-    def command_failed(self, failure_message, script_error, patch):
-        _log.info("command_failed: failure_message='%s' script_error='%s' patch='%s'" % (
-            failure_message, script_error, patch.id()))
-        self._failure_status_id += 1
-        return self._failure_status_id
-
-    def refetch_patch(self, patch):
-        return patch
-
-    def expected_failures(self):
-        return ExpectedFailures()
-
-    def test_results(self):
-        return None
-
-    def report_flaky_tests(self, patch, flaky_results, results_archive):
-        flaky_tests = [result.filename for result in flaky_results]
-        _log.info("report_flaky_tests: patch='%s' flaky_tests='%s' archive='%s'" % (patch.id(), flaky_tests, results_archive.filename))
-
-    def archive_last_test_results(self, patch):
-        _log.info("archive_last_test_results: patch='%s'" % patch.id())
-        archive = Mock()
-        archive.filename = "mock-archive-%s.zip" % patch.id()
-        return archive
-
-    def build_style(self):
-        return "both"
-
-    def did_pass_testing_ews(self, patch):
-        return False
-
-
-class FailingTestCommitQueue(MockCommitQueue):
-    def __init__(self, error_plan, test_failure_plan):
-        MockCommitQueue.__init__(self, error_plan)
-        self._test_run_counter = -1  # Special value to indicate tests have never been run.
-        self._test_failure_plan = test_failure_plan
-
-    def run_command(self, command):
-        if command[0] == "build-and-test":
-            self._test_run_counter += 1
-        MockCommitQueue.run_command(self, command)
-
-    def _mock_test_result(self, testname):
-        return test_results.TestResult(testname, [test_failures.FailureTextMismatch()])
-
-    def test_results(self):
-        # Doesn't make sense to ask for the test_results until the tests have run at least once.
-        assert(self._test_run_counter >= 0)
-        failures_for_run = self._test_failure_plan[self._test_run_counter]
-        results = LayoutTestResults(map(self._mock_test_result, failures_for_run))
-        # This makes the results trustable by ExpectedFailures.
-        results.set_failure_limit_count(10)
-        return results
-
-
-# We use GoldenScriptError to make sure that the code under test throws the
-# correct (i.e., golden) exception.
-class GoldenScriptError(ScriptError):
-    pass
-
-
-class CommitQueueTaskTest(unittest.TestCase):
-    def _run_through_task(self, commit_queue, expected_logs, expected_exception=None, expect_retry=False):
-        tool = MockTool(log_executive=True)
-        patch = tool.bugs.fetch_attachment(10000)
-        task = CommitQueueTask(commit_queue, patch)
-        success = OutputCapture().assert_outputs(self, task.run, expected_logs=expected_logs, expected_exception=expected_exception)
-        if not expected_exception:
-            self.assertEqual(success, not expect_retry)
-        return task
-
-    def test_success_case(self):
-        commit_queue = MockCommitQueue([])
-        expected_logs = """run_webkit_patch: ['clean']
-command_passed: success_message='Cleaned working directory' patch='10000'
-run_webkit_patch: ['update']
-command_passed: success_message='Updated working directory' patch='10000'
-run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
-command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
-command_passed: success_message='ChangeLog validated' patch='10000'
-run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
-command_passed: success_message='Built patch' patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_passed: success_message='Passed tests' patch='10000'
-run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
-command_passed: success_message='Landed patch' patch='10000'
-"""
-        self._run_through_task(commit_queue, expected_logs)
-
-    def test_fast_success_case(self):
-        commit_queue = MockCommitQueue([])
-        commit_queue.did_pass_testing_ews = lambda patch: True
-        expected_logs = """run_webkit_patch: ['clean']
-command_passed: success_message='Cleaned working directory' patch='10000'
-run_webkit_patch: ['update']
-command_passed: success_message='Updated working directory' patch='10000'
-run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
-command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
-command_passed: success_message='ChangeLog validated' patch='10000'
-run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
-command_passed: success_message='Built patch' patch='10000'
-run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
-command_passed: success_message='Landed patch' patch='10000'
-"""
-        self._run_through_task(commit_queue, expected_logs)
-
-    def test_clean_failure(self):
-        commit_queue = MockCommitQueue([
-            ScriptError("MOCK clean failure"),
-        ])
-        expected_logs = """run_webkit_patch: ['clean']
-command_failed: failure_message='Unable to clean working directory' script_error='MOCK clean failure' patch='10000'
-"""
-        self._run_through_task(commit_queue, expected_logs, expect_retry=True)
-
-    def test_update_failure(self):
-        commit_queue = MockCommitQueue([
-            None,
-            ScriptError("MOCK update failure"),
-        ])
-        expected_logs = """run_webkit_patch: ['clean']
-command_passed: success_message='Cleaned working directory' patch='10000'
-run_webkit_patch: ['update']
-command_failed: failure_message='Unable to update working directory' script_error='MOCK update failure' patch='10000'
-"""
-        self._run_through_task(commit_queue, expected_logs, expect_retry=True)
-
-    def test_apply_failure(self):
-        commit_queue = MockCommitQueue([
-            None,
-            None,
-            GoldenScriptError("MOCK apply failure"),
-        ])
-        expected_logs = """run_webkit_patch: ['clean']
-command_passed: success_message='Cleaned working directory' patch='10000'
-run_webkit_patch: ['update']
-command_passed: success_message='Updated working directory' patch='10000'
-run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
-command_failed: failure_message='Patch does not apply' script_error='MOCK apply failure' patch='10000'
-"""
-        self._run_through_task(commit_queue, expected_logs, GoldenScriptError)
-
-    def test_validate_changelog_failure(self):
-        commit_queue = MockCommitQueue([
-            None,
-            None,
-            None,
-            GoldenScriptError("MOCK validate failure"),
-        ])
-        expected_logs = """run_webkit_patch: ['clean']
-command_passed: success_message='Cleaned working directory' patch='10000'
-run_webkit_patch: ['update']
-command_passed: success_message='Updated working directory' patch='10000'
-run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
-command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
-command_failed: failure_message='ChangeLog did not pass validation' script_error='MOCK validate failure' patch='10000'
-"""
-        self._run_through_task(commit_queue, expected_logs, GoldenScriptError)
-
-    def test_build_failure(self):
-        commit_queue = MockCommitQueue([
-            None,
-            None,
-            None,
-            None,
-            GoldenScriptError("MOCK build failure"),
-        ])
-        expected_logs = """run_webkit_patch: ['clean']
-command_passed: success_message='Cleaned working directory' patch='10000'
-run_webkit_patch: ['update']
-command_passed: success_message='Updated working directory' patch='10000'
-run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
-command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
-command_passed: success_message='ChangeLog validated' patch='10000'
-run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
-command_failed: failure_message='Patch does not build' script_error='MOCK build failure' patch='10000'
-run_webkit_patch: ['build', '--force-clean', '--no-update', '--build-style=both']
-command_passed: success_message='Able to build without patch' patch='10000'
-"""
-        self._run_through_task(commit_queue, expected_logs, GoldenScriptError)
-
-    def test_red_build_failure(self):
-        commit_queue = MockCommitQueue([
-            None,
-            None,
-            None,
-            None,
-            ScriptError("MOCK build failure"),
-            ScriptError("MOCK clean build failure"),
-        ])
-        expected_logs = """run_webkit_patch: ['clean']
-command_passed: success_message='Cleaned working directory' patch='10000'
-run_webkit_patch: ['update']
-command_passed: success_message='Updated working directory' patch='10000'
-run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
-command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
-command_passed: success_message='ChangeLog validated' patch='10000'
-run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
-command_failed: failure_message='Patch does not build' script_error='MOCK build failure' patch='10000'
-run_webkit_patch: ['build', '--force-clean', '--no-update', '--build-style=both']
-command_failed: failure_message='Unable to build without patch' script_error='MOCK clean build failure' patch='10000'
-"""
-        self._run_through_task(commit_queue, expected_logs, expect_retry=True)
-
-    def test_flaky_test_failure(self):
-        commit_queue = MockCommitQueue([
-            None,
-            None,
-            None,
-            None,
-            None,
-            ScriptError("MOCK tests failure"),
-        ])
-        # CommitQueueTask will only report flaky tests if we successfully parsed
-        # results.json and returned a LayoutTestResults object, so we fake one.
-        commit_queue.test_results = lambda: LayoutTestResults([])
-        expected_logs = """run_webkit_patch: ['clean']
-command_passed: success_message='Cleaned working directory' patch='10000'
-run_webkit_patch: ['update']
-command_passed: success_message='Updated working directory' patch='10000'
-run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
-command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
-command_passed: success_message='ChangeLog validated' patch='10000'
-run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
-command_passed: success_message='Built patch' patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_failed: failure_message='Patch does not pass tests' script_error='MOCK tests failure' patch='10000'
-archive_last_test_results: patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_passed: success_message='Passed tests' patch='10000'
-report_flaky_tests: patch='10000' flaky_tests='[]' archive='mock-archive-10000.zip'
-run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
-command_passed: success_message='Landed patch' patch='10000'
-"""
-        self._run_through_task(commit_queue, expected_logs)
-
-    def test_failed_archive(self):
-        commit_queue = MockCommitQueue([
-            None,
-            None,
-            None,
-            None,
-            None,
-            ScriptError("MOCK tests failure"),
-        ])
-        commit_queue.test_results = lambda: LayoutTestResults([])
-        # It's possible for the delegate to fail to archive layout tests; don't try to report
-        # flaky tests when that happens.
-        commit_queue.archive_last_test_results = lambda patch: None
-        expected_logs = """run_webkit_patch: ['clean']
-command_passed: success_message='Cleaned working directory' patch='10000'
-run_webkit_patch: ['update']
-command_passed: success_message='Updated working directory' patch='10000'
-run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
-command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
-command_passed: success_message='ChangeLog validated' patch='10000'
-run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
-command_passed: success_message='Built patch' patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_failed: failure_message='Patch does not pass tests' script_error='MOCK tests failure' patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_passed: success_message='Passed tests' patch='10000'
-run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
-command_passed: success_message='Landed patch' patch='10000'
-"""
-        self._run_through_task(commit_queue, expected_logs)
-
-    def test_double_flaky_test_failure(self):
-        commit_queue = FailingTestCommitQueue([
-            None,
-            None,
-            None,
-            None,
-            None,
-            ScriptError("MOCK test failure"),
-            ScriptError("MOCK test failure again"),
-        ], [
-            "foo.html",
-            "bar.html",
-            "foo.html",
-        ])
-        # The (subtle) point of this test is that report_flaky_tests does not appear
-        # in the expected_logs for this run.
-        # Note also that there is no attempt to run the tests w/o the patch.
-        expected_logs = """run_webkit_patch: ['clean']
-command_passed: success_message='Cleaned working directory' patch='10000'
-run_webkit_patch: ['update']
-command_passed: success_message='Updated working directory' patch='10000'
-run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
-command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
-command_passed: success_message='ChangeLog validated' patch='10000'
-run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
-command_passed: success_message='Built patch' patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
-archive_last_test_results: patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
-"""
-        tool = MockTool(log_executive=True)
-        patch = tool.bugs.fetch_attachment(10000)
-        task = CommitQueueTask(commit_queue, patch)
-        success = OutputCapture().assert_outputs(self, task.run, expected_logs=expected_logs)
-        self.assertFalse(success)
-
-    def test_test_failure(self):
-        commit_queue = MockCommitQueue([
-            None,
-            None,
-            None,
-            None,
-            None,
-            GoldenScriptError("MOCK test failure"),
-            ScriptError("MOCK test failure again"),
-        ])
-        expected_logs = """run_webkit_patch: ['clean']
-command_passed: success_message='Cleaned working directory' patch='10000'
-run_webkit_patch: ['update']
-command_passed: success_message='Updated working directory' patch='10000'
-run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
-command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
-command_passed: success_message='ChangeLog validated' patch='10000'
-run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
-command_passed: success_message='Built patch' patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
-archive_last_test_results: patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
-archive_last_test_results: patch='10000'
-run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive']
-command_passed: success_message='Able to pass tests without patch' patch='10000'
-"""
-        self._run_through_task(commit_queue, expected_logs, GoldenScriptError)
-
-    def test_red_test_failure(self):
-        commit_queue = FailingTestCommitQueue([
-            None,
-            None,
-            None,
-            None,
-            None,
-            ScriptError("MOCK test failure"),
-            ScriptError("MOCK test failure again"),
-            ScriptError("MOCK clean test failure"),
-        ], [
-            "foo.html",
-            "foo.html",
-            "foo.html",
-        ])
-
-        # Tests always fail, and always return the same results, but we
-        # should still be able to land in this case!
-        expected_logs = """run_webkit_patch: ['clean']
-command_passed: success_message='Cleaned working directory' patch='10000'
-run_webkit_patch: ['update']
-command_passed: success_message='Updated working directory' patch='10000'
-run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
-command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
-command_passed: success_message='ChangeLog validated' patch='10000'
-run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
-command_passed: success_message='Built patch' patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
-archive_last_test_results: patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
-archive_last_test_results: patch='10000'
-run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive']
-command_failed: failure_message='Unable to pass tests without patch (tree is red?)' script_error='MOCK clean test failure' patch='10000'
-run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
-command_passed: success_message='Landed patch' patch='10000'
-"""
-        self._run_through_task(commit_queue, expected_logs)
-
-    def test_very_red_tree_retry(self):
-        lots_of_failing_tests = map(lambda num: "test-%s.html" % num, range(0, 100))
-        commit_queue = FailingTestCommitQueue([
-            None,
-            None,
-            None,
-            None,
-            None,
-            ScriptError("MOCK test failure"),
-            ScriptError("MOCK test failure again"),
-            ScriptError("MOCK clean test failure"),
-        ], [
-            lots_of_failing_tests,
-            lots_of_failing_tests,
-            lots_of_failing_tests,
-        ])
-
-        # Tests always fail, and return so many failures that we do not
-        # trust the results (see ExpectedFailures._can_trust_results) so we
-        # just give up and retry the patch.
-        expected_logs = """run_webkit_patch: ['clean']
-command_passed: success_message='Cleaned working directory' patch='10000'
-run_webkit_patch: ['update']
-command_passed: success_message='Updated working directory' patch='10000'
-run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
-command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
-command_passed: success_message='ChangeLog validated' patch='10000'
-run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
-command_passed: success_message='Built patch' patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
-archive_last_test_results: patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
-archive_last_test_results: patch='10000'
-run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive']
-command_failed: failure_message='Unable to pass tests without patch (tree is red?)' script_error='MOCK clean test failure' patch='10000'
-"""
-        self._run_through_task(commit_queue, expected_logs, expect_retry=True)
-
-    def test_red_tree_patch_rejection(self):
-        commit_queue = FailingTestCommitQueue([
-            None,
-            None,
-            None,
-            None,
-            None,
-            GoldenScriptError("MOCK test failure"),
-            ScriptError("MOCK test failure again"),
-            ScriptError("MOCK clean test failure"),
-        ], [
-            ["foo.html", "bar.html"],
-            ["foo.html", "bar.html"],
-            ["foo.html"],
-        ])
-
-        # Tests always fail, but the clean tree only fails one test
-        # while the patch fails two.  So we should reject the patch!
-        expected_logs = """run_webkit_patch: ['clean']
-command_passed: success_message='Cleaned working directory' patch='10000'
-run_webkit_patch: ['update']
-command_passed: success_message='Updated working directory' patch='10000'
-run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
-command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
-command_passed: success_message='ChangeLog validated' patch='10000'
-run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
-command_passed: success_message='Built patch' patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
-archive_last_test_results: patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
-archive_last_test_results: patch='10000'
-run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive']
-command_failed: failure_message='Unable to pass tests without patch (tree is red?)' script_error='MOCK clean test failure' patch='10000'
-"""
-        task = self._run_through_task(commit_queue, expected_logs, GoldenScriptError)
-        self.assertEqual(task.results_from_patch_test_run(task._patch).failing_tests(), ["foo.html", "bar.html"])
-        # failure_status_id should be that of the test run with the patch (1), not the run without the patch (2).
-        self.assertEqual(task.failure_status_id, 1)
-
-    def test_land_failure(self):
-        commit_queue = MockCommitQueue([
-            None,
-            None,
-            None,
-            None,
-            None,
-            None,
-            GoldenScriptError("MOCK land failure"),
-        ])
-        expected_logs = """run_webkit_patch: ['clean']
-command_passed: success_message='Cleaned working directory' patch='10000'
-run_webkit_patch: ['update']
-command_passed: success_message='Updated working directory' patch='10000'
-run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
-command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
-command_passed: success_message='ChangeLog validated' patch='10000'
-run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
-command_passed: success_message='Built patch' patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_passed: success_message='Passed tests' patch='10000'
-run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
-command_failed: failure_message='Unable to land patch' script_error='MOCK land failure' patch='10000'
-"""
-        # FIXME: This should really be expect_retry=True for a better user experience.
-        self._run_through_task(commit_queue, expected_logs, GoldenScriptError)
-
-    def _expect_validate(self, patch, is_valid):
-        class MockDelegate(object):
-            def refetch_patch(self, patch):
-                return patch
-
-            def expected_failures(self):
-                return ExpectedFailures()
-
-        task = CommitQueueTask(MockDelegate(), patch)
-        self.assertEqual(task.validate(), is_valid)
-
-    def _mock_patch(self, attachment_dict={}, bug_dict={'bug_status': 'NEW'}, committer="fake"):
-        bug = bugzilla.Bug(bug_dict, None)
-        patch = bugzilla.Attachment(attachment_dict, bug)
-        patch._committer = committer
-        return patch
-
-    def test_validate(self):
-        self._expect_validate(self._mock_patch(), True)
-        self._expect_validate(self._mock_patch({'is_obsolete': True}), False)
-        self._expect_validate(self._mock_patch(bug_dict={'bug_status': 'CLOSED'}), False)
-        self._expect_validate(self._mock_patch(committer=None), False)
-        self._expect_validate(self._mock_patch({'review': '-'}), False)
diff --git a/Tools/Scripts/webkitpy/tool/bot/expectedfailures.py b/Tools/Scripts/webkitpy/tool/bot/expectedfailures.py
deleted file mode 100644
index c0cfe21..0000000
--- a/Tools/Scripts/webkitpy/tool/bot/expectedfailures.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright (c) 2011 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-class ExpectedFailures(object):
-    def __init__(self):
-        self._failures = set()
-        self._is_trustworthy = True
-
-    @classmethod
-    def _has_failures(cls, results):
-        return bool(results and results.failing_tests())
-
-    @classmethod
-    def _should_trust(cls, results):
-        return bool(cls._has_failures(results) and results.failure_limit_count() and len(results.failing_tests()) < results.failure_limit_count())
-
-    def failures_were_expected(self, results):
-        if not self._is_trustworthy:
-            return False
-        if not self._should_trust(results):
-            return False
-        return set(results.failing_tests()) <= self._failures
-
-    def unexpected_failures_observed(self, results):
-        if not self._is_trustworthy:
-            return None
-        if not self._has_failures(results):
-            return None
-        return set(results.failing_tests()) - self._failures
-
-    def update(self, results):
-        if results:
-            self._failures = set(results.failing_tests())
-            self._is_trustworthy = self._should_trust(results)
diff --git a/Tools/Scripts/webkitpy/tool/bot/expectedfailures_unittest.py b/Tools/Scripts/webkitpy/tool/bot/expectedfailures_unittest.py
deleted file mode 100644
index b639856..0000000
--- a/Tools/Scripts/webkitpy/tool/bot/expectedfailures_unittest.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright (c) 2009 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import unittest2 as unittest
-
-from webkitpy.tool.bot.expectedfailures import ExpectedFailures
-
-
-class MockResults(object):
-    def __init__(self, failing_tests=[], failure_limit=10):
-        self._failing_tests = failing_tests
-        self._failure_limit_count = failure_limit
-
-    def failure_limit_count(self):
-        return self._failure_limit_count
-
-    def failing_tests(self):
-        return self._failing_tests
-
-
-class ExpectedFailuresTest(unittest.TestCase):
-    def _assert_can_trust(self, results, can_trust):
-        self.assertEqual(ExpectedFailures._should_trust(results), can_trust)
-
-    def test_can_trust_results(self):
-        self._assert_can_trust(None, False)
-        self._assert_can_trust(MockResults(failing_tests=[], failure_limit=None), False)
-        self._assert_can_trust(MockResults(failing_tests=[], failure_limit=10), False)
-        self._assert_can_trust(MockResults(failing_tests=[1], failure_limit=None), False)
-        self._assert_can_trust(MockResults(failing_tests=[1], failure_limit=2), True)
-        self._assert_can_trust(MockResults(failing_tests=[1], failure_limit=1), False)
-        self._assert_can_trust(MockResults(failing_tests=[1, 2], failure_limit=1), False)
-
-    def _assert_expected(self, expected_failures, failures, expected):
-        self.assertEqual(expected_failures.failures_were_expected(MockResults(failures)), expected)
-
-    def test_failures_were_expected(self):
-        failures = ExpectedFailures()
-        failures.update(MockResults(['foo.html']))
-        self._assert_expected(failures, ['foo.html'], True)
-        self._assert_expected(failures, ['bar.html'], False)
-        self._assert_expected(failures, ['bar.html', 'foo.html'], False)
-
-        failures.update(MockResults(['baz.html']))
-        self._assert_expected(failures, ['baz.html'], True)
-        self._assert_expected(failures, ['foo.html'], False)
-
-        failures.update(MockResults([]))
-        self._assert_expected(failures, ['baz.html'], False)
-        self._assert_expected(failures, ['foo.html'], False)
-
-    def test_unexpected_failures_observed(self):
-        failures = ExpectedFailures()
-        failures.update(MockResults(['foo.html']))
-        self.assertEqual(failures.unexpected_failures_observed(MockResults(['foo.html', 'bar.html'])), set(['bar.html']))
-        self.assertEqual(failures.unexpected_failures_observed(MockResults(['baz.html'])), set(['baz.html']))
-        unbounded_results = MockResults(['baz.html', 'qux.html', 'taco.html'], failure_limit=3)
-        self.assertEqual(failures.unexpected_failures_observed(unbounded_results), set(['baz.html', 'qux.html', 'taco.html']))
-        unbounded_results_with_existing_failure = MockResults(['foo.html', 'baz.html', 'qux.html', 'taco.html'], failure_limit=4)
-        self.assertEqual(failures.unexpected_failures_observed(unbounded_results_with_existing_failure), set(['baz.html', 'qux.html', 'taco.html']))
-
-    def test_unexpected_failures_observed_when_tree_is_hosed(self):
-        failures = ExpectedFailures()
-        failures.update(MockResults(['foo.html', 'banana.html'], failure_limit=2))
-        self.assertEqual(failures.unexpected_failures_observed(MockResults(['foo.html', 'bar.html'])), None)
-        self.assertEqual(failures.unexpected_failures_observed(MockResults(['baz.html'])), None)
-        unbounded_results = MockResults(['baz.html', 'qux.html', 'taco.html'], failure_limit=3)
-        self.assertEqual(failures.unexpected_failures_observed(unbounded_results), None)
-        unbounded_results_with_existing_failure = MockResults(['foo.html', 'baz.html', 'qux.html', 'taco.html'], failure_limit=4)
-        self.assertEqual(failures.unexpected_failures_observed(unbounded_results_with_existing_failure), None)
diff --git a/Tools/Scripts/webkitpy/tool/bot/feeders.py b/Tools/Scripts/webkitpy/tool/bot/feeders.py
deleted file mode 100644
index f4bc4b9..0000000
--- a/Tools/Scripts/webkitpy/tool/bot/feeders.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# Copyright (c) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import logging
-
-from webkitpy.common.config.committervalidator import CommitterValidator
-from webkitpy.tool.grammar import pluralize
-
-_log = logging.getLogger(__name__)
-
-
-class AbstractFeeder(object):
-    def __init__(self, tool):
-        self._tool = tool
-
-    def feed(self):
-        raise NotImplementedError("subclasses must implement")
-
-
-class CommitQueueFeeder(AbstractFeeder):
-    queue_name = "commit-queue"
-
-    def __init__(self, tool):
-        AbstractFeeder.__init__(self, tool)
-        self.committer_validator = CommitterValidator(self._tool)
-
-    def _update_work_items(self, item_ids):
-        # FIXME: This is the last use of update_work_items, the commit-queue
-        # should move to feeding patches one at a time like the EWS does.
-        self._tool.status_server.update_work_items(self.queue_name, item_ids)
-        _log.info("Feeding %s items %s" % (self.queue_name, item_ids))
-
-    def feed(self):
-        patches = self._validate_patches()
-        patches = self._patches_with_acceptable_review_flag(patches)
-        patches = sorted(patches, self._patch_cmp)
-        patch_ids = [patch.id() for patch in patches]
-        self._update_work_items(patch_ids)
-
-    def _patches_for_bug(self, bug_id):
-        return self._tool.bugs.fetch_bug(bug_id).commit_queued_patches(include_invalid=True)
-
-    # Filters out patches with r? or r-; only r+ or no review are OK to land.
-    def _patches_with_acceptable_review_flag(self, patches):
-        return [patch for patch in patches if patch.review() in [None, '+']]
-
-    def _validate_patches(self):
-        # Not using BugzillaQueries.fetch_patches_from_commit_queue() so we can reject patches with invalid committers/reviewers.
-        bug_ids = self._tool.bugs.queries.fetch_bug_ids_from_commit_queue()
-        all_patches = sum([self._patches_for_bug(bug_id) for bug_id in bug_ids], [])
-        return self.committer_validator.patches_after_rejecting_invalid_commiters_and_reviewers(all_patches)
-
-    def _patch_cmp(self, a, b):
-        # Sort first by is_rollout, then by attach_date.
-        # Reversing the order so that is_rollout is first.
-        rollout_cmp = cmp(b.is_rollout(), a.is_rollout())
-        if rollout_cmp != 0:
-            return rollout_cmp
-        return cmp(a.attach_date(), b.attach_date())
-
-
-class EWSFeeder(AbstractFeeder):
-    def __init__(self, tool):
-        self._ids_sent_to_server = set()
-        AbstractFeeder.__init__(self, tool)
-
-    def feed(self):
-        ids_needing_review = set(self._tool.bugs.queries.fetch_attachment_ids_from_review_queue())
-        new_ids = ids_needing_review.difference(self._ids_sent_to_server)
-        _log.info("Feeding EWS (%s, %s new)" % (pluralize("r? patch", len(ids_needing_review)), len(new_ids)))
-        for attachment_id in new_ids:  # Order doesn't really matter for the EWS.
-            self._tool.status_server.submit_to_ews(attachment_id)
-            self._ids_sent_to_server.add(attachment_id)
diff --git a/Tools/Scripts/webkitpy/tool/bot/feeders_unittest.py b/Tools/Scripts/webkitpy/tool/bot/feeders_unittest.py
deleted file mode 100644
index b70a637..0000000
--- a/Tools/Scripts/webkitpy/tool/bot/feeders_unittest.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright (c) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from datetime import datetime
-import unittest2 as unittest
-
-from webkitpy.common.system.outputcapture import OutputCapture
-from webkitpy.thirdparty.mock import Mock
-from webkitpy.tool.bot.feeders import *
-from webkitpy.tool.mocktool import MockTool
-
-
-class FeedersTest(unittest.TestCase):
-    def test_commit_queue_feeder(self):
-        feeder = CommitQueueFeeder(MockTool())
-        expected_logs = """Warning, attachment 10001 on bug 50000 has invalid committer (non-committer@example.com)
-Warning, attachment 10001 on bug 50000 has invalid committer (non-committer@example.com)
-MOCK setting flag 'commit-queue' to '-' on attachment '10001' with comment 'Rejecting attachment 10001 from commit-queue.\n\nnon-committer@example.com does not have committer permissions according to http://trac.webkit.org/browser/trunk/Tools/Scripts/webkitpy/common/config/committers.py.
-
-- If you do not have committer rights please read http://webkit.org/coding/contributing.html for instructions on how to use bugzilla flags.
-
-- If you have committer rights please correct the error in Tools/Scripts/webkitpy/common/config/committers.py by adding yourself to the file (no review needed).  The commit-queue restarts itself every 2 hours.  After restart the commit-queue will correctly respect your committer rights.'
-MOCK: update_work_items: commit-queue [10005, 10000]
-Feeding commit-queue items [10005, 10000]
-"""
-        OutputCapture().assert_outputs(self, feeder.feed, expected_logs=expected_logs)
-
-    def _mock_attachment(self, is_rollout, attach_date):
-        attachment = Mock()
-        attachment.is_rollout = lambda: is_rollout
-        attachment.attach_date = lambda: attach_date
-        return attachment
-
-    def test_patch_cmp(self):
-        long_ago_date = datetime(1900, 1, 21)
-        recent_date = datetime(2010, 1, 21)
-        attachment1 = self._mock_attachment(is_rollout=False, attach_date=recent_date)
-        attachment2 = self._mock_attachment(is_rollout=False, attach_date=long_ago_date)
-        attachment3 = self._mock_attachment(is_rollout=True, attach_date=recent_date)
-        attachment4 = self._mock_attachment(is_rollout=True, attach_date=long_ago_date)
-        attachments = [attachment1, attachment2, attachment3, attachment4]
-        expected_sort = [attachment4, attachment3, attachment2, attachment1]
-        queue = CommitQueueFeeder(MockTool())
-        attachments.sort(queue._patch_cmp)
-        self.assertEqual(attachments, expected_sort)
-
-    def test_patches_with_acceptable_review_flag(self):
-        class MockPatch(object):
-            def __init__(self, patch_id, review):
-                self.id = patch_id
-                self.review = lambda: review
-
-        feeder = CommitQueueFeeder(MockTool())
-        patches = [MockPatch(1, None), MockPatch(2, '-'), MockPatch(3, "+")]
-        self.assertEqual([patch.id for patch in feeder._patches_with_acceptable_review_flag(patches)], [1, 3])
diff --git a/Tools/Scripts/webkitpy/tool/bot/flakytestreporter.py b/Tools/Scripts/webkitpy/tool/bot/flakytestreporter.py
deleted file mode 100644
index 086a35b..0000000
--- a/Tools/Scripts/webkitpy/tool/bot/flakytestreporter.py
+++ /dev/null
@@ -1,200 +0,0 @@
-# Copyright (c) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import codecs
-import logging
-import os.path
-
-from webkitpy.common.net.layouttestresults import path_for_layout_test, LayoutTestResults
-from webkitpy.common.config import urls
-from webkitpy.tool.bot.botinfo import BotInfo
-from webkitpy.tool.grammar import plural, pluralize, join_with_separators
-
-_log = logging.getLogger(__name__)
-
-
-class FlakyTestReporter(object):
-    def __init__(self, tool, bot_name):
-        self._tool = tool
-        self._bot_name = bot_name
-        # FIXME: Use the real port object
-        self._bot_info = BotInfo(tool, tool.deprecated_port().name())
-
-    def _author_emails_for_test(self, flaky_test):
-        test_path = path_for_layout_test(flaky_test)
-        commit_infos = self._tool.checkout().recent_commit_infos_for_files([test_path])
-        # This ignores authors who are not committers because we don't have their bugzilla_email.
-        return set([commit_info.author().bugzilla_email() for commit_info in commit_infos if commit_info.author()])
-
-    def _bugzilla_email(self):
-        # FIXME: This is kinda a funny way to get the bugzilla email,
-        # we could also just create a Credentials object directly
-        # but some of the Credentials logic is in bugzilla.py too...
-        self._tool.bugs.authenticate()
-        return self._tool.bugs.username
-
-    # FIXME: This should move into common.config
-    _bot_emails = set([
-        "commit-queue@webkit.org",  # commit-queue
-        "eseidel@chromium.org",  # old commit-queue
-        "webkit.review.bot@gmail.com",  # style-queue, sheriff-bot, CrLx/Gtk EWS
-        "buildbot@hotmail.com",  # Win EWS
-        # Mac EWS currently uses eric@webkit.org, but that's not normally a bot
-    ])
-
-    def _lookup_bug_for_flaky_test(self, flaky_test):
-        bugs = self._tool.bugs.queries.fetch_bugs_matching_search(search_string=flaky_test)
-        if not bugs:
-            return None
-        # Match any bugs which are from known bots or the email this bot is using.
-        allowed_emails = self._bot_emails | set([self._bugzilla_email])
-        bugs = filter(lambda bug: bug.reporter_email() in allowed_emails, bugs)
-        if not bugs:
-            return None
-        if len(bugs) > 1:
-            # FIXME: There are probably heuristics we could use for finding
-            # the right bug instead of the first, like open vs. closed.
-            _log.warn("Found %s %s matching '%s' filed by a bot, using the first." % (pluralize('bug', len(bugs)), [bug.id() for bug in bugs], flaky_test))
-        return bugs[0]
-
-    def _view_source_url_for_test(self, test_path):
-        return urls.view_source_url("LayoutTests/%s" % test_path)
-
-    def _create_bug_for_flaky_test(self, flaky_test, author_emails, latest_flake_message):
-        format_values = {
-            'test': flaky_test,
-            'authors': join_with_separators(sorted(author_emails)),
-            'flake_message': latest_flake_message,
-            'test_url': self._view_source_url_for_test(flaky_test),
-            'bot_name': self._bot_name,
-        }
-        title = "Flaky Test: %(test)s" % format_values
-        description = """This is an automatically generated bug from the %(bot_name)s.
-%(test)s has been flaky on the %(bot_name)s.
-
-%(test)s was authored by %(authors)s.
-%(test_url)s
-
-%(flake_message)s
-
-The bots will update this with information from each new failure.
-
-If you believe this bug to be fixed or invalid, feel free to close.  The bots will re-open if the flake re-occurs.
-
-If you would like to track this test fix with another bug, please close this bug as a duplicate.  The bots will follow the duplicate chain when making future comments.
-""" % format_values
-
-        master_flake_bug = 50856  # MASTER: Flaky tests found by the commit-queue
-        return self._tool.bugs.create_bug(title, description,
-            component="Tools / Tests",
-            cc=",".join(author_emails),
-            blocked="50856")
-
-    # This is over-engineered, but it makes for pretty bug messages.
-    def _optional_author_string(self, author_emails):
-        if not author_emails:
-            return ""
-        heading_string = plural('author') if len(author_emails) > 1 else 'author'
-        authors_string = join_with_separators(sorted(author_emails))
-        return " (%s: %s)" % (heading_string, authors_string)
-
-    def _latest_flake_message(self, flaky_result, patch):
-        failure_messages = [failure.message() for failure in flaky_result.failures]
-        flake_message = "The %s just saw %s flake (%s) while processing attachment %s on bug %s." % (self._bot_name, flaky_result.test_name, ", ".join(failure_messages), patch.id(), patch.bug_id())
-        return "%s\n%s" % (flake_message, self._bot_info.summary_text())
-
-    def _results_diff_path_for_test(self, test_path):
-        # FIXME: This is a big hack.  We should get this path from results.json
-        # except that old-run-webkit-tests doesn't produce a results.json
-        # so we just guess at the file path.
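-        # For example, "foo/bar.html" maps to "foo/bar-diffs.txt".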
-        (test_path_root, _) = os.path.splitext(test_path)
-        return "%s-diffs.txt" % test_path_root
-
-    def _follow_duplicate_chain(self, bug):
-        while bug.is_closed() and bug.duplicate_of():
-            bug = self._tool.bugs.fetch_bug(bug.duplicate_of())
-        return bug
-
-    # Maybe this logic should move into Bugzilla? a reopen=True arg to post_comment?
-    def _update_bug_for_flaky_test(self, bug, latest_flake_message):
-        if bug.is_closed():
-            self._tool.bugs.reopen_bug(bug.id(), latest_flake_message)
-        else:
-            self._tool.bugs.post_comment_to_bug(bug.id(), latest_flake_message)
-
-    # This method is needed because our archive paths include a leading tmp/layout-test-results
-    def _find_in_archive(self, path, archive):
-        for archived_path in archive.namelist():
-            # Archives are currently created with full paths.
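-            # For example, an archived path of "tmp/layout-test-results/foo/bar-diffs.txt"
-            # matches a requested "foo/bar-diffs.txt" via the endswith() check below.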
-            if archived_path.endswith(path):
-                return archived_path
-        return None
-
-    def _attach_failure_diff(self, flake_bug_id, flaky_test, results_archive_zip):
-        results_diff_path = self._results_diff_path_for_test(flaky_test)
-        # Check to make sure that the path makes sense.
-        # Since we're not actually getting this path from the results.html
-        # there is a chance it's wrong.
-        bot_id = self._tool.status_server.bot_id or "bot"
-        archive_path = self._find_in_archive(results_diff_path, results_archive_zip)
-        if archive_path:
-            results_diff = results_archive_zip.read(archive_path)
-            description = "Failure diff from %s" % bot_id
-            self._tool.bugs.add_attachment_to_bug(flake_bug_id, results_diff, description, filename="failure.diff")
-        else:
-            _log.warn("%s does not exist in results archive, uploading entire archive." % results_diff_path)
-            description = "Archive of layout-test-results from %s" % bot_id
-            # results_archive is a ZipFile object, grab the File object (.fp) to pass to Mechanize for uploading.
-            results_archive_file = results_archive_zip.fp
-            # Rewind the file object to start (since Mechanize won't do that automatically)
-            # See https://bugs.webkit.org/show_bug.cgi?id=54593
-            results_archive_file.seek(0)
-            self._tool.bugs.add_attachment_to_bug(flake_bug_id, results_archive_file, description, filename="layout-test-results.zip")
-
-    def report_flaky_tests(self, patch, flaky_test_results, results_archive):
-        message = "The %s encountered the following flaky tests while processing attachment %s:\n\n" % (self._bot_name, patch.id())
-        for flaky_result in flaky_test_results:
-            flaky_test = flaky_result.test_name
-            bug = self._lookup_bug_for_flaky_test(flaky_test)
-            latest_flake_message = self._latest_flake_message(flaky_result, patch)
-            author_emails = self._author_emails_for_test(flaky_test)
-            if not bug:
-                _log.info("Bug does not already exist for %s, creating." % flaky_test)
-                flake_bug_id = self._create_bug_for_flaky_test(flaky_test, author_emails, latest_flake_message)
-            else:
-                bug = self._follow_duplicate_chain(bug)
-                # FIXME: Ideally we'd only make one comment per flake, not two.  But that's not possible
-                # in all cases (e.g. when reopening), so for now file attachment and comment are separate.
-                self._update_bug_for_flaky_test(bug, latest_flake_message)
-                flake_bug_id = bug.id()
-
-            self._attach_failure_diff(flake_bug_id, flaky_test, results_archive)
-            message += "%s bug %s%s\n" % (flaky_test, flake_bug_id, self._optional_author_string(author_emails))
-
-        message += "The %s is continuing to process your patch." % self._bot_name
-        self._tool.bugs.post_comment_to_bug(patch.bug_id(), message)
diff --git a/Tools/Scripts/webkitpy/tool/bot/flakytestreporter_unittest.py b/Tools/Scripts/webkitpy/tool/bot/flakytestreporter_unittest.py
deleted file mode 100644
index 3fb3c83..0000000
--- a/Tools/Scripts/webkitpy/tool/bot/flakytestreporter_unittest.py
+++ /dev/null
@@ -1,169 +0,0 @@
-# Copyright (c) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import unittest2 as unittest
-
-from webkitpy.common.config.committers import Committer
-from webkitpy.common.system.filesystem_mock import MockFileSystem
-from webkitpy.common.system.outputcapture import OutputCapture
-from webkitpy.layout_tests.models import test_results
-from webkitpy.layout_tests.models import test_failures
-from webkitpy.thirdparty.mock import Mock
-from webkitpy.tool.bot.flakytestreporter import FlakyTestReporter
-from webkitpy.tool.mocktool import MockTool
-from webkitpy.common.net.statusserver_mock import MockStatusServer
-
-
-# Creating fake CommitInfos is a pain, so we use a mock one here.
-class MockCommitInfo(object):
-    def __init__(self, author_email):
-        self._author_email = author_email
-
-    def author(self):
-        # It's definitely possible to have commits with authors who
-        # are not in our committers.py list.
-        if not self._author_email:
-            return None
-        return Committer("Mock Committer", self._author_email)
-
-
-class FlakyTestReporterTest(unittest.TestCase):
-    def _mock_test_result(self, testname):
-        return test_results.TestResult(testname, [test_failures.FailureTextMismatch()])
-
-    def _assert_emails_for_test(self, emails):
-        tool = MockTool()
-        reporter = FlakyTestReporter(tool, 'dummy-queue')
-        commit_infos = [MockCommitInfo(email) for email in emails]
-        tool.checkout().recent_commit_infos_for_files = lambda paths: set(commit_infos)
-        self.assertEqual(reporter._author_emails_for_test([]), set(emails))
-
-    def test_author_emails_for_test(self):
-        self._assert_emails_for_test([])
-        self._assert_emails_for_test(["test1@test.com", "test1@test.com"])
-        self._assert_emails_for_test(["test1@test.com", "test2@test.com"])
-
-    def test_create_bug_for_flaky_test(self):
-        reporter = FlakyTestReporter(MockTool(), 'dummy-queue')
-        expected_logs = """MOCK create_bug
-bug_title: Flaky Test: foo/bar.html
-bug_description: This is an automatically generated bug from the dummy-queue.
-foo/bar.html has been flaky on the dummy-queue.
-
-foo/bar.html was authored by test@test.com.
-http://trac.webkit.org/browser/trunk/LayoutTests/foo/bar.html
-
-FLAKE_MESSAGE
-
-The bots will update this with information from each new failure.
-
-If you believe this bug to be fixed or invalid, feel free to close.  The bots will re-open if the flake re-occurs.
-
-If you would like to track this test fix with another bug, please close this bug as a duplicate.  The bots will follow the duplicate chain when making future comments.
-
-component: Tools / Tests
-cc: test@test.com
-blocked: 50856
-"""
-        OutputCapture().assert_outputs(self, reporter._create_bug_for_flaky_test, ['foo/bar.html', ['test@test.com'], 'FLAKE_MESSAGE'], expected_logs=expected_logs)
-
-    def test_follow_duplicate_chain(self):
-        tool = MockTool()
-        reporter = FlakyTestReporter(tool, 'dummy-queue')
-        bug = tool.bugs.fetch_bug(50004)
-        self.assertEqual(reporter._follow_duplicate_chain(bug).id(), 50002)
-
-    def test_report_flaky_tests_creating_bug(self):
-        tool = MockTool()
-        tool.filesystem = MockFileSystem({"/mock-results/foo/bar-diffs.txt": "mock"})
-        tool.status_server = MockStatusServer(bot_id="mock-bot-id")
-        reporter = FlakyTestReporter(tool, 'dummy-queue')
-        reporter._lookup_bug_for_flaky_test = lambda bug_id: None
-        patch = tool.bugs.fetch_attachment(10000)
-        expected_logs = """Bug does not already exist for foo/bar.html, creating.
-MOCK create_bug
-bug_title: Flaky Test: foo/bar.html
-bug_description: This is an automatically generated bug from the dummy-queue.
-foo/bar.html has been flaky on the dummy-queue.
-
-foo/bar.html was authored by abarth@webkit.org.
-http://trac.webkit.org/browser/trunk/LayoutTests/foo/bar.html
-
-The dummy-queue just saw foo/bar.html flake (text diff) while processing attachment 10000 on bug 50000.
-Bot: mock-bot-id  Port: MockPort  Platform: MockPlatform 1.0
-
-The bots will update this with information from each new failure.
-
-If you believe this bug to be fixed or invalid, feel free to close.  The bots will re-open if the flake re-occurs.
-
-If you would like to track this test fix with another bug, please close this bug as a duplicate.  The bots will follow the duplicate chain when making future comments.
-
-component: Tools / Tests
-cc: abarth@webkit.org
-blocked: 50856
-MOCK add_attachment_to_bug: bug_id=60001, description=Failure diff from mock-bot-id filename=failure.diff mimetype=None
-MOCK bug comment: bug_id=50000, cc=None
---- Begin comment ---
-The dummy-queue encountered the following flaky tests while processing attachment 10000:
-
-foo/bar.html bug 60001 (author: abarth@webkit.org)
-The dummy-queue is continuing to process your patch.
---- End comment ---
-
-"""
-        test_results = [self._mock_test_result('foo/bar.html')]
-
-        class MockZipFile(object):
-            def read(self, path):
-                return ""
-
-            def namelist(self):
-                return ['foo/bar-diffs.txt']
-
-        OutputCapture().assert_outputs(self, reporter.report_flaky_tests, [patch, test_results, MockZipFile()], expected_logs=expected_logs)
-
-    def test_optional_author_string(self):
-        reporter = FlakyTestReporter(MockTool(), 'dummy-queue')
-        self.assertEqual(reporter._optional_author_string([]), "")
-        self.assertEqual(reporter._optional_author_string(["foo@bar.com"]), " (author: foo@bar.com)")
-        self.assertEqual(reporter._optional_author_string(["a@b.com", "b@b.com"]), " (authors: a@b.com and b@b.com)")
-
-    def test_results_diff_path_for_test(self):
-        reporter = FlakyTestReporter(MockTool(), 'dummy-queue')
-        self.assertEqual(reporter._results_diff_path_for_test("test.html"), "test-diffs.txt")
-
-    def test_find_in_archive(self):
-        reporter = FlakyTestReporter(MockTool(), 'dummy-queue')
-
-        class MockZipFile(object):
-            def namelist(self):
-                return ["tmp/layout-test-results/foo/bar-diffs.txt"]
-
-        reporter._find_in_archive("foo/bar-diffs.txt", MockZipFile())
-        # This is not ideal, but it's a consequence of the endswith() matching:
-        # even a bare suffix like "txt" finds the archived diff.
-        reporter._find_in_archive("txt", MockZipFile())
diff --git a/Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py b/Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py
deleted file mode 100644
index b01c6c7..0000000
--- a/Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py
+++ /dev/null
@@ -1,253 +0,0 @@
-# Copyright (c) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from webkitpy.common.system.executive import ScriptError
-from webkitpy.common.net.layouttestresults import LayoutTestResults
-
-
-class UnableToApplyPatch(Exception):
-    def __init__(self, patch):
-        Exception.__init__(self)
-        self.patch = patch
-
-
-class PatchAnalysisTaskDelegate(object):
-    def parent_command(self):
-        raise NotImplementedError("subclasses must implement")
-
-    def run_command(self, command):
-        raise NotImplementedError("subclasses must implement")
-
-    def command_passed(self, message, patch):
-        raise NotImplementedError("subclasses must implement")
-
-    def command_failed(self, message, script_error, patch):
-        raise NotImplementedError("subclasses must implement")
-
-    def refetch_patch(self, patch):
-        raise NotImplementedError("subclasses must implement")
-
-    def expected_failures(self):
-        raise NotImplementedError("subclasses must implement")
-
-    def test_results(self):
-        raise NotImplementedError("subclasses must implement")
-
-    def archive_last_test_results(self, patch):
-        raise NotImplementedError("subclasses must implement")
-
-    def build_style(self):
-        raise NotImplementedError("subclasses must implement")
-
-    # We could make results_archive optional, but for now it's required.
-    def report_flaky_tests(self, patch, flaky_tests, results_archive):
-        raise NotImplementedError("subclasses must implement")
-
-
-class PatchAnalysisTask(object):
-    def __init__(self, delegate, patch):
-        self._delegate = delegate
-        self._patch = patch
-        self._script_error = None
-        self._results_archive_from_patch_test_run = None
-        self._results_from_patch_test_run = None
-        self._expected_failures = delegate.expected_failures()
-
-    def _run_command(self, command, success_message, failure_message):
-        try:
-            self._delegate.run_command(command)
-            self._delegate.command_passed(success_message, patch=self._patch)
-            return True
-        except ScriptError, e:
-            self._script_error = e
-            self.failure_status_id = self._delegate.command_failed(failure_message, script_error=self._script_error, patch=self._patch)
-            return False
-
-    def _clean(self):
-        return self._run_command([
-            "clean",
-        ],
-        "Cleaned working directory",
-        "Unable to clean working directory")
-
-    def _update(self):
-        # FIXME: Ideally the status server log message should include which revision we updated to.
-        return self._run_command([
-            "update",
-        ],
-        "Updated working directory",
-        "Unable to update working directory")
-
-    def _apply(self):
-        return self._run_command([
-            "apply-attachment",
-            "--no-update",
-            "--non-interactive",
-            self._patch.id(),
-        ],
-        "Applied patch",
-        "Patch does not apply")
-
-    def _build(self):
-        return self._run_command([
-            "build",
-            "--no-clean",
-            "--no-update",
-            "--build-style=%s" % self._delegate.build_style(),
-        ],
-        "Built patch",
-        "Patch does not build")
-
-    def _build_without_patch(self):
-        return self._run_command([
-            "build",
-            "--force-clean",
-            "--no-update",
-            "--build-style=%s" % self._delegate.build_style(),
-        ],
-        "Able to build without patch",
-        "Unable to build without patch")
-
-    def _test(self):
-        return self._run_command([
-            "build-and-test",
-            "--no-clean",
-            "--no-update",
-            # Notice that we don't pass --build, which means we won't build!
-            "--test",
-            "--non-interactive",
-        ],
-        "Passed tests",
-        "Patch does not pass tests")
-
-    def _build_and_test_without_patch(self):
-        return self._run_command([
-            "build-and-test",
-            "--force-clean",
-            "--no-update",
-            "--build",
-            "--test",
-            "--non-interactive",
-        ],
-        "Able to pass tests without patch",
-        "Unable to pass tests without patch (tree is red?)")
-
-    def _land(self):
-        # Unclear if this should pass --quiet or not.  If --parent-command always does the reporting, then it should.
-        return self._run_command([
-            "land-attachment",
-            "--force-clean",
-            "--non-interactive",
-            "--parent-command=" + self._delegate.parent_command(),
-            self._patch.id(),
-        ],
-        "Landed patch",
-        "Unable to land patch")
-
-    def _report_flaky_tests(self, flaky_test_results, results_archive):
-        self._delegate.report_flaky_tests(self._patch, flaky_test_results, results_archive)
-
-    def _results_failed_different_tests(self, first, second):
-        first_failing_tests = [] if not first else first.failing_tests()
-        second_failing_tests = [] if not second else second.failing_tests()
-        return first_failing_tests != second_failing_tests
-
-    def _test_patch(self):
-        if self._test():
-            return True
-
-        # Note: archive_last_test_results deletes the results directory, making these calls order-sensitive.
-        # We could remove this dependency by building the test_results from the archive.
-        first_results = self._delegate.test_results()
-        first_results_archive = self._delegate.archive_last_test_results(self._patch)
-        first_script_error = self._script_error
-        first_failure_status_id = self.failure_status_id
-
-        if self._expected_failures.failures_were_expected(first_results):
-            return True
-
-        if self._test():
-            # Only report flaky tests if we were successful at parsing results.json and archiving results.
-            if first_results and first_results_archive:
-                self._report_flaky_tests(first_results.failing_test_results(), first_results_archive)
-            return True
-
-        second_results = self._delegate.test_results()
-        if self._results_failed_different_tests(first_results, second_results):
-            # We could report flaky tests here, but we would need to be careful
-            # to use similar checks to ExpectedFailures._can_trust_results
-            # to make sure we don't report constant failures as flakes when
-            # we happen to hit the --exit-after-N-failures limit.
-            # See https://bugs.webkit.org/show_bug.cgi?id=51272
-            return False
-
-        # Archive (and remove) second results so test_results() after
-        # build_and_test_without_patch won't use second results instead of the clean-tree results.
-        second_results_archive = self._delegate.archive_last_test_results(self._patch)
-
-        if self._build_and_test_without_patch():
-            # The error from the previous ._test() run is real, report it.
-            return self.report_failure(first_results_archive, first_results, first_script_error)
-
-        clean_tree_results = self._delegate.test_results()
-        self._expected_failures.update(clean_tree_results)
-
-        # Re-check if the original results are now to be expected to avoid a full re-try.
-        if self._expected_failures.failures_were_expected(first_results):
-            return True
-
-        # Now that we have updated information about failing tests with a clean checkout, we can
-        # tell if our original failures were unexpected and fail the patch if necessary.
-        if self._expected_failures.unexpected_failures_observed(first_results):
-            self.failure_status_id = first_failure_status_id
-            return self.report_failure(first_results_archive, first_results, first_script_error)
-
-        # We don't know what's going on.  The tree is likely very red (beyond our layout-test-results
-        # failure limit), so just keep retrying the patch until someone fixes the tree.
-        return False
-
-    def results_archive_from_patch_test_run(self, patch):
-        assert(self._patch.id() == patch.id())  # PatchAnalysisTask is not currently reusable.
-        return self._results_archive_from_patch_test_run
-
-    def results_from_patch_test_run(self, patch):
-        assert(self._patch.id() == patch.id())  # PatchAnalysisTask is not currently reusable.
-        return self._results_from_patch_test_run
-
-    def report_failure(self, results_archive=None, results=None, script_error=None):
-        if not self.validate():
-            return False
-        self._results_archive_from_patch_test_run = results_archive
-        self._results_from_patch_test_run = results
-        raise script_error or self._script_error
-
-    def validate(self):
-        raise NotImplementedError("subclasses must implement")
-
-    def run(self):
-        raise NotImplementedError("subclasses must implement")
diff --git a/Tools/Scripts/webkitpy/tool/bot/queueengine.py b/Tools/Scripts/webkitpy/tool/bot/queueengine.py
deleted file mode 100644
index 90e553f..0000000
--- a/Tools/Scripts/webkitpy/tool/bot/queueengine.py
+++ /dev/null
@@ -1,165 +0,0 @@
-# Copyright (c) 2009 Google Inc. All rights reserved.
-# Copyright (c) 2009 Apple Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-# 
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-# 
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import logging
-import sys
-import traceback
-
-from datetime import datetime, timedelta
-
-from webkitpy.common.system.executive import ScriptError
-from webkitpy.common.system.outputtee import OutputTee
-
-_log = logging.getLogger(__name__)
-
-
-# FIXME: This will be caught by "except Exception:" blocks, we should consider
-# making this inherit from SystemExit instead (or BaseException, except that's not recommended).
-class TerminateQueue(Exception):
-    pass
-
-
-class QueueEngineDelegate:
-    def queue_log_path(self):
-        raise NotImplementedError, "subclasses must implement"
-
-    def work_item_log_path(self, work_item):
-        raise NotImplementedError, "subclasses must implement"
-
-    def begin_work_queue(self):
-        raise NotImplementedError, "subclasses must implement"
-
-    def should_continue_work_queue(self):
-        raise NotImplementedError, "subclasses must implement"
-
-    def next_work_item(self):
-        raise NotImplementedError, "subclasses must implement"
-
-    def process_work_item(self, work_item):
-        raise NotImplementedError, "subclasses must implement"
-
-    def handle_unexpected_error(self, work_item, message):
-        raise NotImplementedError, "subclasses must implement"
-
-
-class QueueEngine:
-    def __init__(self, name, delegate, wakeup_event, seconds_to_sleep=120):
-        self._name = name
-        self._delegate = delegate
-        self._wakeup_event = wakeup_event
-        self._output_tee = OutputTee()
-        self._seconds_to_sleep = seconds_to_sleep
-
-    log_date_format = "%Y-%m-%d %H:%M:%S"
-    handled_error_code = 2
-
-    # Child processes exit with a special code so the parent queue process can detect that the error was handled.
-    @classmethod
-    def exit_after_handled_error(cls, error):
-        _log.error(error)
-        sys.exit(cls.handled_error_code)
-
-    def run(self):
-        self._begin_logging()
-
-        self._delegate.begin_work_queue()
-        while (self._delegate.should_continue_work_queue()):
-            try:
-                self._ensure_work_log_closed()
-                work_item = self._delegate.next_work_item()
-                if not work_item:
-                    self._sleep("No work item.")
-                    continue
-
-                # FIXME: Work logs should not depend on bug_id specifically.
-                #        This looks fixed, no?
-                self._open_work_log(work_item)
-                try:
-                    if not self._delegate.process_work_item(work_item):
-                        _log.warning("Unable to process work item.")
-                        continue
-                except ScriptError, e:
-                    # Use a special exit code to indicate that the error was already
-                    # handled in the child process and we should just keep looping.
-                    if e.exit_code == self.handled_error_code:
-                        continue
-                    message = "Unexpected failure when processing patch!  Please file a bug against webkit-patch.\n%s" % e.message_with_output()
-                    self._delegate.handle_unexpected_error(work_item, message)
-            except TerminateQueue, e:
-                self._stopping("TerminateQueue exception received.")
-                return 0
-            except KeyboardInterrupt, e:
-                self._stopping("User terminated queue.")
-                return 1
-            except Exception, e:
-                traceback.print_exc()
-                # Don't try to tell the status bot, in case telling it causes an exception.
-                self._sleep("Exception while preparing queue")
-        self._stopping("Delegate terminated queue.")
-        return 0
-
-    def _stopping(self, message):
-        _log.info("\n%s" % message)
-        self._delegate.stop_work_queue(message)
-        # Be careful to shut down our OutputTee or the unit tests will be unhappy.
-        self._ensure_work_log_closed()
-        self._output_tee.remove_log(self._queue_log)
-
-    def _begin_logging(self):
-        self._queue_log = self._output_tee.add_log(self._delegate.queue_log_path())
-        self._work_log = None
-
-    def _open_work_log(self, work_item):
-        work_item_log_path = self._delegate.work_item_log_path(work_item)
-        if not work_item_log_path:
-            return
-        self._work_log = self._output_tee.add_log(work_item_log_path)
-
-    def _ensure_work_log_closed(self):
-        # If we still have a bug log open, close it.
-        if self._work_log:
-            self._output_tee.remove_log(self._work_log)
-            self._work_log = None
-
-    def _now(self):
-        """Overriden by the unit tests to allow testing _sleep_message"""
-        return datetime.now()
-
-    def _sleep_message(self, message):
-        wake_time = self._now() + timedelta(seconds=self._seconds_to_sleep)
-        if self._seconds_to_sleep < 3 * 60:
-            sleep_duration_text = str(self._seconds_to_sleep) + ' seconds'
-        else:
-            sleep_duration_text = str(round(self._seconds_to_sleep / 60)) + ' minutes'
-        return "%s Sleeping until %s (%s)." % (message, wake_time.strftime(self.log_date_format), sleep_duration_text)
-
-    def _sleep(self, message):
-        _log.info(self._sleep_message(message))
-        self._wakeup_event.wait(self._seconds_to_sleep)
-        self._wakeup_event.clear()
diff --git a/Tools/Scripts/webkitpy/tool/bot/queueengine_unittest.py b/Tools/Scripts/webkitpy/tool/bot/queueengine_unittest.py
deleted file mode 100644
index de9fa23..0000000
--- a/Tools/Scripts/webkitpy/tool/bot/queueengine_unittest.py
+++ /dev/null
@@ -1,183 +0,0 @@
-# Copyright (c) 2009 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-# 
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-# 
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import datetime
-import os
-import shutil
-import tempfile
-import threading
-import unittest2 as unittest
-
-from webkitpy.common.system.executive import ScriptError
-from webkitpy.common.system.outputcapture import OutputCapture
-from webkitpy.tool.bot.queueengine import QueueEngine, QueueEngineDelegate, TerminateQueue
-
-
-class LoggingDelegate(QueueEngineDelegate):
-    def __init__(self, test):
-        self._test = test
-        self._callbacks = []
-        self._run_before = False
-        self.stop_message = None
-
-    expected_callbacks = [
-        'queue_log_path',
-        'begin_work_queue',
-        'should_continue_work_queue',
-        'next_work_item',
-        'work_item_log_path',
-        'process_work_item',
-        'should_continue_work_queue',
-        'stop_work_queue',
-    ]
-
-    def record(self, method_name):
-        self._callbacks.append(method_name)
-
-    def queue_log_path(self):
-        self.record("queue_log_path")
-        return os.path.join(self._test.temp_dir, "queue_log_path")
-
-    def work_item_log_path(self, work_item):
-        self.record("work_item_log_path")
-        return os.path.join(self._test.temp_dir, "work_log_path", "%s.log" % work_item)
-
-    def begin_work_queue(self):
-        self.record("begin_work_queue")
-
-    def should_continue_work_queue(self):
-        self.record("should_continue_work_queue")
-        if not self._run_before:
-            self._run_before = True
-            return True
-        return False
-
-    def next_work_item(self):
-        self.record("next_work_item")
-        return "work_item"
-
-    def process_work_item(self, work_item):
-        self.record("process_work_item")
-        self._test.assertEqual(work_item, "work_item")
-        return True
-
-    def handle_unexpected_error(self, work_item, message):
-        self.record("handle_unexpected_error")
-        self._test.assertEqual(work_item, "work_item")
-
-    def stop_work_queue(self, message):
-        self.record("stop_work_queue")
-        self.stop_message = message
-
-
-class RaisingDelegate(LoggingDelegate):
-    def __init__(self, test, exception):
-        LoggingDelegate.__init__(self, test)
-        self._exception = exception
-
-    def process_work_item(self, work_item):
-        self.record("process_work_item")
-        raise self._exception
-
-
-class FastQueueEngine(QueueEngine):
-    def __init__(self, delegate):
-        QueueEngine.__init__(self, "fast-queue", delegate, threading.Event())
-
-    # No sleep for the wicked.
-    seconds_to_sleep = 0
-
-    def _sleep(self, message):
-        pass
-
-
-class QueueEngineTest(unittest.TestCase):
-    def test_trivial(self):
-        delegate = LoggingDelegate(self)
-        self._run_engine(delegate)
-        self.assertEqual(delegate.stop_message, "Delegate terminated queue.")
-        self.assertEqual(delegate._callbacks, LoggingDelegate.expected_callbacks)
-        self.assertTrue(os.path.exists(os.path.join(self.temp_dir, "queue_log_path")))
-        self.assertTrue(os.path.exists(os.path.join(self.temp_dir, "work_log_path", "work_item.log")))
-
-    def test_unexpected_error(self):
-        delegate = RaisingDelegate(self, ScriptError(exit_code=3))
-        self._run_engine(delegate)
-        expected_callbacks = LoggingDelegate.expected_callbacks[:]
-        work_item_index = expected_callbacks.index('process_work_item')
-        # The unexpected error should be handled right after process_work_item starts
-        # but before any other callback.  Otherwise callbacks should be normal.
-        expected_callbacks.insert(work_item_index + 1, 'handle_unexpected_error')
-        self.assertEqual(delegate._callbacks, expected_callbacks)
-
-    def test_handled_error(self):
-        delegate = RaisingDelegate(self, ScriptError(exit_code=QueueEngine.handled_error_code))
-        self._run_engine(delegate)
-        self.assertEqual(delegate._callbacks, LoggingDelegate.expected_callbacks)
-
-    def _run_engine(self, delegate, engine=None, termination_message=None):
-        if not engine:
-            engine = QueueEngine("test-queue", delegate, threading.Event())
-        if not termination_message:
-            termination_message = "Delegate terminated queue."
-        expected_logs = "\n%s\n" % termination_message
-        OutputCapture().assert_outputs(self, engine.run, expected_logs=expected_logs)
-
-    def _test_terminating_queue(self, exception, termination_message):
-        work_item_index = LoggingDelegate.expected_callbacks.index('process_work_item')
-        # The terminating error should be handled right after process_work_item.
-        # There should be no other callbacks after stop_work_queue.
-        expected_callbacks = LoggingDelegate.expected_callbacks[:work_item_index + 1]
-        expected_callbacks.append("stop_work_queue")
-
-        delegate = RaisingDelegate(self, exception)
-        self._run_engine(delegate, termination_message=termination_message)
-
-        self.assertEqual(delegate._callbacks, expected_callbacks)
-        self.assertEqual(delegate.stop_message, termination_message)
-
-    def test_terminating_error(self):
-        self._test_terminating_queue(KeyboardInterrupt(), "User terminated queue.")
-        self._test_terminating_queue(TerminateQueue(), "TerminateQueue exception received.")
-
-    def test_now(self):
-        """Make sure there are no typos in the QueueEngine.now() method."""
-        engine = QueueEngine("test", None, None)
-        self.assertIsInstance(engine._now(), datetime.datetime)
-
-    def test_sleep_message(self):
-        engine = QueueEngine("test", None, None)
-        engine._now = lambda: datetime.datetime(2010, 1, 1)
-        expected_sleep_message = "MESSAGE Sleeping until 2010-01-01 00:02:00 (120 seconds)."
-        self.assertEqual(engine._sleep_message("MESSAGE"), expected_sleep_message)
-
-    def setUp(self):
-        self.temp_dir = tempfile.mkdtemp(suffix="work_queue_test_logs")
-
-    def tearDown(self):
-        shutil.rmtree(self.temp_dir)
diff --git a/Tools/Scripts/webkitpy/tool/bot/stylequeuetask.py b/Tools/Scripts/webkitpy/tool/bot/stylequeuetask.py
deleted file mode 100644
index 01f7f72..0000000
--- a/Tools/Scripts/webkitpy/tool/bot/stylequeuetask.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright (c) 2012 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from webkitpy.tool.bot.patchanalysistask import PatchAnalysisTask, PatchAnalysisTaskDelegate, UnableToApplyPatch
-
-
-class StyleQueueTaskDelegate(PatchAnalysisTaskDelegate):
-    def parent_command(self):
-        return "style-queue"
-
-
-class StyleQueueTask(PatchAnalysisTask):
-    def validate(self):
-        self._patch = self._delegate.refetch_patch(self._patch)
-        if self._patch.is_obsolete():
-            return False
-        if self._patch.bug().is_closed():
-            return False
-        if self._patch.review() == "-":
-            return False
-        return True
-
-    def _check_style(self):
-        return self._run_command([
-            "check-style-local",
-            "--non-interactive",
-            "--quiet",
-        ],
-        "Style checked",
-        "Patch did not pass style check")
-
-    def _apply_watch_list(self):
-        return self._run_command([
-            "apply-watchlist-local",
-            self._patch.bug_id(),
-        ],
-        "Watchlist applied",
-        "Unabled to apply watchlist")
-
-    def run(self):
-        if not self._clean():
-            return False
-        if not self._update():
-            return False
-        if not self._apply():
-            raise UnableToApplyPatch(self._patch)
-        self._apply_watch_list()
-        if not self._check_style():
-            return self.report_failure()
-        return True
diff --git a/Tools/Scripts/webkitpy/tool/commands/__init__.py b/Tools/Scripts/webkitpy/tool/commands/__init__.py
index acad2e0..50ed755 100644
--- a/Tools/Scripts/webkitpy/tool/commands/__init__.py
+++ b/Tools/Scripts/webkitpy/tool/commands/__init__.py
@@ -1,13 +1,11 @@
 # Required for Python to search this directory for module files
 
-from webkitpy.tool.commands.adduserstogroups import AddUsersToGroups
 from webkitpy.tool.commands.applywatchlistlocal import ApplyWatchListLocal
-from webkitpy.tool.commands.bugfortest import BugForTest
 from webkitpy.tool.commands.chromechannels import ChromeChannels
-from webkitpy.tool.commands.download import *
-from webkitpy.tool.commands.findusers import FindUsers
-from webkitpy.tool.commands.gardenomatic import GardenOMatic
 from webkitpy.tool.commands.commitannouncer import CommitAnnouncerCommand
+from webkitpy.tool.commands.download import *
+from webkitpy.tool.commands.flakytests import FlakyTests
+from webkitpy.tool.commands.gardenomatic import GardenOMatic
 from webkitpy.tool.commands.prettydiff import PrettyDiff
 from webkitpy.tool.commands.queries import *
 from webkitpy.tool.commands.rebaseline import Rebaseline
diff --git a/Tools/Scripts/webkitpy/tool/commands/bugfortest.py b/Tools/Scripts/webkitpy/tool/commands/flakytests.py
similarity index 66%
rename from Tools/Scripts/webkitpy/tool/commands/bugfortest.py
rename to Tools/Scripts/webkitpy/tool/commands/flakytests.py
index 36aa6b5..88a6dd1 100644
--- a/Tools/Scripts/webkitpy/tool/commands/bugfortest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/flakytests.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2010 Google Inc. All rights reserved.
+# Copyright (c) 2011 Google Inc. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
@@ -27,22 +27,16 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
-from webkitpy.tool.bot.flakytestreporter import FlakyTestReporter
+from webkitpy.layout_tests.layout_package.bot_test_expectations import BotTestExpecationsFactory
+from webkitpy.layout_tests.models.test_expectations import TestExpectationParser, TestExpectationsModel, TestExpectations
 
 
-# This is mostly a command for testing FlakyTestReporter, however
-# it could be easily expanded to auto-create bugs, etc. if another
-# command outside of webkitpy wanted to use it.
-class BugForTest(AbstractDeclarativeCommand):
-    name = "bug-for-test"
-    help_text = "Finds the bugzilla bug for a given test"
+class FlakyTests(AbstractDeclarativeCommand):
+    name = "flaky-tests"
+    help_text = "Generate FlakyTests file from the flakiness dashboard"
 
     def execute(self, options, args, tool):
-        reporter = FlakyTestReporter(tool, "webkitpy")
-        search_string = args[0]
-        bug = reporter._lookup_bug_for_flaky_test(search_string)
-        if bug:
-            bug = reporter._follow_duplicate_chain(bug)
-            print "%5s %s" % (bug.id(), bug.title())
-        else:
-            print "No bugs found matching '%s'" % search_string
+        port = tool.port_factory.get()
+        full_port_name = port.determine_full_port_name(tool, options, port.port_name)
+        expectations = BotTestExpecationsFactory().expectations_for_port(full_port_name)
+        print TestExpectations.list_to_string(expectations.expectation_lines())
diff --git a/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py b/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py
index b94b031..2947e4a 100644
--- a/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py
@@ -217,8 +217,8 @@
 
     def test_csv(self):
         self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
-                      ('test-win-xp,failures/expected/image.html,BUGTEST,IMAGE\n'
-                       'test-win-xp,failures/expected/text.html,BUGTEST,FAIL\n'),
+                      ('test-win-xp,failures/expected/image.html,Bug(test),IMAGE\n'
+                       'test-win-xp,failures/expected/text.html,Bug(test),FAIL\n'),
                       csv=True)
 
     def test_paths(self):
@@ -228,15 +228,6 @@
                        'LayoutTests/platform/test-win-xp/TestExpectations\n'),
                       paths=True)
 
-    def test_platform(self):
-        self.run_test(['platform/test-mac-leopard/http/test.html'],
-                      ('// For test-mac-snowleopard\n'
-                       'platform/test-mac-leopard [ Pass Skip WontFix ]\n'  # Note that this is the expectation (from being skipped internally), not the test name
-                       '\n'
-                       '// For test-mac-leopard\n'
-                       'platform/test-mac-leopard/http/test.html [ Pass ]\n'),
-                      platform='test-mac-*')
-
 class PrintBaselinesTest(unittest.TestCase):
     def setUp(self):
         self.oc = None
diff --git a/Tools/Scripts/webkitpy/tool/commands/rebaseline.py b/Tools/Scripts/webkitpy/tool/commands/rebaseline.py
index a2f3b03..6a283f8 100644
--- a/Tools/Scripts/webkitpy/tool/commands/rebaseline.py
+++ b/Tools/Scripts/webkitpy/tool/commands/rebaseline.py
@@ -428,7 +428,8 @@
         tests_to_rebaseline = {}
         expectations = TestExpectations(port, include_overrides=True)
         for test in expectations.get_rebaselining_failures():
-            tests_to_rebaseline[test] = TestExpectations.suffixes_for_expectations(expectations.get_expectations(test))
+            suffixes = TestExpectations.suffixes_for_expectations(expectations.get_expectations(test))
+            tests_to_rebaseline[test] = suffixes or BASELINE_SUFFIX_LIST
         return tests_to_rebaseline
 
     def _add_tests_to_rebaseline_for_port(self, port_name):
diff --git a/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py b/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py
index b64746a..8c2fbb6 100644
--- a/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py
@@ -498,12 +498,16 @@
         self.assertDictEqual(self.command._tests_to_rebaseline(self.lion_port), {'userscripts/another-test.html': set(['png', 'txt', 'wav'])})
         self.assertEqual(self._read(self.lion_expectations_path), '')
 
+    def test_rebaseline_without_other_expectations(self):
+        self._write("userscripts/another-test.html", "Dummy test contents")
+        self._write(self.lion_expectations_path, "Bug(x) userscripts/another-test.html [ Rebaseline ]\n")
+        self.assertDictEqual(self.command._tests_to_rebaseline(self.lion_port), {'userscripts/another-test.html': ('png', 'wav', 'txt')})
+
 
 class _FakeOptimizer(BaselineOptimizer):
     def read_results_by_directory(self, baseline_name):
         if baseline_name.endswith('txt'):
-            return {'LayoutTests/passes/text.html': '123456',
-                    'LayoutTests/platform/test-mac-leopard/passes/text.html': 'abcdef'}
+            return {'LayoutTests/passes/text.html': '123456'}
         return {}
 
 
@@ -522,13 +526,11 @@
         self.command.execute(MockOptions(suffixes='txt', missing=False, platform=None), ['passes/text.html'], self.tool)
         self.assertEqual(self.lines,
             ['passes/text-expected.txt:',
-             '  (generic): 123456',
-             '  test-mac-leopard: abcdef'])
+             '  (generic): 123456'])
 
     def test_missing_baselines(self):
         self.command.execute(MockOptions(suffixes='png,txt', missing=True, platform=None), ['passes/text.html'], self.tool)
         self.assertEqual(self.lines,
             ['passes/text-expected.png: (no baselines found)',
              'passes/text-expected.txt:',
-             '  (generic): 123456',
-             '  test-mac-leopard: abcdef'])
+             '  (generic): 123456'])
diff --git a/Tools/Scripts/webkitpy/tool/commands/stepsequence.py b/Tools/Scripts/webkitpy/tool/commands/stepsequence.py
index 1668cdb..8947712 100644
--- a/Tools/Scripts/webkitpy/tool/commands/stepsequence.py
+++ b/Tools/Scripts/webkitpy/tool/commands/stepsequence.py
@@ -1,9 +1,9 @@
 # Copyright (C) 2009 Google Inc. All rights reserved.
-# 
+#
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
-# 
+#
 #     * Redistributions of source code must retain the above copyright
 # notice, this list of conditions and the following disclaimer.
 #     * Redistributions in binary form must reproduce the above
@@ -13,7 +13,7 @@
 #     * Neither the name of Google Inc. nor the names of its
 # contributors may be used to endorse or promote products derived from
 # this software without specific prior written permission.
-# 
+#
 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -27,12 +27,12 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 import logging
+import sys
 
 from webkitpy.tool import steps
 
 from webkitpy.common.checkout.scm import CheckoutNeedsUpdate
 from webkitpy.common.system.executive import ScriptError
-from webkitpy.tool.bot.queueengine import QueueEngine
 
 _log = logging.getLogger(__name__)
 
@@ -66,6 +66,14 @@
         for step in self._steps:
             step(tool, options).run(state)
 
+    # Child processes exit with a special code so the parent queue process can detect that the error was handled.
+    handled_error_code = 2
+
+    @classmethod
+    def exit_after_handled_error(cls, error):
+        _log.error(error)
+        sys.exit(cls.handled_error_code)
+
     def run_and_handle_errors(self, tool, options, state=None):
         if not state:
             state = {}
@@ -76,11 +84,11 @@
             if options.parent_command:
                 command = tool.command_by_name(options.parent_command)
                 command.handle_checkout_needs_update(tool, state, options, e)
-            QueueEngine.exit_after_handled_error(e)
+            self.exit_after_handled_error(e)
         except ScriptError, e:
             if not options.quiet:
                 _log.error(e.message_with_output())
             if options.parent_command:
                 command = tool.command_by_name(options.parent_command)
                 command.handle_script_error(tool, state, e)
-            QueueEngine.exit_after_handled_error(e)
+            self.exit_after_handled_error(e)
diff --git a/Tools/Scripts/webkitpy/w3c/__init__.py b/Tools/Scripts/webkitpy/w3c/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/Tools/Scripts/webkitpy/w3c/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/Tools/Scripts/webkitpy/w3c/test_converter.py b/Tools/Scripts/webkitpy/w3c/test_converter.py
new file mode 100644
index 0000000..1b029b8
--- /dev/null
+++ b/Tools/Scripts/webkitpy/w3c/test_converter.py
@@ -0,0 +1,193 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above
+#    copyright notice, this list of conditions and the following
+#    disclaimer.
+# 2. Redistributions in binary form must reproduce the above
+#    copyright notice, this list of conditions and the following
+#    disclaimer in the documentation and/or other materials
+#    provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+
+import logging
+import re
+
+from webkitpy.common.host import Host
+from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup, Tag
+
+
+_log = logging.getLogger(__name__)
+
+
+class W3CTestConverter(object):
+
+    def __init__(self):
+        self._host = Host()
+        self._filesystem = self._host.filesystem
+        self._host.initialize_scm()
+        self._webkit_root = self._host.scm().checkout_root
+
+        # These settings might vary between WebKit and Blink
+        self._css_property_file = self.path_from_webkit_root('Source', 'core', 'css', 'CSSPropertyNames.in')
+        self._css_property_split_string = 'alias_for='
+
+        self.prefixed_properties = self.read_webkit_prefixed_css_property_list()
+
+    def path_from_webkit_root(self, *comps):
+        return self._filesystem.abspath(self._filesystem.join(self._webkit_root, *comps))
+
+    def read_webkit_prefixed_css_property_list(self):
+        prefixed_properties = []
+
+        contents = self._filesystem.read_text_file(self._css_property_file)
+        for line in contents.splitlines():
+            # Find lines starting with the -webkit- prefix.
+            match = re.match('-webkit-[\w|-]*', line)
+            if match:
+                # Ignore lines where both the prefixed and non-prefixed property
+                # are supported - denoted by "-webkit-some-property alias_for=some-property".
+                fields = line.split(self._css_property_split_string)
+                if len(fields) == 2 and fields[1].strip() in fields[0].strip():
+                    continue
+                prefixed_properties.append(match.group(0))
+
+        return prefixed_properties
+
+    def convert_for_webkit(self, new_path, filename):
+        """ Converts a file's |contents| so it will function correctly in its |new_path| in Webkit.
+
+        Returns the list of modified properties and the modified text if the file was modifed, None otherwise."""
+        contents = self._filesystem.read_binary_file(filename)
+        if filename.endswith('.css'):
+            return self.convert_css(contents, filename)
+        return self.convert_html(new_path, contents, filename)
+
+    def convert_css(self, contents, filename):
+        return self.add_webkit_prefix_to_unprefixed_properties(contents, filename)
+
+    def convert_html(self, new_path, contents, filename):
+        doc = BeautifulSoup(contents)
+        did_modify_paths = self.convert_testharness_paths(doc, new_path, filename)
+        converted_properties_and_content = self.convert_prefixed_properties(doc, filename)
+        return converted_properties_and_content if (did_modify_paths or converted_properties_and_content[0]) else None
+
+    def convert_testharness_paths(self, doc, new_path, filename):
+        """ Update links to testharness.js in the BeautifulSoup |doc| to point to the copy in |new_path|.
+
+        Returns whether the document was modified."""
+
+        # Look for the W3C-style path to any testharness files - scripts (.js) or links (.css)
+        pattern = re.compile('/resources/testharness')
+        script_tags = doc.findAll(src=pattern)
+        link_tags = doc.findAll(href=pattern)
+        testharness_tags = script_tags + link_tags
+
+        if not testharness_tags:
+            return False
+
+        resources_path = self.path_from_webkit_root('LayoutTests', 'resources')
+        resources_relpath = self._filesystem.relpath(resources_path, new_path)
+
+        for tag in testharness_tags:
+            # FIXME: We need to handle img, audio, video tags also.
+            attr = 'src'
+            if tag.name != 'script':
+                attr = 'href'
+
+            if not attr in tag.attrMap:
+                # FIXME: Figure out what to do w/ invalid tags. For now, we return False
+                # and leave the document unmodified, which means that it'll probably fail to run.
+                _log.error("Missing an attr in %s" % filename)
+                return False
+
+            old_path = tag[attr]
+            new_tag = Tag(doc, tag.name, tag.attrs)
+            new_tag[attr] = re.sub(pattern, resources_relpath + '/testharness', old_path)
+
+            self.replace_tag(tag, new_tag)
+
+        return True
+
+    def convert_prefixed_properties(self, doc, filename):
+        """ Searches a BeautifulSoup |doc| for any CSS properties requiring the -webkit- prefix and converts them.
+
+        Returns the list of converted properties and the modified document as a string """
+
+        converted_properties = []
+
+        # Look for inline and document styles.
+        inline_styles = doc.findAll(style=re.compile('.*'))
+        style_tags = doc.findAll('style')
+        all_styles = inline_styles + style_tags
+
+        for tag in all_styles:
+
+            # Get the text whether in a style tag or style attribute.
+            style_text = ''
+            if tag.name == 'style':
+                if not tag.contents:
+                    continue
+                style_text = tag.contents[0]
+            else:
+                style_text = tag['style']
+
+            updated_style_text = self.add_webkit_prefix_to_unprefixed_properties(style_text, filename)
+
+            # Rewrite tag only if changes were made.
+            if updated_style_text[0]:
+                converted_properties.extend(updated_style_text[0])
+
+                new_tag = Tag(doc, tag.name, tag.attrs)
+                new_tag.insert(0, updated_style_text[1])
+
+                self.replace_tag(tag, new_tag)
+
+        return (converted_properties, doc.prettify())
+
+    def add_webkit_prefix_to_unprefixed_properties(self, text, filename):
+        """ Searches |text| for instances of properties requiring the -webkit- prefix and adds the prefix to them.
+
+        Returns the list of converted properties and the modified text."""
+
+        converted_properties = []
+
+        for prefixed_property in self.prefixed_properties:
+            # FIXME: add in both the prefixed and unprefixed versions, rather than just replacing them?
+            # That might allow the imported test to work in other browsers more easily.
+
+            unprefixed_property = prefixed_property.replace('-webkit-', '')
+
+            # Look for the various ways it might be in the CSS
+            # Match the property preceded by either whitespace or a left curly brace,
+            # or at the beginning of the string (for inline style attribute)
+            pattern = '([\s{]|^)' + unprefixed_property + '(\s+:|:)'
+            if re.search(pattern, text):
+                _log.info('converting %s -> %s' % (unprefixed_property, prefixed_property))
+                converted_properties.append(prefixed_property)
+                # Keep the delimiter captured by the first group so selectors like ".foo{prop:" stay intact.
+                text = re.sub(pattern, '\\1' + prefixed_property + ':', text)
+
+        # FIXME: Handle the JS versions of these properties and GetComputedStyle, too.
+        return (converted_properties, text)
+
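+    # Hedged example of the conversion performed by add_webkit_prefix_to_unprefixed_properties()
+    # above, assuming a hypothetical prefix-only property '-webkit-foo' is in self.prefixed_properties:
+    #
+    #     input:  'div { foo: 10px; }'
+    #     output: (['-webkit-foo'], 'div { -webkit-foo: 10px; }')
+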
+    def replace_tag(self, old_tag, new_tag):
+        index = old_tag.parent.contents.index(old_tag)
+        old_tag.parent.insert(index, new_tag)
+        old_tag.extract()
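+
+
+# Hedged usage sketch (hypothetical paths), mirroring how test_importer.py drives the converter:
+#
+#     converter = W3CTestConverter()
+#     result = converter.convert_for_webkit('/path/to/LayoutTests/w3c/some/dir', filename='/path/to/source/test.html')
+#     if result:
+#         prefixed_properties, converted_text = result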
diff --git a/Tools/Scripts/webkitpy/w3c/test_converter_unittest.py b/Tools/Scripts/webkitpy/w3c/test_converter_unittest.py
new file mode 100644
index 0000000..ff104ab
--- /dev/null
+++ b/Tools/Scripts/webkitpy/w3c/test_converter_unittest.py
@@ -0,0 +1,319 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above
+#    copyright notice, this list of conditions and the following
+#    disclaimer.
+# 2. Redistributions in binary form must reproduce the above
+#    copyright notice, this list of conditions and the following
+#    disclaimer in the documentation and/or other materials
+#    provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+
+import os
+import re
+import unittest2 as unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
+from webkitpy.w3c.test_converter import W3CTestConverter
+
+
+DUMMY_FILENAME = 'dummy.html'
+
+class W3CTestConverterTest(unittest.TestCase):
+
+    def fake_dir_path(self, converter, dirname):
+        return converter.path_from_webkit_root("LayoutTests", "css", dirname)
+
+    def test_read_prefixed_property_list(self):
+        """ Tests that the current list of properties requiring the -webkit- prefix load correctly """
+
+        # FIXME: We should be passing in a MockHost here ...
+        converter = W3CTestConverter()
+        prop_list = converter.prefixed_properties
+        self.assertTrue(prop_list, 'No prefixed properties found')
+        for prop in prop_list:
+            self.assertTrue(prop.startswith('-webkit-'))
+
+    def test_convert_for_webkit_nothing_to_convert(self):
+        """ Tests convert_for_webkit() using a basic test that has nothing to convert """
+
+        test_html = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
+"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<title>CSS Test: DESCRIPTION OF TEST</title>
+<link rel="author" title="NAME_OF_AUTHOR"
+href="mailto:EMAIL OR http://CONTACT_PAGE"/>
+<link rel="help" href="RELEVANT_SPEC_SECTION"/>
+<meta name="assert" content="TEST ASSERTION"/>
+<style type="text/css"><![CDATA[
+CSS FOR TEST
+]]></style>
+</head>
+<body>
+CONTENT OF TEST
+</body>
+</html>
+"""
+        converter = W3CTestConverter()
+
+        oc = OutputCapture()
+        oc.capture_output()
+        try:
+            converted = converter.convert_html('/nothing/to/convert', test_html, DUMMY_FILENAME)
+        finally:
+            oc.restore_output()
+
+        self.verify_no_conversion_happened(converted)
+
+    def test_convert_for_webkit_harness_only(self):
+        """ Tests convert_for_webkit() using a basic JS test that uses testharness.js only and has no prefixed properties """
+
+        test_html = """<head>
+<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
+<script src="/resources/testharness.js"></script>
+</head>
+"""
+        converter = W3CTestConverter()
+        fake_dir_path = self.fake_dir_path(converter, "harnessonly")
+
+        converted = converter.convert_html(fake_dir_path, test_html, DUMMY_FILENAME)
+
+        self.verify_conversion_happened(converted)
+        self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
+        self.verify_prefixed_properties(converted, [])
+
+    def test_convert_for_webkit_properties_only(self):
+        """ Tests convert_for_webkit() using a test that has 2 prefixed properties: 1 in a style block + 1 inline style """
+
+        test_html = """<html>
+<head>
+<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
+<script src="/resources/testharness.js"></script>
+<style type="text/css">
+
+#block1 { @test0@: propvalue; }
+
+</style>
+</head>
+<body>
+<div id="elem1" style="@test1@: propvalue;"></div>
+</body>
+</html>
+"""
+        converter = W3CTestConverter()
+        fake_dir_path = self.fake_dir_path(converter, 'harnessandprops')
+        test_content = self.generate_test_content(converter.prefixed_properties, 1, test_html)
+
+        oc = OutputCapture()
+        oc.capture_output()
+        try:
+            converted = converter.convert_html(fake_dir_path, test_content[1], DUMMY_FILENAME)
+        finally:
+            oc.restore_output()
+
+        self.verify_conversion_happened(converted)
+        self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
+        self.verify_prefixed_properties(converted, test_content[0])
+
+    def test_convert_for_webkit_harness_and_properties(self):
+        """ Tests convert_for_webkit() using a basic JS test that uses testharness.js and testharness.css and has 4 prefixed properties: 3 in a style block + 1 inline style """
+
+        test_html = """<html>
+<head>
+<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
+<script src="/resources/testharness.js"></script>
+<style type="text/css">
+
+#block1 { @test0@: propvalue; }
+#block2 { @test1@: propvalue; }
+#block3 { @test2@: propvalue; }
+
+</style>
+</head>
+<body>
+<div id="elem1" style="@test3@: propvalue;"></div>
+</body>
+</html>
+"""
+        converter = W3CTestConverter()
+        fake_dir_path = self.fake_dir_path(converter, 'harnessandprops')
+
+        oc = OutputCapture()
+        oc.capture_output()
+        try:
+            test_content = self.generate_test_content(converter.prefixed_properties, 2, test_html)
+            converted = converter.convert_html(fake_dir_path, test_content[1], DUMMY_FILENAME)
+        finally:
+            oc.restore_output()
+
+        self.verify_conversion_happened(converted)
+        self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
+        self.verify_prefixed_properties(converted, test_content[0])
+
+    def test_convert_test_harness_paths(self):
+        """ Tests convert_testharness_paths() with a test that uses all three testharness files """
+
+        test_html = """<head>
+<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+</head>
+"""
+        converter = W3CTestConverter()
+
+        fake_dir_path = self.fake_dir_path(converter, 'testharnesspaths')
+
+        doc = BeautifulSoup(test_html)
+        oc = OutputCapture()
+        oc.capture_output()
+        try:
+            converted = converter.convert_testharness_paths(doc, fake_dir_path, DUMMY_FILENAME)
+        finally:
+            oc.restore_output()
+
+        self.verify_conversion_happened(converted)
+        self.verify_test_harness_paths(converter, doc, fake_dir_path, 2, 1)
+
+    def test_convert_prefixed_properties(self):
+        """ Tests convert_prefixed_properties() file that has 20 properties requiring the -webkit- prefix:
+        10 in one style block + 5 in another style
+        block + 5 inline styles, including one with multiple prefixed properties.
+        The properties in the test content are in all sorts of wack formatting.
+        """
+
+        test_html = """<html>
+<style type="text/css"><![CDATA[
+
+.block1 {
+    width: 300px;
+    height: 300px
+}
+
+.block2 {
+    @test0@: propvalue;
+}
+
+.block3{@test1@: propvalue;}
+
+.block4 { @test2@:propvalue; }
+
+.block5{ @test3@ :propvalue; }
+
+#block6 {    @test4@   :   propvalue;  }
+
+#block7
+{
+    @test5@: propvalue;
+}
+
+#block8 { @test6@: propvalue; }
+
+#block9:pseudo
+{
+
+    @test7@: propvalue;
+    @test8@:  propvalue propvalue propvalue;;
+}
+
+]]></style>
+</head>
+<body>
+    <div id="elem1" style="@test9@: propvalue;"></div>
+    <div id="elem2" style="propname: propvalue; @test10@ : propvalue; propname:propvalue;"></div>
+    <div id="elem2" style="@test11@: propvalue; @test12@ : propvalue; @test13@   :propvalue;"></div>
+    <div id="elem3" style="@test14@:propvalue"></div>
+</body>
+<style type="text/css"><![CDATA[
+
+.block10{ @test15@: propvalue; }
+.block11{ @test16@: propvalue; }
+.block12{ @test17@: propvalue; }
+#block13:pseudo
+{
+    @test18@: propvalue;
+    @test19@: propvalue;
+}
+
+]]></style>
+</html>
+"""
+        converter = W3CTestConverter()
+
+        test_content = self.generate_test_content(converter.prefixed_properties, 20, test_html)
+
+        oc = OutputCapture()
+        oc.capture_output()
+        try:
+            converted = converter.convert_prefixed_properties(BeautifulSoup(test_content[1]), DUMMY_FILENAME)
+        finally:
+            oc.restore_output()
+
+        self.verify_conversion_happened(converted)
+        self.verify_prefixed_properties(converted, test_content[0])
+
+    def verify_conversion_happened(self, converted):
+        self.assertTrue(converted, "conversion didn't happen")
+
+    def verify_no_conversion_happened(self, converted):
+        self.assertEqual(converted, None, 'test should not have been converted')
+
+    def verify_test_harness_paths(self, converter, converted, test_path, num_src_paths, num_href_paths):
+        if isinstance(converted, basestring):
+            converted = BeautifulSoup(converted)
+
+        resources_dir = converter.path_from_webkit_root("LayoutTests", "resources")
+
+        # Verify the original paths are gone, and the new paths are present.
+        orig_path_pattern = re.compile('\"/resources/testharness')
+        self.assertEquals(len(converted.findAll(src=orig_path_pattern)), 0, 'testharness src path was not converted')
+        self.assertEquals(len(converted.findAll(href=orig_path_pattern)), 0, 'testharness href path was not converted')
+
+        new_relpath = os.path.relpath(resources_dir, test_path)
+        relpath_pattern = re.compile(new_relpath)
+        self.assertEquals(len(converted.findAll(src=relpath_pattern)), num_src_paths, 'testharness src relative path not correct')
+        self.assertEquals(len(converted.findAll(href=relpath_pattern)), num_href_paths, 'testharness href relative path not correct')
+
+    def verify_prefixed_properties(self, converted, test_properties):
+        self.assertEqual(len(converted[0]), len(test_properties), 'Incorrect number of properties converted')
+        for test_prop in test_properties:
+            self.assertTrue((test_prop in converted[1]), 'Property ' + test_prop + ' not found in converted doc')
+
+    def generate_test_content(self, full_property_list, num_test_properties, html):
+        """Inserts properties requiring a -webkit- prefix into the content, replacing \'@testXX@\' with a property."""
+        test_properties = full_property_list[:num_test_properties]
+
+        # Replace the tokens in the test html with the test properties. Walk backward
+        # through the list to replace the double-digit tokens first.
+        index = len(test_properties) - 1
+        while index >= 0:
+            # Use the unprefixed version
+            test_prop = test_properties[index].replace('-webkit-', '')
+            # Replace the token
+            html = html.replace('@test' + str(index) + '@', test_prop)
+            index -= 1
+
+        return (test_properties, html)
diff --git a/Tools/Scripts/webkitpy/w3c/test_importer.py b/Tools/Scripts/webkitpy/w3c/test_importer.py
new file mode 100644
index 0000000..b4ba374
--- /dev/null
+++ b/Tools/Scripts/webkitpy/w3c/test_importer.py
@@ -0,0 +1,451 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above
+#    copyright notice, this list of conditions and the following
+#    disclaimer.
+# 2. Redistributions in binary form must reproduce the above
+#    copyright notice, this list of conditions and the following
+#    disclaimer in the documentation and/or other materials
+#    provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+
+"""
+ This script imports a directory of W3C CSS tests into WebKit.
+
+ You must have checked out the W3C repository to your local drive.
+
+ This script will import the tests into WebKit following these rules:
+
+    - Only tests that are approved or officially submitted awaiting review are imported
+
+    - All tests are imported into LayoutTests/csswg
+
+    - If the tests are approved, they'll be imported into a directory tree that
+      mirrors the CSS Mercurial repo. For example, <csswg_repo_root>/approved/css2.1 is brought in
+      as LayoutTests/csswg/approved/css2.1, maintaining the entire directory structure under that
+
+    - If the tests are submitted, they'll be brought in as LayoutTests/csswg/submitted and will also
+      maintain their directory structure under that. For example, everything under
+      <csswg_repo_root>/contributors/adobe/submitted is brought into submitted, mirroring its
+      directory structure in the csswg repo
+
+    - If the import directory specified is just a contributor folder, only the submitted folder
+      for that contributor is brought in. For example, to import all of Mozilla's tests, either
+      <csswg_repo_root>/contributors/mozilla or <csswg_repo_root>/contributors/mozilla/submitted
+      will work and are equivalent
+
+    - For the time being, this script won't work if you try to import the full set of submitted
+      tests under contributors/*/submitted. Since these are awaiting review, this is just a small
+      control mechanism to enforce carefully selecting what non-approved tests are imported.
+      It can obviously and easily be changed.
+
+    - By default, only reftests and JS tests are imported. This can be overridden with a -a or --all
+      argument
+
+    - Also by default, if test files by the same name already exist in the destination directory,
+      they are overwritten with the idea that running this script would refresh files periodically.
+      This can also be overridden by a -n or --no-overwrite flag
+
+    - All files are converted to work in WebKit:
+         1. Paths to testharness.js files are modified to point to WebKit's copy of them in
+            LayoutTests/resources, using the correct relative path from the new location
+         2. All CSS properties requiring the -webkit- vendor prefix are prefixed - the current
+            list of what needs prefixes is read from Source/core/css/CSSPropertyNames.in
+         3. Each reftest has its own copy of its reference file following the naming conventions
+            new-run-webkit-tests expects
+         4. If a reference file lives outside the directory of the test that uses it, it is checked
+            for paths to support files, since it will be imported into a different position relative
+            to the test file (the test's own directory)
+
+     - Upon completion, the script outputs the total number of tests imported, broken down by test type
+
+     - Also upon completion, each directory where files are imported will have w3c-import.log written
+       with a timestamp, the W3C Mercurial changeset if available, the list of CSS properties used that
+       require prefixes, the list of imported files, and guidance for future test modification and
+       maintenance.
+
+     - On subsequent imports, this file is read to determine if files have been removed in the newer changesets.
+       The script removes these files accordingly.
+"""
+
+# FIXME: Change this file to use the Host abstractions rather than os, sys, shutil, etc.
+
+import datetime
+import logging
+import mimetypes
+import optparse
+import os
+import shutil
+import sys
+
+from webkitpy.common.host import Host
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.w3c.test_parser import TestParser
+from webkitpy.w3c.test_converter import W3CTestConverter
+
+
+TEST_STATUS_UNKNOWN = 'unknown'
+TEST_STATUS_APPROVED = 'approved'
+TEST_STATUS_SUBMITTED = 'submitted'
+
+CHANGESET_NOT_AVAILABLE = 'Not Available'
+
+
+_log = logging.getLogger(__name__)
+
+
+def main(_argv, _stdout, _stderr):
+    options, args = parse_args()
+    import_dir = args[0]
+    if len(args) == 1:
+        repo_dir = os.path.dirname(import_dir)
+    else:
+        repo_dir = args[1]
+
+    if not os.path.exists(import_dir):
+        sys.exit('Source directory %s not found!' % import_dir)
+
+    if not os.path.exists(repo_dir):
+        sys.exit('Repository directory %s not found!' % repo_dir)
+    if not repo_dir in import_dir:
+        sys.exit('Repository directory %s must be a parent of %s' % (repo_dir, import_dir))
+
+    configure_logging()
+
+    test_importer = TestImporter(Host(), import_dir, repo_dir, options)
+    test_importer.do_import()
+
+
+def configure_logging():
+    class LogHandler(logging.StreamHandler):
+
+        def format(self, record):
+            if record.levelno > logging.INFO:
+                return "%s: %s" % (record.levelname, record.getMessage())
+            return record.getMessage()
+
+    logger = logging.getLogger()
+    logger.setLevel(logging.INFO)
+    handler = LogHandler()
+    handler.setLevel(logging.INFO)
+    logger.addHandler(handler)
+    return handler
+
+
+def parse_args():
+    parser = optparse.OptionParser(usage='usage: %prog [options] w3c_test_directory [repo_directory]')
+    parser.add_option('-n', '--no-overwrite', dest='overwrite', action='store_false', default=True,
+        help='Flag to prevent duplicate test files from overwriting existing tests. By default, they will be overwritten')
+    parser.add_option('-a', '--all', action='store_true', default=False,
+        help='Import all tests including reftests, JS tests, and manual/pixel tests. By default, only reftests and JS tests are imported')
+
+    options, args = parser.parse_args()
+    if len(args) not in (1, 2):
+        parser.error('Incorrect number of arguments')
+    return options, args
+
+
+class TestImporter(object):
+
+    def __init__(self, host, source_directory, repo_dir, options):
+        self.host = host
+        self.source_directory = source_directory
+        self.options = options
+
+        self.filesystem = self.host.filesystem
+
+        self._webkit_root = __file__.split(self.filesystem.sep + 'Tools')[0]
+        self.repo_dir = repo_dir
+        subdirs = os.path.dirname(os.path.relpath(source_directory, repo_dir))
+
+        self.destination_directory = os.path.join(self.path_from_webkit_root("LayoutTests"), 'w3c', subdirs)
+
+        self.changeset = CHANGESET_NOT_AVAILABLE
+        self.test_status = TEST_STATUS_UNKNOWN
+
+        self.import_list = []
+
+    def path_from_webkit_root(self, *comps):
+        return self.filesystem.abspath(self.filesystem.join(self._webkit_root, *comps))
+
+    def do_import(self):
+        self.find_importable_tests(self.source_directory)
+        self.load_changeset()
+        self.import_tests()
+
+    def load_changeset(self):
+        """Returns the current changeset from mercurial or "Not Available"."""
+        try:
+            self.changeset = self.host.executive.run_command(['hg', 'tip']).split('changeset:')[1]
+        except (OSError, ScriptError):
+            self.changeset = CHANGESET_NOT_AVAILABLE
+
+    def find_importable_tests(self, directory):
+        # FIXME: use filesystem
+        for root, dirs, files in os.walk(directory):
+            _log.info('Scanning ' + root + '...')
+            total_tests = 0
+            reftests = 0
+            jstests = 0
+
+            # "archive" and "data" dirs are internal csswg things that live in every approved directory.
+            # FIXME: skip 'incoming' tests for now, but we should rework the 'test_status' concept and
+            # support reading them as well.
+            DIRS_TO_SKIP = ('.git', '.hg', 'data', 'archive', 'incoming')
+            for d in DIRS_TO_SKIP:
+                if d in dirs:
+                    dirs.remove(d)
+
+            copy_list = []
+
+            for filename in files:
+                # FIXME: This block should really be a separate function, but the early-continues make that difficult.
+
+                if filename.startswith('.') or filename.endswith('.pl'):
+                    continue  # For some reason the w3c repo contains random perl scripts we don't care about.
+
+                fullpath = os.path.join(root, filename)
+
+                mimetype = mimetypes.guess_type(fullpath)
+                if not 'html' in str(mimetype[0]) and not 'xml' in str(mimetype[0]):
+                    copy_list.append({'src': fullpath, 'dest': filename})
+                    continue
+
+                test_parser = TestParser(vars(self.options), filename=fullpath)
+                test_info = test_parser.analyze_test()
+                if test_info is None:
+                    continue
+
+                if 'reference' in test_info.keys():
+                    reftests += 1
+                    total_tests += 1
+                    test_basename = os.path.basename(test_info['test'])
+
+                    # Add the ref file, following WebKit style.
+                    # FIXME: Ideally we'd support reading the metadata
+                    # directly rather than relying on a naming convention.
+                    # Using a naming convention creates duplicate copies of the
+                    # reference files.
+                    ref_file = os.path.splitext(test_basename)[0] + '-expected'
+                    ref_file += os.path.splitext(test_basename)[1]
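+                    # For example, a hypothetical test green-box.xht whose reference is
+                    # green-box-ref.xht gets a copy of that reference named green-box-expected.xht.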
+
+                    copy_list.append({'src': test_info['reference'], 'dest': ref_file})
+                    copy_list.append({'src': test_info['test'], 'dest': filename})
+
+                    # Update any support files that need to move as well to remain relative to the -expected file.
+                    if 'refsupport' in test_info.keys():
+                        for support_file in test_info['refsupport']:
+                            source_file = os.path.join(os.path.dirname(test_info['reference']), support_file)
+                            source_file = os.path.normpath(source_file)
+
+                            # Keep the dest as it was
+                            to_copy = {'src': source_file, 'dest': support_file}
+
+                            # Only add it once
+                            if not(to_copy in copy_list):
+                                copy_list.append(to_copy)
+                elif 'jstest' in test_info.keys():
+                    jstests += 1
+                    total_tests += 1
+                    copy_list.append({'src': fullpath, 'dest': filename})
+                else:
+                    total_tests += 1
+                    copy_list.append({'src': fullpath, 'dest': filename})
+
+            if not total_tests:
+                # We can skip the support directory if no tests were found.
+                if 'support' in dirs:
+                    dirs.remove('support')
+
+            if copy_list:
+                # Only add this directory to the list if there's something to import
+                self.import_list.append({'dirname': root, 'copy_list': copy_list,
+                    'reftests': reftests, 'jstests': jstests, 'total_tests': total_tests})
+
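+    # find_importable_tests() above appends one entry per directory to self.import_list,
+    # e.g. (hypothetical values):
+    #
+    #     {'dirname': '/path/to/repo/approved/css3-flexbox',
+    #      'copy_list': [{'src': '/path/to/repo/approved/css3-flexbox/test.html', 'dest': 'test.html'}],
+    #      'reftests': 0, 'jstests': 1, 'total_tests': 1}
+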
+    def import_tests(self):
+        converter = W3CTestConverter()
+        total_imported_tests = 0
+        total_imported_reftests = 0
+        total_imported_jstests = 0
+        total_prefixed_properties = {}
+
+        for dir_to_copy in self.import_list:
+            total_imported_tests += dir_to_copy['total_tests']
+            total_imported_reftests += dir_to_copy['reftests']
+            total_imported_jstests += dir_to_copy['jstests']
+
+            prefixed_properties = []
+
+            if not dir_to_copy['copy_list']:
+                continue
+
+            orig_path = dir_to_copy['dirname']
+
+            subpath = os.path.relpath(orig_path, self.repo_dir)
+            new_path = os.path.join(self.destination_directory, subpath)
+
+            if not(os.path.exists(new_path)):
+                os.makedirs(new_path)
+
+            copied_files = []
+
+            for file_to_copy in dir_to_copy['copy_list']:
+                # FIXME: Split this block into a separate function.
+                orig_filepath = os.path.normpath(file_to_copy['src'])
+
+                if os.path.isdir(orig_filepath):
+                    # FIXME: Figure out what is triggering this and what to do about it.
+                    _log.error('%s refers to a directory' % orig_filepath)
+                    continue
+
+                if not(os.path.exists(orig_filepath)):
+                    _log.warning('%s not found. Possible error in the test.', orig_filepath)
+                    continue
+
+                new_filepath = os.path.join(new_path, file_to_copy['dest'])
+
+                if not(os.path.exists(os.path.dirname(new_filepath))):
+                    os.makedirs(os.path.dirname(new_filepath))
+
+                if not self.options.overwrite and os.path.exists(new_filepath):
+                    _log.info('Skipping import of existing file ' + new_filepath)
+                    # Record the existing file so remove_deleted_files() below doesn't delete it.
+                    copied_files.append(new_filepath.replace(self._webkit_root, ''))
+                    continue
+
+                # FIXME: Maybe doing a file diff is in order here for existing files?
+                # In other words, there's no sense in overwriting identical files, but
+                # there's no harm in copying the identical thing.
+                _log.info('Importing: %s', orig_filepath)
+                _log.info('       As: %s', new_filepath)
+
+                # Only html, xml, or css should be converted
+                # FIXME: Eventually, so should js when support is added for this type of conversion
+                mimetype = mimetypes.guess_type(orig_filepath)
+                if 'html' in str(mimetype[0]) or 'xml' in str(mimetype[0]) or 'css' in str(mimetype[0]):
+                    converted_file = converter.convert_for_webkit(new_path, filename=orig_filepath)
+
+                    if not converted_file:
+                        shutil.copyfile(orig_filepath, new_filepath)  # The file was unmodified.
+                    else:
+                        for prefixed_property in converted_file[0]:
+                            total_prefixed_properties.setdefault(prefixed_property, 0)
+                            total_prefixed_properties[prefixed_property] += 1
+
+                        prefixed_properties.extend(set(converted_file[0]) - set(prefixed_properties))
+                        outfile = open(new_filepath, 'wb')
+                        outfile.write(converted_file[1])
+                        outfile.close()
+                else:
+                    shutil.copyfile(orig_filepath, new_filepath)
+
+                copied_files.append(new_filepath.replace(self._webkit_root, ''))
+
+            self.remove_deleted_files(new_path, copied_files)
+            self.write_import_log(new_path, copied_files, prefixed_properties)
+
+        _log.info('Import complete')
+
+        _log.info('IMPORTED %d TOTAL TESTS', total_imported_tests)
+        _log.info('Imported %d reftests', total_imported_reftests)
+        _log.info('Imported %d JS tests', total_imported_jstests)
+        _log.info('Imported %d pixel/manual tests', total_imported_tests - total_imported_jstests - total_imported_reftests)
+        _log.info('')
+        _log.info('Properties needing prefixes (by count):')
+        for prefixed_property in sorted(total_prefixed_properties, key=lambda p: total_prefixed_properties[p]):
+            _log.info('  %s: %s', prefixed_property, total_prefixed_properties[prefixed_property])
+
+    def setup_destination_directory(self):
+        """ Creates a destination directory that mirrors that of the source approved or submitted directory """
+
+        self.update_test_status()
+
+        start = self.source_directory.find(self.test_status)
+        new_subpath = self.source_directory[len(self.repo_dir):]
+
+        destination_directory = os.path.join(self.destination_directory, new_subpath)
+
+        if not os.path.exists(destination_directory):
+            os.makedirs(destination_directory)
+
+        _log.info('Tests will be imported into: %s', destination_directory)
+
+    def update_test_status(self):
+        """ Sets the test status to either 'approved' or 'submitted' """
+
+        status = TEST_STATUS_UNKNOWN
+
+        if 'approved' in self.source_directory.split(os.path.sep):
+            status = TEST_STATUS_APPROVED
+        elif 'submitted' in self.source_directory.split(os.path.sep):
+            status = TEST_STATUS_SUBMITTED
+
+        self.test_status = status
+
+    def remove_deleted_files(self, import_directory, new_file_list):
+        """ Reads an import log in |import_directory|, compares it to the |new_file_list|, and removes files not in the new list."""
+
+        previous_file_list = []
+
+        import_log_file = os.path.join(import_directory, 'w3c-import.log')
+        if not os.path.exists(import_log_file):
+            return
+
+        import_log = open(import_log_file, 'r')
+        contents = import_log.readlines()
+
+        if 'List of files:\n' in contents:
+            list_index = contents.index('List of files:\n') + 1
+            previous_file_list = [filename.strip() for filename in contents[list_index:]]
+
+        deleted_files = set(previous_file_list) - set(new_file_list)
+        for deleted_file in deleted_files:
+            _log.info('Deleting file removed from the W3C repo: %s', deleted_file)
+            deleted_file = os.path.join(self._webkit_root, deleted_file)
+            os.remove(deleted_file)
+
+        import_log.close()
+
+    def write_import_log(self, import_directory, file_list, prop_list):
+        """ Writes a w3c-import.log file in each directory with imported files. """
+
+        now = datetime.datetime.now()
+
+        import_log = open(os.path.join(import_directory, 'w3c-import.log'), 'w')
+        import_log.write('The tests in this directory were imported from the W3C repository.\n')
+        import_log.write('Do NOT modify these tests directly in WebKit. Instead, push changes to the W3C CSS repo:\n\n')
+        import_log.write('http://hg.csswg.org/test\n\n')
+        import_log.write('Then run Tools/Scripts/import-w3c-tests in WebKit to reimport\n\n')
+        import_log.write('Do NOT modify or remove this file\n\n')
+        import_log.write('------------------------------------------------------------------------\n')
+        import_log.write('Last Import: ' + now.strftime('%Y-%m-%d %H:%M') + '\n')
+        import_log.write('W3C Mercurial changeset: ' + self.changeset + '\n')
+        import_log.write('Test status at time of import: ' + self.test_status + '\n')
+        import_log.write('------------------------------------------------------------------------\n')
+        import_log.write('Properties requiring vendor prefixes:\n')
+        if prop_list:
+            for prop in prop_list:
+                import_log.write(prop + '\n')
+        else:
+            import_log.write('None\n')
+        import_log.write('------------------------------------------------------------------------\n')
+        import_log.write('List of files:\n')
+        for item in file_list:
+            import_log.write(item + '\n')
+
+        import_log.close()
diff --git a/Tools/Scripts/webkitpy/w3c/test_importer_unittest.py b/Tools/Scripts/webkitpy/w3c/test_importer_unittest.py
new file mode 100644
index 0000000..526f1d2
--- /dev/null
+++ b/Tools/Scripts/webkitpy/w3c/test_importer_unittest.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above
+#    copyright notice, this list of conditions and the following
+#    disclaimer.
+# 2. Redistributions in binary form must reproduce the above
+#    copyright notice, this list of conditions and the following
+#    disclaimer in the documentation and/or other materials
+#    provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+
+import optparse
+import shutil
+import tempfile
+import unittest2 as unittest
+
+from webkitpy.common.host import Host
+from webkitpy.common.system.executive_mock import MockExecutive2, ScriptError
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.w3c.test_importer import TestImporter
+
+
+DUMMY_SOURCE_DIR = '/w3c'
+DUMMY_REPO_DIR = '/blink/LayoutTests'
+
+class TestImporterTest(unittest.TestCase):
+
+    def test_import_dir_with_no_tests_and_no_hg(self):
+        # FIXME: Use MockHosts instead.
+        host = Host()
+        host.executive = MockExecutive2(exception=OSError())
+
+        importer = TestImporter(host, DUMMY_SOURCE_DIR, DUMMY_REPO_DIR, optparse.Values({"overwrite": False}))
+        importer.source_directory = importer.path_from_webkit_root("Tools", "Scripts", "webkitpy", "w3c")
+        importer.destination_directory = tempfile.mkdtemp(prefix='csswg')
+
+        oc = OutputCapture()
+        oc.capture_output()
+        try:
+            importer.do_import()
+        finally:
+            oc.restore_output()
+            shutil.rmtree(importer.destination_directory, ignore_errors=True)
+
+    def test_import_dir_with_no_tests(self):
+        # FIXME: Use MockHosts instead.
+        host = Host()
+        host.executive = MockExecutive2(exception=ScriptError("abort: no repository found in '/Volumes/Source/src/wk/Tools/Scripts/webkitpy/w3c' (.hg not found)!"))
+
+        importer = TestImporter(host, '/w3c', '/blink', optparse.Values({"overwrite": False}))
+        importer.source_directory = importer.path_from_webkit_root("Tools", "Scripts", "webkitpy", "w3c")
+        importer.destination_directory = tempfile.mkdtemp(prefix='csswg')
+
+        oc = OutputCapture()
+        oc.capture_output()
+        try:
+            importer.do_import()
+        finally:
+            oc.restore_output()
+            shutil.rmtree(importer.destination_directory, ignore_errors=True)
+
+    # FIXME: Need more tests, but need to add a mock filesystem w/ sample data.
diff --git a/Tools/Scripts/webkitpy/w3c/test_parser.py b/Tools/Scripts/webkitpy/w3c/test_parser.py
new file mode 100644
index 0000000..bb66fda
--- /dev/null
+++ b/Tools/Scripts/webkitpy/w3c/test_parser.py
@@ -0,0 +1,162 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above
+#    copyright notice, this list of conditions and the following
+#    disclaimer.
+# 2. Redistributions in binary form must reproduce the above
+#    copyright notice, this list of conditions and the following
+#    disclaimer in the documentation and/or other materials
+#    provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+
+import logging
+import re
+
+from webkitpy.common.host import Host
+from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup as Parser
+
+
+_log = logging.getLogger(__name__)
+
+
+class TestParser(object):
+
+    def __init__(self, options, filename):
+        self.options = options
+        self.filename = filename
+        self.host = Host()
+        self.filesystem = self.host.filesystem
+
+        self.test_doc = self.load_file(filename)
+        self.ref_doc = None
+
+    def load_file(self, filename):
+        """Returns the parsed contents of |filename|, or None if the file cannot be read or parsed."""
+        if self.filesystem.isfile(filename):
+            try:
+                return Parser(self.filesystem.read_binary_file(filename))
+            except:
+                # FIXME: Figure out what to do if we can't parse the file.
+                _log.error("Failed to parse %s", filename)
+                return None
+        if self.filesystem.isdir(filename):
+            # FIXME: Figure out what is triggering this and what to do about it.
+            _log.error("Trying to load %s, which is a directory", filename)
+        return None
+
+    def analyze_test(self, test_contents=None, ref_contents=None):
+        """ Analyzes a file to determine if it's a test, what type of test, and what reference or support files it requires. Returns all of the test info """
+
+        test_info = None
+
+        if test_contents is None and self.test_doc is None:
+            return test_info
+
+        if test_contents is not None:
+            self.test_doc = Parser(test_contents)
+
+        if ref_contents is not None:
+            self.ref_doc = Parser(ref_contents)
+
+        # First check if it's a reftest
+
+        matches = self.reference_links_of_type('match') + self.reference_links_of_type('mismatch')
+        if matches:
+            if len(matches) > 1:
+                # FIXME: Is this actually true? We should fix this.
+                _log.warning('Multiple references are not supported. Importing the first ref defined in %s',
+                             self.filesystem.basename(self.filename))
+
+            try:
+                ref_file = self.filesystem.join(self.filesystem.dirname(self.filename), matches[0]['href'])
+            except KeyError as e:
+                # FIXME: Figure out what to do w/ invalid test files.
+                _log.error('%s has a reference link but is missing the "href"', self.filename)
+                return None
+
+            if self.ref_doc is None:
+                self.ref_doc = self.load_file(ref_file)
+
+            test_info = {'test': self.filename, 'reference': ref_file}
+
+            # If the ref file path is relative, we need to check it for
+            # relative paths also because when it lands in WebKit, it will be
+            # moved down into the test dir.
+            #
+            # Note: The test files themselves are not checked for support files
+            # outside their directories as the convention in the CSSWG is to
+            # put all support files in the same dir or subdir as the test.
+            #
+            # All non-test files in the test's directory tree are normally
+            # copied as part of the import as they are assumed to be required
+            # support files.
+            #
+            # *But*, there is exactly one case in the entire css2.1 suite where
+            # a test depends on a file that lives in a different directory,
+            # which depends on another file that lives outside of its
+            # directory. This code covers that case :)
+            if matches[0]['href'].startswith('..'):
+                support_files = self.support_files(self.ref_doc)
+                test_info['refsupport'] = support_files
+
+        elif self.is_jstest():
+            test_info = {'test': self.filename, 'jstest': True}
+        elif self.options['all'] is True and not('-ref' in self.filename) and not('reference' in self.filename):
+            test_info = {'test': self.filename}
+
+        return test_info
+
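+    # Shapes of the test_info dicts returned by analyze_test() above (values are illustrative):
+    #
+    #     reftest:       {'test': '/path/test.html', 'reference': '/path/test-ref.html'}
+    #                    plus 'refsupport': [...] when the reference lives outside the test's directory
+    #     jstest:        {'test': '/path/test.html', 'jstest': True}
+    #     pixel/manual:  {'test': '/path/test.html'}  (only when options['all'] is set)
+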
+    def reference_links_of_type(self, reftest_type):
+        return self.test_doc.findAll(rel=reftest_type)
+
+    def is_jstest(self):
+        """Returns whether the file appears to be a jstest, by searching for usage of W3C-style testharness paths."""
+        return bool(self.test_doc.find(src=re.compile('[\'\"/]?/resources/testharness')))
+
+    def support_files(self, doc):
+        """ Searches the file for all paths specified in url()'s, href or src attributes."""
+        support_files = []
+
+        if doc is None:
+            return support_files
+
+        elements_with_src_attributes = doc.findAll(src=re.compile('.*'))
+        elements_with_href_attributes = doc.findAll(href=re.compile('.*'))
+
+        url_pattern = re.compile('url\(.*\)')
+        urls = []
+        for url in doc.findAll(text=url_pattern):
+            url = re.search(url_pattern, url)
+            url = re.sub('url\([\'\"]?', '', url.group(0))
+            url = re.sub('[\'\"]?\)', '', url)
+            urls.append(url)
+
+        src_paths = [src_tag['src'] for src_tag in elements_with_src_attributes]
+        href_paths = [href_tag['href'] for href_tag in elements_with_href_attributes]
+
+        paths = src_paths + href_paths + urls
+        for path in paths:
+            if not(path.startswith('http:')) and not(path.startswith('mailto:')):
+                support_files.append(path)
+
+        return support_files
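+
+
+# Hedged example: for a reference document containing a stylesheet <link href="support/ref.css">,
+# an <img src="../support/box.png"> and an inline url("../../support/image.png"), support_files()
+# returns those three relative paths; http: and mailto: URLs are filtered out.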
diff --git a/Tools/Scripts/webkitpy/w3c/test_parser_unittest.py b/Tools/Scripts/webkitpy/w3c/test_parser_unittest.py
new file mode 100644
index 0000000..7fb0c5b
--- /dev/null
+++ b/Tools/Scripts/webkitpy/w3c/test_parser_unittest.py
@@ -0,0 +1,217 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above
+#    copyright notice, this list of conditions and the following
+#    disclaimer.
+# 2. Redistributions in binary form must reproduce the above
+#    copyright notice, this list of conditions and the following
+#    disclaimer in the documentation and/or other materials
+#    provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+
+import os
+import unittest2 as unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.w3c.test_parser import TestParser
+
+
+options = {'all': False, 'no_overwrite': False}
+
+
+class TestParserTest(unittest.TestCase):
+
+    def test_analyze_test_reftest_one_match(self):
+        test_html = """<head>
+<link rel="match" href="green-box-ref.xht" />
+</head>
+"""
+        test_path = '/some/madeup/path/'
+        parser = TestParser(options, test_path + 'somefile.html')
+        test_info = parser.analyze_test(test_contents=test_html)
+
+        self.assertNotEqual(test_info, None, 'did not find a test')
+        self.assertTrue('test' in test_info.keys(), 'did not find a test file')
+        self.assertTrue('reference' in test_info.keys(), 'did not find a reference file')
+        self.assertTrue(test_info['reference'].startswith(test_path), 'reference path is not correct')
+        self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
+        self.assertFalse('jstest' in test_info.keys(), 'test should not have been analyzed as a jstest')
+
+    def test_analyze_test_reftest_multiple_matches(self):
+        test_html = """<head>
+<link rel="match" href="green-box-ref.xht" />
+<link rel="match" href="blue-box-ref.xht" />
+<link rel="match" href="orange-box-ref.xht" />
+</head>
+"""
+        oc = OutputCapture()
+        oc.capture_output()
+        try:
+            test_path = '/some/madeup/path/'
+            parser = TestParser(options, test_path + 'somefile.html')
+            test_info = parser.analyze_test(test_contents=test_html)
+        finally:
+            _, _, logs = oc.restore_output()
+
+        self.assertNotEqual(test_info, None, 'did not find a test')
+        self.assertTrue('test' in test_info.keys(), 'did not find a test file')
+        self.assertTrue('reference' in test_info.keys(), 'did not find a reference file')
+        self.assertTrue(test_info['reference'].startswith(test_path), 'reference path is not correct')
+        self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
+        self.assertFalse('jstest' in test_info.keys(), 'test should not have been analyzed as a jstest')
+
+        self.assertEqual(logs, 'Multiple references are not supported. Importing the first ref defined in somefile.html\n')
+
+    def test_analyze_test_reftest_match_and_mismatch(self):
+        test_html = """<head>
+<link rel="match" href="green-box-ref.xht" />
+<link rel="match" href="blue-box-ref.xht" />
+<link rel="mismatch" href="orange-box-notref.xht" />
+</head>
+"""
+        oc = OutputCapture()
+        oc.capture_output()
+
+        try:
+            test_path = '/some/madeup/path/'
+            parser = TestParser(options, test_path + 'somefile.html')
+            test_info = parser.analyze_test(test_contents=test_html)
+        finally:
+            _, _, logs = oc.restore_output()
+
+        self.assertNotEqual(test_info, None, 'did not find a test')
+        self.assertTrue('test' in test_info.keys(), 'did not find a test file')
+        self.assertTrue('reference' in test_info.keys(), 'did not find a reference file')
+        self.assertTrue(test_info['reference'].startswith(test_path), 'reference path is not correct')
+        self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
+        self.assertFalse('jstest' in test_info.keys(), 'test should not have been analyzed as a jstest')
+
+        self.assertEqual(logs, 'Multiple references are not supported. Importing the first ref defined in somefile.html\n')
+
+    def test_analyze_test_reftest_with_ref_support_files(self):
+        """ Tests analyze_test() using a reftest that refers to a reference file outside of the test's directory, where the reference file has paths to other support files """
+
+        test_html = """<html>
+<head>
+<link rel="match" href="../reference/green-box-ref.xht" />
+</head>
+"""
+        ref_html = """<head>
+<link href="support/css/ref-stylesheet.css" rel="stylesheet" type="text/css">
+<style type="text/css">
+    background-image: url("../../support/some-image.png")
+</style>
+</head>
+<body>
+<div><img src="../support/black96x96.png" alt="Image download support must be enabled" /></div>
+</body>
+</html>
+"""
+        test_path = '/some/madeup/path/'
+        parser = TestParser(options, test_path + 'somefile.html')
+        test_info = parser.analyze_test(test_contents=test_html, ref_contents=ref_html)
+
+        self.assertNotEqual(test_info, None, 'did not find a test')
+        self.assertTrue('test' in test_info.keys(), 'did not find a test file')
+        self.assertTrue('reference' in test_info.keys(), 'did not find a reference file')
+        self.assertTrue(test_info['reference'].startswith(test_path), 'reference path is not correct')
+        self.assertTrue('refsupport' in test_info.keys(), 'there should be refsupport files for this test')
+        self.assertEquals(len(test_info['refsupport']), 3, 'there should be 3 support files in this reference')
+        self.assertFalse('jstest' in test_info.keys(), 'test should not have been analyzed as a jstest')
+
+    def test_analyze_jstest(self):
+        """ Tests analyze_test() using a jstest """
+
+        test_html = """<head>
+<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
+<script src="/resources/testharness.js"></script>
+</head>
+"""
+        test_path = '/some/madeup/path/'
+        parser = TestParser(options, test_path + 'somefile.html')
+        test_info = parser.analyze_test(test_contents=test_html)
+
+        self.assertNotEqual(test_info, None, 'test_info is None')
+        self.assertTrue('test' in test_info.keys(), 'did not find a test file')
+        self.assertFalse('reference' in test_info.keys(), 'should not have found a reference file')
+        self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
+        self.assertTrue('jstest' in test_info.keys(), 'test should be a jstest')
+
+    def test_analyze_pixel_test_all_true(self):
+        """ Tests analyze_test() using a test that is neither a reftest or jstest with all=False """
+
+        test_html = """<html>
+<head>
+<title>CSS Test: DESCRIPTION OF TEST</title>
+<link rel="author" title="NAME_OF_AUTHOR" />
+<style type="text/css"><![CDATA[
+CSS FOR TEST
+]]></style>
+</head>
+<body>
+CONTENT OF TEST
+</body>
+</html>
+"""
+        # Set options to 'all' so this gets found
+        options['all'] = True
+
+        test_path = '/some/madeup/path/'
+        parser = TestParser(options, test_path + 'somefile.html')
+        test_info = parser.analyze_test(test_contents=test_html)
+
+        self.assertNotEqual(test_info, None, 'test_info is None')
+        self.assertTrue('test' in test_info.keys(), 'did not find a test file')
+        self.assertFalse('reference' in test_info.keys(), 'should not have found a reference file')
+        self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
+        self.assertFalse('jstest' in test_info.keys(), 'test should not be a jstest')
+
+    def test_analyze_pixel_test_all_false(self):
+        """ Tests analyze_test() using a test that is neither a reftest or jstest, with -all=False """
+
+        test_html = """<html>
+<head>
+<title>CSS Test: DESCRIPTION OF TEST</title>
+<link rel="author" title="NAME_OF_AUTHOR" />
+<style type="text/css"><![CDATA[
+CSS FOR TEST
+]]></style>
+</head>
+<body>
+CONTENT OF TEST
+</body>
+</html>
+"""
+        # Set all to false so this gets skipped
+        options['all'] = False
+
+        test_path = '/some/madeup/path/'
+        parser = TestParser(options, test_path + 'somefile.html')
+        test_info = parser.analyze_test(test_contents=test_html)
+
+        self.assertEqual(test_info, None, 'test should have been skipped')
+
+    def test_analyze_non_html_file(self):
+        """ Tests analyze_test() with a file that has no html"""
+        # FIXME: use a mock filesystem
+        parser = TestParser(options, os.path.join(os.path.dirname(__file__), 'test_parser.py'))
+        test_info = parser.analyze_test()
+        self.assertEqual(test_info, None, 'no tests should have been found in this file')
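
The tests above pin down the shape of the dictionary returned by TestParser.analyze_test(): a 'test' entry, an optional 'reference' plus 'refsupport' list for reftests, a 'jstest' flag for testharness.js tests, and None for skipped files. As a rough, hedged sketch (describe() is a hypothetical helper, not part of this patch), a caller could branch on that dictionary like this:

    def describe(test_info):
        """Summarize a TestParser.analyze_test() result, shaped as in the tests above."""
        if test_info is None:
            return 'skipped'  # e.g. a plain pixel test when options['all'] is False
        if 'jstest' in test_info:
            return 'jstest: %s' % test_info['test']
        if 'reference' in test_info:
            # 'refsupport' lists support files referenced from the reference file.
            return 'reftest: %s -> %s (+%d support files)' % (
                test_info['test'], test_info['reference'],
                len(test_info.get('refsupport', [])))
        return 'pixel test: %s' % test_info['test']
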
diff --git a/Tools/TestResultServer/handlers/testfilehandler.py b/Tools/TestResultServer/handlers/testfilehandler.py
index fd17202..0f18abb 100644
--- a/Tools/TestResultServer/handlers/testfilehandler.py
+++ b/Tools/TestResultServer/handlers/testfilehandler.py
@@ -46,7 +46,6 @@
 PARAM_NAME = "name"
 PARAM_KEY = "key"
 PARAM_TEST_TYPE = "testtype"
-PARAM_INCREMENTAL = "incremental"
 PARAM_TEST_LIST_JSON = "testlistjson"
 PARAM_CALLBACK = "callback"
 
@@ -226,7 +225,6 @@
 
         master = self.request.get(PARAM_MASTER)
         test_type = self.request.get(PARAM_TEST_TYPE)
-        incremental = self.request.get(PARAM_INCREMENTAL)
 
         logging.debug(
             "Processing upload request, master: %s, builder: %s, test_type: %s.",
@@ -244,14 +242,18 @@
 
         errors = []
         for file in files:
-            filename = file.filename.lower()
-            if ((incremental and filename == "results.json") or
-                (filename == "incremental_results.json")):
-                # Merge incremental json results.
-                update_succeeded = JsonResults.update(master, builder, test_type, file.value)
+            if file.filename == "incremental_results.json":
+                # FIXME: Remove this check once we stop uploading incremental_results.json files for layout tests.
+                if test_type == "layout-tests":
+                    update_succeeded = True
+                else:
+                    update_succeeded = JsonResults.update(master, builder, test_type, file.value, is_full_results_format=False)
             else:
-                update_succeeded = TestFile.add_file(
-                    master, builder, test_type, file.filename, file.value)
+                update_succeeded = bool(TestFile.add_file(master, builder, test_type, file.filename, file.value))
+                # FIXME: Upload full_results.json files for non-layout tests as well and stop supporting the
+                # incremental_results.json file format.
+                if file.filename == "full_results.json" and test_type == "layout-tests":
+                    update_succeeded |= JsonResults.update(master, builder, test_type, file.value, is_full_results_format=True)
 
             if not update_succeeded:
                 errors.append(
diff --git a/Tools/TestResultServer/main.py b/Tools/TestResultServer/main.py
index 2fa61e5..a9b00cf 100644
--- a/Tools/TestResultServer/main.py
+++ b/Tools/TestResultServer/main.py
@@ -28,7 +28,7 @@
 
 # Request a modern Django
 from google.appengine.dist import use_library
-use_library('django', '1.1')
+use_library('django', '1.3')
 
 from google.appengine.ext import webapp
 from google.appengine.ext.webapp.util import run_wsgi_app
diff --git a/Tools/TestResultServer/model/datastorefile.py b/Tools/TestResultServer/model/datastorefile.py
index ac28d64..84642b8 100755
--- a/Tools/TestResultServer/model/datastorefile.py
+++ b/Tools/TestResultServer/model/datastorefile.py
@@ -29,6 +29,7 @@
 from datetime import datetime
 import logging
 
+from google.appengine.ext import blobstore
 from google.appengine.ext import db
 
 MAX_DATA_ENTRY_PER_FILE = 10
@@ -65,9 +66,20 @@
 
     data = None
 
+    # FIXME: Remove this once all the bots have cycled after converting to the high-replication database.
+    def _convert_blob_keys(self, keys):
+        converted_keys = []
+        for key in keys:
+            new_key = blobstore.BlobMigrationRecord.get_new_blob_key(key)
+            if new_key:
+                converted_keys.append(new_key)
+            else:
+                converted_keys.append(key)
+        return converted_keys
+
     def delete_data(self, keys=None):
         if not keys:
-            keys = self.data_keys
+            keys = self._convert_blob_keys(self.data_keys)
 
         for key in keys:
             data_entry = DataEntry.get(key)
@@ -91,9 +103,11 @@
         # reason, only the data pointed by new_data_keys may be corrupted,
         # the existing data_keys data remains untouched. The corrupted data
         # in new_data_keys will be overwritten in next update.
-        keys = self.new_data_keys
+        keys = self._convert_blob_keys(self.new_data_keys)
         self.new_data_keys = []
 
+        # FIXME: is all this complexity with storing the file in chunks really needed anymore?
+        # Can we just store it in a single blob?
         while start < len(data):
             if keys:
                 key = keys[0]
@@ -123,7 +137,7 @@
         if keys:
             self.delete_data(keys)
 
-        temp_keys = self.data_keys
+        temp_keys = self._convert_blob_keys(self.data_keys)
         self.data_keys = self.new_data_keys
         self.new_data_keys = temp_keys
         self.data = data
@@ -136,7 +150,7 @@
             return None
 
         data = []
-        for key in self.data_keys:
+        for key in self._convert_blob_keys(self.data_keys):
             logging.info("Loading data for key: %s.", key)
             data_entry = DataEntry.get(key)
             if not data_entry:
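
The _convert_blob_keys() helper above follows a simple pattern: map each pre-migration blob key to its post-migration key when a BlobMigrationRecord exists for it, and fall back to the original key otherwise. A minimal, App-Engine-free sketch of that pattern, using a plain dict in place of the blobstore lookup (names here are illustrative only):

    def convert_keys(keys, migration_map):
        # migration_map stands in for blobstore.BlobMigrationRecord lookups:
        # old key -> new key for entries that were migrated.
        return [migration_map.get(key, key) for key in keys]

    # Only 'old-1' was migrated; 'old-2' is kept as-is.
    assert convert_keys(['old-1', 'old-2'], {'old-1': 'new-1'}) == ['new-1', 'old-2']
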
diff --git a/Tools/TestResultServer/model/jsonresults.py b/Tools/TestResultServer/model/jsonresults.py
index 16316f3..4b3ad73 100755
--- a/Tools/TestResultServer/model/jsonresults.py
+++ b/Tools/TestResultServer/model/jsonresults.py
@@ -27,81 +27,92 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 from datetime import datetime
-from django.utils import simplejson
 import logging
 import sys
 import traceback
 
+# FIXME: Once we're on python 2.7, just use json directly.
+try:
+    from django.utils import simplejson
+except ImportError:
+    import json as simplejson
+
 from model.testfile import TestFile
 
 JSON_RESULTS_FILE = "results.json"
 JSON_RESULTS_FILE_SMALL = "results-small.json"
 JSON_RESULTS_PREFIX = "ADD_RESULTS("
 JSON_RESULTS_SUFFIX = ");"
-JSON_RESULTS_VERSION_KEY = "version"
-JSON_RESULTS_BUILD_NUMBERS = "buildNumbers"
-JSON_RESULTS_TESTS = "tests"
-JSON_RESULTS_RESULTS = "results"
-JSON_RESULTS_TIMES = "times"
-JSON_RESULTS_PASS = "P"
-JSON_RESULTS_SKIP = "X"
-JSON_RESULTS_NO_DATA = "N"
+
 JSON_RESULTS_MIN_TIME = 3
 JSON_RESULTS_HIERARCHICAL_VERSION = 4
 JSON_RESULTS_MAX_BUILDS = 500
 JSON_RESULTS_MAX_BUILDS_SMALL = 100
 
+BUG_KEY = "bugs"
+BUILD_NUMBERS_KEY = "buildNumbers"
+EXPECTED_KEY = "expected"
+FAILURE_MAP_KEY = "failure_map"
+FAILURES_BY_TYPE_KEY = "num_failures_by_type"
+FIXABLE_COUNTS_KEY = "fixableCounts"
+RESULTS_KEY = "results"
+TESTS_KEY = "tests"
+TIME_KEY = "time"
+TIMES_KEY = "times"
+VERSIONS_KEY = "version"
 
-def _add_path_to_trie(path, value, trie):
-    if not "/" in path:
-        trie[path] = value
-        return
+AUDIO = "A"
+CRASH = "C"
+IMAGE = "I"
+IMAGE_PLUS_TEXT = "Z"
+# This is only output by gtests.
+FLAKY = "L"
+MISSING = "O"
+NO_DATA = "N"
+NOTRUN = "Y"
+PASS = "P"
+SKIP = "X"
+TEXT = "F"
+TIMEOUT = "T"
 
-    directory, slash, rest = path.partition("/")
-    if not directory in trie:
-        trie[directory] = {}
-    _add_path_to_trie(rest, value, trie[directory])
+AUDIO_STRING = "AUDIO"
+CRASH_STRING = "CRASH"
+IMAGE_PLUS_TEXT_STRING = "IMAGE+TEXT"
+IMAGE_STRING = "IMAGE"
+FLAKY_STRING = "FLAKY"
+MISSING_STRING = "MISSING"
+NO_DATA_STRING = "NO DATA"
+NOTRUN_STRING = "NOTRUN"
+PASS_STRING = "PASS"
+SKIP_STRING = "SKIP"
+TEXT_STRING = "TEXT"
+TIMEOUT_STRING = "TIMEOUT"
 
+FAILURE_TO_CHAR = {
+    AUDIO_STRING: AUDIO,
+    CRASH_STRING: CRASH,
+    IMAGE_PLUS_TEXT_STRING: IMAGE_PLUS_TEXT,
+    IMAGE_STRING: IMAGE,
+    FLAKY_STRING: FLAKY,
+    MISSING_STRING: MISSING,
+    NO_DATA_STRING: NO_DATA,
+    NOTRUN_STRING: NOTRUN,
+    PASS_STRING: PASS,
+    SKIP_STRING: SKIP,
+    TEXT_STRING: TEXT,
+    TIMEOUT_STRING: TIMEOUT,
+}
 
-def _trie_json_tests(tests):
-    """Breaks a test name into chunks by directory and puts the test time as a value in the lowest part, e.g.
-    foo/bar/baz.html: VALUE1
-    foo/bar/baz1.html: VALUE2
-
-    becomes
-    foo: {
-        bar: {
-            baz.html: VALUE1,
-            baz1.html: VALUE2
-        }
-    }
-    """
-    trie = {}
-    for test, value in tests.iteritems():
-        _add_path_to_trie(test, value, trie)
-    return trie
-
+# FIXME: Use dict comprehensions once we update the server to python 2.7.
+CHAR_TO_FAILURE = dict((value, key) for key, value in FAILURE_TO_CHAR.items())
 
 def _is_directory(subtree):
-    # FIXME: Some data got corrupted and has results/times at the directory level.
-    # Once the data is fixed, this should assert that the directory level does not have
-    # results or times and just return "JSON_RESULTS_RESULTS not in subtree".
-    if JSON_RESULTS_RESULTS not in subtree:
-        return True
-
-    for key in subtree:
-        if key not in (JSON_RESULTS_RESULTS, JSON_RESULTS_TIMES):
-            del subtree[JSON_RESULTS_RESULTS]
-            del subtree[JSON_RESULTS_TIMES]
-            return True
-
-    return False
+    return RESULTS_KEY not in subtree
 
 
 class JsonResults(object):
     @classmethod
     def _strip_prefix_suffix(cls, data):
-        # FIXME: Stop stripping jsonp callback once we upload pure json everywhere.
         if data.startswith(JSON_RESULTS_PREFIX) and data.endswith(JSON_RESULTS_SUFFIX):
             return data[len(JSON_RESULTS_PREFIX):len(data) - len(JSON_RESULTS_SUFFIX)]
         return data
@@ -126,19 +137,36 @@
 
     @classmethod
     def _merge_json(cls, aggregated_json, incremental_json, num_runs):
+        # We have to delete expected entries because the incremental json may not have an
+        # entry for every test in the aggregated json. But the incremental json will have
+        # all the correct expected entries for that run.
+        cls._delete_expected_entries(aggregated_json[TESTS_KEY])
         cls._merge_non_test_data(aggregated_json, incremental_json, num_runs)
-        incremental_tests = incremental_json[JSON_RESULTS_TESTS]
+        incremental_tests = incremental_json[TESTS_KEY]
         if incremental_tests:
-            aggregated_tests = aggregated_json[JSON_RESULTS_TESTS]
+            aggregated_tests = aggregated_json[TESTS_KEY]
             cls._merge_tests(aggregated_tests, incremental_tests, num_runs)
-            cls._normalize_results(aggregated_tests, num_runs)
+
+    @classmethod
+    def _delete_expected_entries(cls, aggregated_json):
+        for key in aggregated_json:
+            item = aggregated_json[key]
+            if _is_directory(item):
+                cls._delete_expected_entries(item)
+            else:
+                if EXPECTED_KEY in item:
+                    del item[EXPECTED_KEY]
+                if BUG_KEY in item:
+                    del item[BUG_KEY]
 
     @classmethod
     def _merge_non_test_data(cls, aggregated_json, incremental_json, num_runs):
-        incremental_builds = incremental_json[JSON_RESULTS_BUILD_NUMBERS]
-        aggregated_builds = aggregated_json[JSON_RESULTS_BUILD_NUMBERS]
+        incremental_builds = incremental_json[BUILD_NUMBERS_KEY]
+        aggregated_builds = aggregated_json[BUILD_NUMBERS_KEY]
         aggregated_build_number = int(aggregated_builds[0])
 
+        # FIXME: It's no longer possible to have multiple runs' worth of data in the incremental_json,
+        # so we can get rid of this for-loop and the associated index.
         for index in reversed(range(len(incremental_builds))):
             build_number = int(incremental_builds[index])
             logging.debug("Merging build %s, incremental json index: %d.", build_number, index)
@@ -151,12 +179,15 @@
         for key in incremental_json.keys():
             # Merge json results except "tests" properties (results, times etc).
             # "tests" properties will be handled separately.
-            if key == JSON_RESULTS_TESTS:
+            if key == TESTS_KEY or key == FAILURE_MAP_KEY:
                 continue
 
             if key in aggregated_json:
-                aggregated_json[key].insert(0, incremental_json[key][incremental_index])
-                aggregated_json[key] = aggregated_json[key][:num_runs]
+                if key == FAILURES_BY_TYPE_KEY:
+                    cls._merge_one_build(aggregated_json[key], incremental_json[key], incremental_index, num_runs=num_runs)
+                else:
+                    aggregated_json[key].insert(0, incremental_json[key][incremental_index])
+                    aggregated_json[key] = aggregated_json[key][:num_runs]
             else:
                 aggregated_json[key] = incremental_json[key]
 
@@ -164,11 +195,11 @@
     def _merge_tests(cls, aggregated_json, incremental_json, num_runs):
         # FIXME: Some data got corrupted and has results/times at the directory level.
         # Once the data is fixed, this should assert that the directory level does not have
-        # results or times and just return "JSON_RESULTS_RESULTS not in subtree".
-        if JSON_RESULTS_RESULTS in aggregated_json:
-            del aggregated_json[JSON_RESULTS_RESULTS]
-        if JSON_RESULTS_TIMES in aggregated_json:
-            del aggregated_json[JSON_RESULTS_TIMES]
+        # results or times and just return "RESULTS_KEY not in subtree".
+        if RESULTS_KEY in aggregated_json:
+            del aggregated_json[RESULTS_KEY]
+        if TIMES_KEY in aggregated_json:
+            del aggregated_json[TIMES_KEY]
 
         all_tests = set(aggregated_json.iterkeys())
         if incremental_json:
@@ -184,16 +215,21 @@
                 cls._merge_tests(aggregated_json[test_name], incremental_sub_result, num_runs)
                 continue
 
+            aggregated_test = aggregated_json[test_name]
+
             if incremental_sub_result:
-                results = incremental_sub_result[JSON_RESULTS_RESULTS]
-                times = incremental_sub_result[JSON_RESULTS_TIMES]
+                results = incremental_sub_result[RESULTS_KEY]
+                times = incremental_sub_result[TIMES_KEY]
+                if EXPECTED_KEY in incremental_sub_result and incremental_sub_result[EXPECTED_KEY] != PASS_STRING:
+                    aggregated_test[EXPECTED_KEY] = incremental_sub_result[EXPECTED_KEY]
+                if BUG_KEY in incremental_sub_result:
+                    aggregated_test[BUG_KEY] = incremental_sub_result[BUG_KEY]
             else:
-                results = [[1, JSON_RESULTS_NO_DATA]]
+                results = [[1, NO_DATA]]
                 times = [[1, 0]]
 
-            aggregated_test = aggregated_json[test_name]
-            cls._insert_item_run_length_encoded(results, aggregated_test[JSON_RESULTS_RESULTS], num_runs)
-            cls._insert_item_run_length_encoded(times, aggregated_test[JSON_RESULTS_TIMES], num_runs)
+            cls._insert_item_run_length_encoded(results, aggregated_test[RESULTS_KEY], num_runs)
+            cls._insert_item_run_length_encoded(times, aggregated_test[TIMES_KEY], num_runs)
 
     @classmethod
     def _insert_item_run_length_encoded(cls, incremental_item, aggregated_item, num_runs):
@@ -209,10 +245,13 @@
         for test_name in aggregated_json:
             if _is_directory(aggregated_json[test_name]):
                 cls._normalize_results(aggregated_json[test_name], num_runs)
+                # If normalizing deletes all the children of this directory, also delete the directory.
+                if not aggregated_json[test_name]:
+                    names_to_delete.append(test_name)
             else:
                 leaf = aggregated_json[test_name]
-                leaf[JSON_RESULTS_RESULTS] = cls._remove_items_over_max_number_of_builds(leaf[JSON_RESULTS_RESULTS], num_runs)
-                leaf[JSON_RESULTS_TIMES] = cls._remove_items_over_max_number_of_builds(leaf[JSON_RESULTS_TIMES], num_runs)
+                leaf[RESULTS_KEY] = cls._remove_items_over_max_number_of_builds(leaf[RESULTS_KEY], num_runs)
+                leaf[TIMES_KEY] = cls._remove_items_over_max_number_of_builds(leaf[TIMES_KEY], num_runs)
                 if cls._should_delete_leaf(leaf):
                     names_to_delete.append(test_name)
 
@@ -221,12 +260,18 @@
 
     @classmethod
     def _should_delete_leaf(cls, leaf):
-        deletable_types = set((JSON_RESULTS_PASS, JSON_RESULTS_NO_DATA, JSON_RESULTS_SKIP))
-        for result in leaf[JSON_RESULTS_RESULTS]:
+        if leaf.get(EXPECTED_KEY, PASS_STRING) != PASS_STRING:
+            return False
+
+        if BUG_KEY in leaf:
+            return False
+
+        deletable_types = set((PASS, NO_DATA, NOTRUN))
+        for result in leaf[RESULTS_KEY]:
             if result[1] not in deletable_types:
                 return False
 
-        for time in leaf[JSON_RESULTS_TIMES]:
+        for time in leaf[TIMES_KEY]:
             if time[1] >= JSON_RESULTS_MIN_TIME:
                 return False
 
@@ -245,11 +290,28 @@
         return encoded_list
 
     @classmethod
+    def _convert_gtest_json_to_aggregate_results_format(cls, json):
+        # FIXME: Change gtests over to uploading the full results format like layout-tests
+        # so we don't have to do this normalizing.
+
+        if FAILURES_BY_TYPE_KEY in json:
+            # This is already in the right format.
+            return
+
+        failures_by_type = {}
+        for fixableCount in json[FIXABLE_COUNTS_KEY]:
+            for failure_type, count in fixableCount.items():
+                failure_string = CHAR_TO_FAILURE[failure_type]
+                if failure_string not in failures_by_type:
+                    failures_by_type[failure_string] = []
+                failures_by_type[failure_string].append(count)
+        json[FAILURES_BY_TYPE_KEY] = failures_by_type
+
+    @classmethod
     def _check_json(cls, builder, json):
-        version = json[JSON_RESULTS_VERSION_KEY]
+        version = json[VERSIONS_KEY]
         if version > JSON_RESULTS_HIERARCHICAL_VERSION:
-            logging.error("Results JSON version '%s' is not supported.",
-                version)
+            logging.error("Results JSON version '%s' is not supported.", version)
             return False
 
         if not builder in json:
@@ -257,89 +319,184 @@
             return False
 
         results_for_builder = json[builder]
-        if not JSON_RESULTS_BUILD_NUMBERS in results_for_builder:
+        if not BUILD_NUMBERS_KEY in results_for_builder:
             logging.error("Missing build number in json results.")
             return False
 
-        # FIXME: Once all the bots have cycled, we can remove this code since all the results will be heirarchical.
-        if version < JSON_RESULTS_HIERARCHICAL_VERSION:
-            json[builder][JSON_RESULTS_TESTS] = _trie_json_tests(results_for_builder[JSON_RESULTS_TESTS])
-            json[JSON_RESULTS_VERSION_KEY] = JSON_RESULTS_HIERARCHICAL_VERSION
-
+        cls._convert_gtest_json_to_aggregate_results_format(json[builder])
         return True
 
     @classmethod
-    def merge(cls, builder, aggregated, incremental, num_runs, sort_keys=False):
-        if not incremental:
+    def _populate_tests_from_full_results(cls, full_results, new_results):
+        if EXPECTED_KEY in full_results:
+            expected = full_results[EXPECTED_KEY]
+            if expected != PASS_STRING and expected != NOTRUN_STRING:
+                new_results[EXPECTED_KEY] = expected
+            time = int(round(full_results[TIME_KEY])) if TIME_KEY in full_results else 0
+            new_results[TIMES_KEY] = [[1, time]]
+
+            actual_failures = full_results['actual']
+            # Treat unexpected skips like NOTRUNs to avoid exploding the results JSON files
+            # when a bot exits early (e.g. due to too many crashes/timeouts).
+            if expected != SKIP_STRING and actual_failures == SKIP_STRING:
+                expected = first_actual_failure = NOTRUN_STRING
+            elif expected == NOTRUN_STRING:
+                first_actual_failure = expected
+            else:
+                # FIXME: Include the retry result as well and find a nice way to display it in the flakiness dashboard.
+                first_actual_failure = actual_failures.split(' ')[0]
+            new_results[RESULTS_KEY] = [[1, FAILURE_TO_CHAR[first_actual_failure]]]
+
+            if BUG_KEY in full_results:
+                new_results[BUG_KEY] = full_results[BUG_KEY]
+            return
+
+        for key in full_results:
+            new_results[key] = {}
+            cls._populate_tests_from_full_results(full_results[key], new_results[key])
+
+    @classmethod
+    def _convert_full_results_format_to_aggregate(cls, full_results_format):
+        num_total_tests = 0
+        num_failing_tests = 0
+        fixableCounts = {}
+        failures_by_type = full_results_format[FAILURES_BY_TYPE_KEY]
+
+        # FIXME: full_results format has "FAIL" entries, but that is no longer a possible result type.
+        if 'FAIL' in failures_by_type:
+            del failures_by_type['FAIL']
+
+        for failure_type in failures_by_type:
+            count = failures_by_type[failure_type]
+            num_total_tests += count
+            if failure_type != PASS_STRING:
+                num_failing_tests += count
+            fixableCounts[FAILURE_TO_CHAR[failure_type]] = count
+
+        tests = {}
+        cls._populate_tests_from_full_results(full_results_format[TESTS_KEY], tests)
+
+        aggregate_results_format = {
+            VERSIONS_KEY: JSON_RESULTS_HIERARCHICAL_VERSION,
+            full_results_format['builder_name']: {
+                # FIXME: Use dict comprehensions once we update the server to python 2.7.
+                FAILURES_BY_TYPE_KEY: dict((key, [value]) for key, value in failures_by_type.items()),
+                TESTS_KEY: tests,
+                # FIXME: Have the consumers of these use num_failures_by_type directly and stop including these counts.
+                'allFixableCount': [num_total_tests],
+                'fixableCount': [num_failing_tests],
+                FIXABLE_COUNTS_KEY: [fixableCounts],
+                # FIXME: Have all the consumers of this switch over to the full_results_format keys
+                # so we don't have to do this silly conversion.
+                BUILD_NUMBERS_KEY: [full_results_format['build_number']],
+                'chromeRevision': [full_results_format['chromium_revision']],
+                'blinkRevision': [full_results_format['blink_revision']],
+                'secondsSinceEpoch': [full_results_format['seconds_since_epoch']],
+            }
+        }
+        return aggregate_results_format
+
+    @classmethod
+    def _get_incremental_json(cls, builder, incremental_string, is_full_results_format):
+        if not incremental_string:
             logging.warning("Nothing to merge.")
             return None
 
-        logging.info("Loading incremental json...")
-        incremental_json = cls._load_json(incremental)
+        logging.info("Loading incremental json.")
+        incremental_json = cls._load_json(incremental_string)
         if not incremental_json:
             return None
 
-        logging.info("Checking incremental json...")
+        if is_full_results_format:
+            logging.info("Converting full results format to aggregate.")
+            incremental_json = cls._convert_full_results_format_to_aggregate(incremental_json)
+
+        logging.info("Checking incremental json.")
         if not cls._check_json(builder, incremental_json):
             return None
+        return incremental_json
 
-        logging.info("Loading existing aggregated json...")
-        aggregated_json = cls._load_json(aggregated)
+    @classmethod
+    def _get_aggregated_json(cls, builder, aggregated_string):
+        logging.info("Loading existing aggregated json.")
+        aggregated_json = cls._load_json(aggregated_string)
         if not aggregated_json:
-            return incremental
-
-        logging.info("Checking existing aggregated json...")
-        if not cls._check_json(builder, aggregated_json):
-            return incremental
-
-        if aggregated_json[builder][JSON_RESULTS_BUILD_NUMBERS][0] == incremental_json[builder][JSON_RESULTS_BUILD_NUMBERS][0]:
-            logging.error("Incremental JSON's build number is the latest build number in the aggregated JSON: %d." % aggregated_json[builder][JSON_RESULTS_BUILD_NUMBERS][0])
-            return aggregated
-
-        logging.info("Merging json results...")
-        try:
-            cls._merge_json(aggregated_json[builder], incremental_json[builder], num_runs)
-        except:
-            logging.error("Failed to merge json results: %s", traceback.print_exception(*sys.exc_info()))
             return None
 
-        aggregated_json[JSON_RESULTS_VERSION_KEY] = JSON_RESULTS_HIERARCHICAL_VERSION
+        logging.info("Checking existing aggregated json.")
+        if not cls._check_json(builder, aggregated_json):
+            return None
 
+        return aggregated_json
+
+    @classmethod
+    def merge(cls, builder, aggregated_string, incremental_json, num_runs, sort_keys=False):
+        aggregated_json = cls._get_aggregated_json(builder, aggregated_string)
+        if not aggregated_json:
+            aggregated_json = incremental_json
+        else:
+            if aggregated_json[builder][BUILD_NUMBERS_KEY][0] == incremental_json[builder][BUILD_NUMBERS_KEY][0]:
+                logging.error("Incremental JSON's build number is the latest build number in the aggregated JSON: %d." % aggregated_json[builder][BUILD_NUMBERS_KEY][0])
+                return None
+
+            logging.info("Merging json results.")
+            try:
+                cls._merge_json(aggregated_json[builder], incremental_json[builder], num_runs)
+            except:
+                logging.error("Failed to merge json results: %s", traceback.print_exception(*sys.exc_info()))
+                return None
+
+        aggregated_json[VERSIONS_KEY] = JSON_RESULTS_HIERARCHICAL_VERSION
+        aggregated_json[builder][FAILURE_MAP_KEY] = CHAR_TO_FAILURE
+        cls._normalize_results(aggregated_json[builder][TESTS_KEY], num_runs)
         return cls._generate_file_data(aggregated_json, sort_keys)
 
     @classmethod
-    def update(cls, master, builder, test_type, incremental):
-        small_file_updated = cls.update_file(master, builder, test_type, incremental, JSON_RESULTS_FILE_SMALL, JSON_RESULTS_MAX_BUILDS_SMALL)
-        large_file_updated = cls.update_file(master, builder, test_type, incremental, JSON_RESULTS_FILE, JSON_RESULTS_MAX_BUILDS)
+    def _get_file(cls, master, builder, test_type, filename):
+        files = TestFile.get_files(master, builder, test_type, filename)
+        if files:
+            return files[0]
+
+        file = TestFile()
+        file.master = master
+        file.builder = builder
+        file.test_type = test_type
+        file.name = filename
+        file.data = ""
+        return file
+
+    @classmethod
+    def update(cls, master, builder, test_type, incremental_string, is_full_results_format):
+        logging.info("Updating %s and %s." % (JSON_RESULTS_FILE_SMALL, JSON_RESULTS_FILE))
+        small_file = cls._get_file(master, builder, test_type, JSON_RESULTS_FILE_SMALL)
+        large_file = cls._get_file(master, builder, test_type, JSON_RESULTS_FILE)
+        return cls.update_files(builder, incremental_string, small_file, large_file, is_full_results_format)
+
+    @classmethod
+    def update_files(cls, builder, incremental_string, small_file, large_file, is_full_results_format):
+        incremental_json = cls._get_incremental_json(builder, incremental_string, is_full_results_format)
+        if not incremental_json:
+            return False
+
+        small_file_updated = cls.update_file(builder, small_file, incremental_json, JSON_RESULTS_MAX_BUILDS_SMALL)
+        if not small_file_updated:
+            logging.info("Update for %s failed." % JSON_RESULTS_FILE_SMALL)
+
+        large_file_updated = cls.update_file(builder, large_file, incremental_json, JSON_RESULTS_MAX_BUILDS)
+        if not large_file_updated:
+            logging.info("Update for %s failed." % JSON_RESULTS_FILE)
 
         return small_file_updated and large_file_updated
 
     @classmethod
-    def update_file(cls, master, builder, test_type, incremental, filename, num_runs):
-        files = TestFile.get_files(master, builder, test_type, filename)
-        if files:
-            file = files[0]
-            new_results = cls.merge(builder, file.data, incremental, num_runs)
-        else:
-            # Use the incremental data if there is no aggregated file to merge.
-            file = TestFile()
-            file.master = master
-            file.builder = builder
-            file.test_type = test_type
-            file.name = filename
-            new_results = incremental
-            logging.info("No existing json results, incremental json is saved.")
-
-        if not new_results or not file.save(new_results):
-            logging.info("Update failed, master: %s, builder: %s, test_type: %s, name: %s." % (master, builder, test_type, filename))
-            return False
-
-        return True
+    def update_file(cls, builder, file, incremental_json, num_runs):
+        new_results = cls.merge(builder, file.data, incremental_json, num_runs)
+        return new_results and file.save(new_results)
 
     @classmethod
     def _delete_results_and_times(cls, tests):
         for key in tests.keys():
-            if key in (JSON_RESULTS_RESULTS, JSON_RESULTS_TIMES):
+            if key in (RESULTS_KEY, TIMES_KEY):
                 del tests[key]
             else:
                 cls._delete_results_and_times(tests[key])
@@ -356,7 +513,7 @@
             return None
 
         test_list_json = {}
-        tests = json[builder][JSON_RESULTS_TESTS]
+        tests = json[builder][TESTS_KEY]
         cls._delete_results_and_times(tests)
-        test_list_json[builder] = {"tests": tests}
+        test_list_json[builder] = {TESTS_KEY: tests}
         return cls._generate_file_data(test_list_json)
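
Throughout this file, each test's per-build history is run-length encoded: an entry like [[201, "F"]] means 201 consecutive runs whose result character was "F" (TEXT). Merging a new build prepends its single-run entry and coalesces it with the head of the aggregated list when the value repeats. A hedged, standalone sketch of that prepend-and-coalesce behaviour as pinned down by the unit tests below (a simplification; the server's _insert_item_run_length_encoded may differ in details, and trimming to num_runs is handled separately by _remove_items_over_max_number_of_builds):

    def prepend_run_length_encoded(incremental, aggregated):
        # Both arguments are lists of [count, value] pairs, newest first
        # (result characters for "results", seconds for "times").
        merged = [list(item) for item in aggregated]
        for count, value in reversed(incremental):
            if merged and merged[0][1] == value:
                merged[0][0] += count   # same value as the current head: extend it
            else:
                merged.insert(0, [count, value])
        return merged

    # Matches the expectations exercised by the unit tests below:
    assert prepend_run_length_encoded([[1, 'F']], [[200, 'F']]) == [[201, 'F']]
    assert prepend_run_length_encoded([[1, 'I']], [[200, 'F']]) == [[1, 'I'], [200, 'F']]
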
diff --git a/Tools/TestResultServer/model/jsonresults_unittest.py b/Tools/TestResultServer/model/jsonresults_unittest.py
index 3ddbf4c..d0e35c9 100755
--- a/Tools/TestResultServer/model/jsonresults_unittest.py
+++ b/Tools/TestResultServer/model/jsonresults_unittest.py
@@ -28,64 +28,176 @@
 
 try:
     import jsonresults
-    from jsonresults import JsonResults
+    from jsonresults import *
 except ImportError:
     print "ERROR: Add the TestResultServer, google_appengine and yaml/lib directories to your PYTHONPATH"
     raise
 
-from django.utils import simplejson
+# FIXME: Once we're on python 2.7, just use json directly.
+try:
+    from django.utils import simplejson
+except ImportError:
+    import json as simplejson
 
 import unittest
 
+FULL_RESULT_EXAMPLE = """ADD_RESULTS({
+    "seconds_since_epoch": 1368146629,
+    "tests": {
+        "media": {
+            "encrypted-media": {
+                "encrypted-media-v2-events.html": {
+                    "bugs": ["crbug.com/1234"],
+                    "expected": "TIMEOUT",
+                    "actual": "TIMEOUT",
+                    "time": 6.0
+                },
+                "encrypted-media-v2-syntax.html": {
+                    "expected": "TIMEOUT",
+                    "actual": "TIMEOUT"
+                }
+            },
+            "progress-events-generated-correctly.html": {
+                "expected": "PASS FAIL IMAGE TIMEOUT CRASH MISSING",
+                "actual": "TIMEOUT",
+                "time": 6.0
+            },
+            "W3C": {
+                "audio": {
+                    "src": {
+                        "src_removal_does_not_trigger_loadstart.html": {
+                            "expected": "PASS",
+                            "actual": "PASS",
+                            "time": 3.5
+                        }
+                    }
+                },
+                "video": {
+                    "src": {
+                        "src_removal_does_not_trigger_loadstart.html": {
+                            "expected": "PASS",
+                            "actual": "PASS",
+                            "time": 1.1
+                        },
+                        "notrun.html": {
+                            "expected": "NOTRUN",
+                            "actual": "SKIP",
+                            "time": 1.1
+                        }
+                    }
+                }
+            },
+            "unexpected-skip.html": {
+                "expected": "PASS",
+                "actual": "SKIP"
+            },
+            "media-document-audio-repaint.html": {
+                "expected": "IMAGE",
+                "image_diff_percent": 0,
+                "actual": "IMAGE",
+                "time": 0.1
+            }
+        }
+    },
+    "skipped": 2,
+    "num_regressions": 0,
+    "build_number": "3",
+    "interrupted": false,
+    "num_missing": 0,
+    "uses_expectations_file": true,
+    "layout_tests_dir": "\/tmp\/cr\/src\/third_party\/WebKit\/LayoutTests",
+    "version": 3,
+    "builder_name": "Webkit",
+    "num_passes": 10,
+    "pixel_tests_enabled": true,
+    "blink_revision": "1234",
+    "has_pretty_patch": true,
+    "fixable": 25,
+    "num_flaky": 0,
+    "num_failures_by_type": {
+        "CRASH": 3,
+        "MISSING": 0,
+        "TEXT": 3,
+        "IMAGE": 1,
+        "PASS": 10,
+        "SKIP": 2,
+        "TIMEOUT": 16,
+        "IMAGE+TEXT": 0,
+        "FAIL": 0,
+        "AUDIO": 0
+    },
+    "has_wdiff": true,
+    "chromium_revision": "5678"
+});"""
 
-JSON_RESULTS_TEMPLATE = (
-    '{"Webkit":{'
+JSON_RESULTS_OLD_TEMPLATE = (
+    '{"[BUILDER_NAME]":{'
     '"allFixableCount":[[TESTDATA_COUNT]],'
     '"blinkRevision":[[TESTDATA_WEBKITREVISION]],'
     '"buildNumbers":[[TESTDATA_BUILDNUMBERS]],'
     '"chromeRevision":[[TESTDATA_CHROMEREVISION]],'
-    '"deferredCounts":[[TESTDATA_COUNTS]],'
+    '"failure_map": %s,'
     '"fixableCount":[[TESTDATA_COUNT]],'
     '"fixableCounts":[[TESTDATA_COUNTS]],'
     '"secondsSinceEpoch":[[TESTDATA_TIMES]],'
-    '"tests":{[TESTDATA_TESTS]},'
-    '"wontfixCounts":[[TESTDATA_COUNTS]]'
+    '"tests":{[TESTDATA_TESTS]}'
     '},'
     '"version":[VERSION]'
-    '}')
+    '}') % simplejson.dumps(CHAR_TO_FAILURE)
 
-JSON_RESULTS_COUNTS_TEMPLATE = (
-    '{'
-    '"C":[TESTDATA],'
-    '"F":[TESTDATA],'
-    '"I":[TESTDATA],'
-    '"O":[TESTDATA],'
-    '"P":[TESTDATA],'
-    '"T":[TESTDATA],'
-    '"X":[TESTDATA],'
-    '"Z":[TESTDATA]}')
+JSON_RESULTS_COUNTS = '{"' + '":[[TESTDATA_COUNT]],"'.join([char for char in CHAR_TO_FAILURE.values()]) + '":[[TESTDATA_COUNT]]}'
 
-JSON_RESULTS_DIRECTORY_TEMPLATE = '[[TESTDATA_DIRECTORY]]:{[TESTDATA_DATA]}'
+JSON_RESULTS_TEMPLATE = (
+    '{"[BUILDER_NAME]":{'
+    '"allFixableCount":[[TESTDATA_COUNT]],'
+    '"blinkRevision":[[TESTDATA_WEBKITREVISION]],'
+    '"buildNumbers":[[TESTDATA_BUILDNUMBERS]],'
+    '"chromeRevision":[[TESTDATA_CHROMEREVISION]],'
+    '"failure_map": %s,'
+    '"fixableCount":[[TESTDATA_COUNT]],'
+    '"fixableCounts":[[TESTDATA_COUNTS]],'
+    '"num_failures_by_type":%s,'
+    '"secondsSinceEpoch":[[TESTDATA_TIMES]],'
+    '"tests":{[TESTDATA_TESTS]}'
+    '},'
+    '"version":[VERSION]'
+    '}') % (simplejson.dumps(CHAR_TO_FAILURE), JSON_RESULTS_COUNTS)
 
-JSON_RESULTS_TESTS_TEMPLATE = (
-    '[[TESTDATA_TEST_NAME]]:{'
-    '"results":[[TESTDATA_TEST_RESULTS]],'
-    '"times":[[TESTDATA_TEST_TIMES]]}')
+JSON_RESULTS_COUNTS_TEMPLATE = '{"' + '":[TESTDATA],"'.join([char for char in CHAR_TO_FAILURE]) + '":[TESTDATA]}'
 
-JSON_RESULTS_TEST_LIST_TEMPLATE = (
-    '{"Webkit":{"tests":{[TESTDATA_TESTS]}}}')
+JSON_RESULTS_TEST_LIST_TEMPLATE = '{"Webkit":{"tests":{[TESTDATA_TESTS]}}}'
+
+
+class MockFile(object):
+    def __init__(self, name='results.json', data=''):
+        self.master = 'MockMasterName'
+        self.builder = 'MockBuilderName'
+        self.test_type = 'MockTestType'
+        self.name = name
+        self.data = data
+
+    def save(self, data):
+        self.data = data
+        return True
 
 
 class JsonResultsTest(unittest.TestCase):
     def setUp(self):
         self._builder = "Webkit"
 
+    # Use this to get better error messages than just string compare gives.
+    def assert_json_equal(self, a, b):
+        self.maxDiff = None
+        a = simplejson.loads(a) if isinstance(a, str) else a
+        b = simplejson.loads(b) if isinstance(b, str) else b
+        self.assertEqual(a, b)
+
     def test_strip_prefix_suffix(self):
         json = "['contents']"
         self.assertEqual(JsonResults._strip_prefix_suffix("ADD_RESULTS(" + json + ");"), json)
         self.assertEqual(JsonResults._strip_prefix_suffix(json), json)
 
-    def _make_test_json(self, test_data):
+    def _make_test_json(self, test_data, json_string=JSON_RESULTS_TEMPLATE, builder_name="Webkit"):
         if not test_data:
             return ""
 
@@ -94,8 +206,6 @@
         if not builds or not tests:
             return ""
 
-        json = JSON_RESULTS_TEMPLATE
-
         counts = []
         build_numbers = []
         webkit_revision = []
@@ -108,26 +218,27 @@
             chrome_revision.append("3000%s" % build)
             times.append("100000%s000" % build)
 
-        json = json.replace("[TESTDATA_COUNTS]", ",".join(counts))
-        json = json.replace("[TESTDATA_COUNT]", ",".join(builds))
-        json = json.replace("[TESTDATA_BUILDNUMBERS]", ",".join(build_numbers))
-        json = json.replace("[TESTDATA_WEBKITREVISION]", ",".join(webkit_revision))
-        json = json.replace("[TESTDATA_CHROMEREVISION]", ",".join(chrome_revision))
-        json = json.replace("[TESTDATA_TIMES]", ",".join(times))
+        json_string = json_string.replace("[BUILDER_NAME]", builder_name)
+        json_string = json_string.replace("[TESTDATA_COUNTS]", ",".join(counts))
+        json_string = json_string.replace("[TESTDATA_COUNT]", ",".join(builds))
+        json_string = json_string.replace("[TESTDATA_BUILDNUMBERS]", ",".join(build_numbers))
+        json_string = json_string.replace("[TESTDATA_WEBKITREVISION]", ",".join(webkit_revision))
+        json_string = json_string.replace("[TESTDATA_CHROMEREVISION]", ",".join(chrome_revision))
+        json_string = json_string.replace("[TESTDATA_TIMES]", ",".join(times))
 
         version = str(test_data["version"]) if "version" in test_data else "4"
-        json = json.replace("[VERSION]", version)
-        json = json.replace("{[TESTDATA_TESTS]}", simplejson.dumps(tests, separators=(',', ':'), sort_keys=True))
-        return json
+        json_string = json_string.replace("[VERSION]", version)
+        json_string = json_string.replace("{[TESTDATA_TESTS]}", simplejson.dumps(tests, separators=(',', ':'), sort_keys=True))
+        return json_string
 
     def _test_merge(self, aggregated_data, incremental_data, expected_data, max_builds=jsonresults.JSON_RESULTS_MAX_BUILDS):
         aggregated_results = self._make_test_json(aggregated_data)
-        incremental_results = self._make_test_json(incremental_data)
-        merged_results = JsonResults.merge(self._builder, aggregated_results, incremental_results, max_builds, sort_keys=True)
+        incremental_json = JsonResults._get_incremental_json(self._builder, self._make_test_json(incremental_data), is_full_results_format=False)
+        merged_results = JsonResults.merge(self._builder, aggregated_results, incremental_json, num_runs=max_builds, sort_keys=True)
 
         if expected_data:
             expected_results = self._make_test_json(expected_data)
-            self.assertEqual(merged_results, expected_results)
+            self.assert_json_equal(merged_results, expected_results)
         else:
             self.assertFalse(merged_results)
 
@@ -135,36 +246,180 @@
         input_results = self._make_test_json(input_data)
         expected_results = JSON_RESULTS_TEST_LIST_TEMPLATE.replace("{[TESTDATA_TESTS]}", simplejson.dumps(expected_data, separators=(',', ':')))
         actual_results = JsonResults.get_test_list(self._builder, input_results)
-        self.assertEqual(actual_results, expected_results)
+        self.assert_json_equal(actual_results, expected_results)
 
-    def test_merge_null_incremental_results(self):
-        # Empty incremental results json.
-        # Nothing to merge.
-        self._test_merge(
-            # Aggregated results
-            {"builds": ["2", "1"],
-             "tests": {"001.html": {
-                           "results": [[200,"F"]],
-                           "times": [[200,0]]}}},
-            # Incremental results
-            None,
-            # Expect no merge happens.
-            None)
+    def test_update_files_empty_aggregate_data(self):
+        small_file = MockFile(name='results-small.json')
+        large_file = MockFile(name='results.json')
 
-    def test_merge_empty_incremental_results(self):
-        # No actual incremental test results (only prefix and suffix) to merge.
-        # Nothing to merge.
-        self._test_merge(
-            # Aggregated results
-            {"builds": ["2", "1"],
-             "tests": {"001.html": {
-                           "results": [[200,"F"]],
-                           "times": [[200,0]]}}},
-            # Incremental results
-            {"builds": [],
-             "tests": {}},
-            # Expected no merge happens.
-            None)
+        incremental_data = {
+            "builds": ["2", "1"],
+            "tests": {
+                "001.html": {
+                    "results": [[200, TEXT]],
+                    "times": [[200, 0]],
+                }
+            }
+        }
+        incremental_string = self._make_test_json(incremental_data, builder_name=small_file.builder)
+
+        self.assertTrue(JsonResults.update_files(small_file.builder, incremental_string, small_file, large_file, is_full_results_format=False))
+        self.assert_json_equal(small_file.data, incremental_string)
+        self.assert_json_equal(large_file.data, incremental_string)
+
+    def test_update_files_null_incremental_data(self):
+        small_file = MockFile(name='results-small.json')
+        large_file = MockFile(name='results.json')
+
+        aggregated_data = {
+            "builds": ["2", "1"],
+            "tests": {
+                "001.html": {
+                    "results": [[200, TEXT]],
+                    "times": [[200, 0]],
+                }
+            }
+        }
+        aggregated_string = self._make_test_json(aggregated_data, builder_name=small_file.builder)
+
+        small_file.data = large_file.data = aggregated_string
+
+        incremental_string = ""
+
+        self.assertFalse(JsonResults.update_files(small_file.builder, incremental_string, small_file, large_file, is_full_results_format=False))
+        self.assert_json_equal(small_file.data, aggregated_string)
+        self.assert_json_equal(large_file.data, aggregated_string)
+
+    def test_update_files_empty_incremental_data(self):
+        small_file = MockFile(name='results-small.json')
+        large_file = MockFile(name='results.json')
+
+        aggregated_data = {
+            "builds": ["2", "1"],
+            "tests": {
+                "001.html": {
+                    "results": [[200, TEXT]],
+                    "times": [[200, 0]],
+                }
+            }
+        }
+        aggregated_string = self._make_test_json(aggregated_data, builder_name=small_file.builder)
+
+        small_file.data = large_file.data = aggregated_string
+
+        incremental_data = {
+            "builds": [],
+            "tests": {}
+        }
+        incremental_string = self._make_test_json(incremental_data, builder_name=small_file.builder)
+
+        self.assertFalse(JsonResults.update_files(small_file.builder, incremental_string, small_file, large_file, is_full_results_format=False))
+        self.assert_json_equal(small_file.data, aggregated_string)
+        self.assert_json_equal(large_file.data, aggregated_string)
+
+    def test_merge_with_empty_aggregated_results(self):
+        incremental_data = {
+            "builds": ["2", "1"],
+            "tests": {
+                "001.html": {
+                    "results": [[200, TEXT]],
+                    "times": [[200, 0]],
+                }
+            }
+        }
+        incremental_results = JsonResults._get_incremental_json(self._builder, self._make_test_json(incremental_data), is_full_results_format=False)
+        aggregated_results = ""
+        merged_results = JsonResults.merge(self._builder, aggregated_results, incremental_results, num_runs=jsonresults.JSON_RESULTS_MAX_BUILDS, sort_keys=True)
+        self.assert_json_equal(merged_results, incremental_results)
+
+    def test_failures_by_type_added(self):
+        aggregated_results = self._make_test_json({
+            "builds": ["2", "1"],
+            "tests": {
+                "001.html": {
+                    "results": [[100, TEXT]],
+                    "times": [[100, 0]],
+                }
+            }
+        }, json_string=JSON_RESULTS_OLD_TEMPLATE)
+        incremental_results = self._make_test_json({
+            "builds": ["3"],
+            "tests": {
+                "001.html": {
+                    "results": [[1, TEXT]],
+                    "times": [[1, 0]],
+                }
+            }
+        }, json_string=JSON_RESULTS_OLD_TEMPLATE)
+        incremental_json = JsonResults._get_incremental_json(self._builder, incremental_results, is_full_results_format=False)
+        merged_results = JsonResults.merge(self._builder, aggregated_results, incremental_json, num_runs=200, sort_keys=True)
+        self.assert_json_equal(merged_results, self._make_test_json({
+            "builds": ["3", "2", "1"],
+            "tests": {
+                "001.html": {
+                    "results": [[101, TEXT]],
+                    "times": [[101, 0]],
+                }
+            }
+        }))
+
+    def test_merge_full_results_format(self):
+        expected_incremental_results = {
+            "Webkit": {
+                "allFixableCount": [35],
+                "blinkRevision": ["1234"],
+                "buildNumbers": ["3"],
+                "chromeRevision": ["5678"],
+                "failure_map": CHAR_TO_FAILURE,
+                "fixableCount": [25],
+                "fixableCounts": [{AUDIO: 0, CRASH: 3, TEXT: 3, IMAGE: 1, MISSING: 0, PASS: 10, TIMEOUT: 16, SKIP: 2, IMAGE_PLUS_TEXT: 0}],
+                "num_failures_by_type": {"AUDIO": [0], "CRASH": [3], "IMAGE": [1], "IMAGE+TEXT": [0], "MISSING": [0], "PASS": [10], "SKIP": [2], "TEXT": [3], "TIMEOUT": [16]},
+                "secondsSinceEpoch": [1368146629],
+                "tests": {
+                    "media": {
+                        "W3C": {
+                            "audio": {
+                                "src": {
+                                    "src_removal_does_not_trigger_loadstart.html": {
+                                        "results": [[1, PASS]],
+                                        "times": [[1, 4]],
+                                    }
+                                }
+                            }
+                        },
+                        "encrypted-media": {
+                            "encrypted-media-v2-events.html": {
+                                "bugs": ["crbug.com/1234"],
+                                "expected": "TIMEOUT",
+                                "results": [[1, TIMEOUT]],
+                                "times": [[1, 6]],
+                            },
+                            "encrypted-media-v2-syntax.html": {
+                                "expected": "TIMEOUT",
+                                "results": [[1, TIMEOUT]],
+                                "times": [[1, 0]],
+                            }
+                        },
+                        "media-document-audio-repaint.html": {
+                            "expected": "IMAGE",
+                            "results": [[1, IMAGE]],
+                            "times": [[1, 0]],
+                        },
+                        "progress-events-generated-correctly.html": {
+                            "expected": "PASS FAIL IMAGE TIMEOUT CRASH MISSING",
+                            "results": [[1, TIMEOUT]],
+                            "times": [[1, 6]],
+                        }
+                    }
+                }
+            },
+            "version": 4
+        }
+
+        aggregated_results = ""
+        incremental_json = JsonResults._get_incremental_json(self._builder, FULL_RESULT_EXAMPLE, is_full_results_format=True)
+        merged_results = JsonResults.merge("Webkit", aggregated_results, incremental_json, num_runs=jsonresults.JSON_RESULTS_MAX_BUILDS, sort_keys=True)
+        self.assert_json_equal(merged_results, expected_incremental_results)
 
     def test_merge_empty_aggregated_results(self):
         # No existing aggregated results.
@@ -173,56 +428,52 @@
             # Aggregated results
             None,
             # Incremental results
-
             {"builds": ["2", "1"],
              "tests": {"001.html": {
-                           "results": [[200,"F"]],
-                           "times": [[200,0]]}}},
+                           "results": [[200, TEXT]],
+                           "times": [[200, 0]]}}},
             # Expected result
             {"builds": ["2", "1"],
              "tests": {"001.html": {
-                           "results": [[200,"F"]],
-                           "times": [[200,0]]}}})
+                           "results": [[200, TEXT]],
+                           "times": [[200, 0]]}}})
 
     def test_merge_duplicate_build_number(self):
         self._test_merge(
             # Aggregated results
             {"builds": ["2", "1"],
              "tests": {"001.html": {
-                           "results": [[100, "F"]],
+                           "results": [[100, TEXT]],
                            "times": [[100, 0]]}}},
             # Incremental results
             {"builds": ["2"],
              "tests": {"001.html": {
-                           "results": [[1, "F"]],
+                           "results": [[1, TEXT]],
                            "times": [[1, 0]]}}},
             # Expected results
-            {"builds": ["2", "1"],
-             "tests": {"001.html": {
-                           "results": [[100, "F"]],
-                           "times": [[100, 0]]}}})
+            None)
 
     def test_merge_incremental_single_test_single_run_same_result(self):
         # Incremental results has the latest build and same test results for
         # that run.
         # Insert the incremental results at the first place and sum number
-        # of runs for "F" (200 + 1) to get merged results.
+        # of runs for TEXT (200 + 1) to get merged results.
         self._test_merge(
             # Aggregated results
             {"builds": ["2", "1"],
              "tests": {"001.html": {
-                           "results": [[200,"F"]],
-                           "times": [[200,0]]}}},
+                           "results": [[200, TEXT]],
+                           "times": [[200, 0]]}}},
             # Incremental results
             {"builds": ["3"],
              "tests": {"001.html": {
-                           "results": [[1,"F"]],
-                           "times": [[1,0]]}}},
+                           "results": [[1, TEXT]],
+                           "times": [[1, 0]]}}},
             # Expected results
             {"builds": ["3", "2", "1"],
              "tests": {"001.html": {
-                           "results": [[201,"F"]],
-                           "times": [[201,0]]}}})
+                           "results": [[201, TEXT]],
+                           "times": [[201, 0]]}}})
 
     def test_merge_single_test_single_run_different_result(self):
         # Incremental results has the latest build but different test results
@@ -232,18 +483,18 @@
             # Aggregated results
             {"builds": ["2", "1"],
              "tests": {"001.html": {
-                           "results": [[200,"F"]],
-                           "times": [[200,0]]}}},
+                           "results": [[200, TEXT]],
+                           "times": [[200, 0]]}}},
             # Incremental results
             {"builds": ["3"],
              "tests": {"001.html": {
-                           "results": [[1, "I"]],
-                           "times": [[1,1]]}}},
+                           "results": [[1, IMAGE]],
+                           "times": [[1, 1]]}}},
             # Expected results
             {"builds": ["3", "2", "1"],
              "tests": {"001.html": {
-                           "results": [[1,"I"],[200,"F"]],
-                           "times": [[1,1],[200,0]]}}})
+                           "results": [[1, IMAGE], [200, TEXT]],
+                           "times": [[1, 1], [200, 0]]}}})
 
     def test_merge_single_test_single_run_result_changed(self):
         # Incremental results has the latest build but results which differ from
@@ -252,18 +503,18 @@
             # Aggregated results
             {"builds": ["2", "1"],
              "tests": {"001.html": {
-                           "results": [[200,"F"],[10,"I"]],
-                           "times": [[200,0],[10,1]]}}},
+                           "results": [[200, TEXT], [10, IMAGE]],
+                           "times": [[200, 0], [10, 1]]}}},
             # Incremental results
             {"builds": ["3"],
              "tests": {"001.html": {
-                           "results": [[1,"I"]],
-                           "times": [[1,1]]}}},
+                           "results": [[1, IMAGE]],
+                           "times": [[1, 1]]}}},
             # Expected results
             {"builds": ["3", "2", "1"],
              "tests": {"001.html": {
-                           "results": [[1,"I"],[200,"F"],[10,"I"]],
-                           "times": [[1,1],[200,0],[10,1]]}}})
+                           "results": [[1, IMAGE], [200, TEXT], [10, IMAGE]],
+                           "times": [[1, 1], [200, 0], [10, 1]]}}})
 
     def test_merge_multiple_tests_single_run(self):
         # All tests have incremental updates.
@@ -271,96 +522,96 @@
             # Aggregated results
             {"builds": ["2", "1"],
              "tests": {"001.html": {
-                           "results": [[200,"F"]],
-                           "times": [[200,0]]},
+                           "results": [[200, TEXT]],
+                           "times": [[200, 0]]},
                        "002.html": {
-                           "results": [[100,"I"]],
-                           "times": [[100,1]]}}},
+                           "results": [[100, IMAGE]],
+                           "times": [[100, 1]]}}},
             # Incremental results
             {"builds": ["3"],
              "tests": {"001.html": {
-                           "results": [[1,"F"]],
-                           "times": [[1,0]]},
+                           "results": [[1, TEXT]],
+                           "times": [[1, 0]]},
                        "002.html": {
-                           "results": [[1,"I"]],
-                           "times": [[1,1]]}}},
+                           "results": [[1, IMAGE]],
+                           "times": [[1, 1]]}}},
             # Expected results
             {"builds": ["3", "2", "1"],
              "tests": {"001.html": {
-                           "results": [[201,"F"]],
-                           "times": [[201,0]]},
+                           "results": [[201, TEXT]],
+                           "times": [[201, 0]]},
                        "002.html": {
-                           "results": [[101,"I"]],
-                           "times": [[101,1]]}}})
+                           "results": [[101, IMAGE]],
+                           "times": [[101, 1]]}}})
 
     def test_merge_multiple_tests_single_run_one_no_result(self):
         self._test_merge(
             # Aggregated results
             {"builds": ["2", "1"],
              "tests": {"001.html": {
-                           "results": [[200,"F"]],
-                           "times": [[200,0]]},
+                           "results": [[200, TEXT]],
+                           "times": [[200, 0]]},
                        "002.html": {
-                           "results": [[100,"I"]],
-                           "times": [[100,1]]}}},
+                           "results": [[100, IMAGE]],
+                           "times": [[100, 1]]}}},
             # Incremental results
             {"builds": ["3"],
              "tests": {"002.html": {
-                           "results": [[1,"I"]],
-                           "times": [[1,1]]}}},
+                           "results": [[1, IMAGE]],
+                           "times": [[1, 1]]}}},
             # Expected results
             {"builds": ["3", "2", "1"],
              "tests": {"001.html": {
-                           "results": [[1,"N"],[200,"F"]],
-                           "times": [[201,0]]},
+                           "results": [[1, NO_DATA], [200, TEXT]],
+                           "times": [[201, 0]]},
                        "002.html": {
-                           "results": [[101,"I"]],
-                           "times": [[101,1]]}}})
+                           "results": [[101, IMAGE]],
+                           "times": [[101, 1]]}}})
 
     def test_merge_single_test_multiple_runs(self):
         self._test_merge(
             # Aggregated results
             {"builds": ["2", "1"],
              "tests": {"001.html": {
-                           "results": [[200,"F"]],
-                           "times": [[200,0]]}}},
+                           "results": [[200, TEXT]],
+                           "times": [[200, 0]]}}},
             # Incremental results
             {"builds": ["4", "3"],
              "tests": {"001.html": {
-                           "results": [[2, "I"]],
-                           "times": [[2,2]]}}},
+                           "results": [[2, IMAGE]],
+                           "times": [[2, 2]]}}},
             # Expected results
             {"builds": ["4", "3", "2", "1"],
              "tests": {"001.html": {
-                           "results": [[2,"I"],[200,"F"]],
-                           "times": [[2,2],[200,0]]}}})
+                           "results": [[2, IMAGE], [200, TEXT]],
+                           "times": [[2, 2], [200, 0]]}}})
 
     def test_merge_multiple_tests_multiple_runs(self):
         self._test_merge(
             # Aggregated results
             {"builds": ["2", "1"],
              "tests": {"001.html": {
-                           "results": [[200,"F"]],
-                           "times": [[200,0]]},
+                           "results": [[200, TEXT]],
+                           "times": [[200, 0]]},
                        "002.html": {
-                           "results": [[10,"Z"]],
-                           "times": [[10,0]]}}},
+                           "results": [[10, IMAGE_PLUS_TEXT]],
+                           "times": [[10, 0]]}}},
             # Incremental results
             {"builds": ["4", "3"],
              "tests": {"001.html": {
-                           "results": [[2, "I"]],
-                           "times": [[2,2]]},
+                           "results": [[2, IMAGE]],
+                           "times": [[2, 2]]},
                        "002.html": {
-                           "results": [[1,"C"]],
-                           "times": [[1,1]]}}},
+                           "results": [[1, CRASH]],
+                           "times": [[1, 1]]}}},
             # Expected results
             {"builds": ["4", "3", "2", "1"],
              "tests": {"001.html": {
-                           "results": [[2,"I"],[200,"F"]],
-                           "times": [[2,2],[200,0]]},
+                           "results": [[2, IMAGE], [200, TEXT]],
+                           "times": [[2, 2], [200, 0]]},
                        "002.html": {
-                           "results": [[1,"C"],[10,"Z"]],
-                           "times": [[1,1],[10,0]]}}})
+                           "results": [[1, CRASH], [10, IMAGE_PLUS_TEXT]],
+                           "times": [[1, 1], [10, 0]]}}})
 
     def test_merge_incremental_result_older_build(self):
         # Test the build in incremental results is older than the most recent
@@ -369,18 +620,18 @@
             # Aggregated results
             {"builds": ["3", "1"],
              "tests": {"001.html": {
-                           "results": [[5,"F"]],
-                           "times": [[5,0]]}}},
+                           "results": [[5, TEXT]],
+                           "times": [[5, 0]]}}},
             # Incremental results
             {"builds": ["2"],
              "tests": {"001.html": {
-                           "results": [[1, "F"]],
-                           "times": [[1,0]]}}},
+                           "results": [[1, TEXT]],
+                           "times": [[1, 0]]}}},
             # Expected no merge happens.
             {"builds": ["2", "3", "1"],
              "tests": {"001.html": {
-                           "results": [[6,"F"]],
-                           "times": [[6,0]]}}})
+                           "results": [[6, TEXT]],
+                           "times": [[6, 0]]}}})
 
     def test_merge_incremental_result_same_build(self):
         # Test the build in incremental results is same as the build in
@@ -389,111 +640,211 @@
             # Aggregated results
             {"builds": ["2", "1"],
              "tests": {"001.html": {
-                           "results": [[5,"F"]],
-                           "times": [[5,0]]}}},
+                           "results": [[5, TEXT]],
+                           "times": [[5, 0]]}}},
             # Incremental results
             {"builds": ["3", "2"],
              "tests": {"001.html": {
-                           "results": [[2, "F"]],
-                           "times": [[2,0]]}}},
+                           "results": [[2, TEXT]],
+                           "times": [[2, 0]]}}},
             # Expected no merge happens.
             {"builds": ["3", "2", "2", "1"],
              "tests": {"001.html": {
-                           "results": [[7,"F"]],
-                           "times": [[7,0]]}}})
+                           "results": [[7, TEXT]],
+                           "times": [[7, 0]]}}})
 
     def test_merge_remove_new_test(self):
         self._test_merge(
             # Aggregated results
             {"builds": ["2", "1"],
              "tests": {"001.html": {
-                           "results": [[199, "F"]],
+                           "results": [[199, TEXT]],
                            "times": [[199, 0]]},
                        }},
             # Incremental results
             {"builds": ["3"],
              "tests": {"001.html": {
-                           "results": [[1, "F"]],
+                           "results": [[1, TEXT]],
                            "times": [[1, 0]]},
                        "002.html": {
-                           "results": [[1, "P"]],
+                           "results": [[1, PASS]],
+                           "times": [[1, 0]]},
+                       "notrun.html": {
+                           "results": [[1, NOTRUN]],
                            "times": [[1, 0]]},
                        "003.html": {
-                           "results": [[1, "N"]],
+                           "results": [[1, NO_DATA]],
                            "times": [[1, 0]]},
-                       "004.html": {
-                           "results": [[1, "X"]],
-                           "times": [[1, 0]]},
-                       }},
+                        }},
             # Expected results
             {"builds": ["3", "2", "1"],
              "tests": {"001.html": {
-                           "results": [[200, "F"]],
+                           "results": [[200, TEXT]],
                            "times": [[200, 0]]},
                        }},
             max_builds=200)
 
-
     def test_merge_remove_test(self):
         self._test_merge(
             # Aggregated results
-            {"builds": ["2", "1"],
-             "tests": {"001.html": {
-                           "results": [[200,"P"]],
-                           "times": [[200,0]]},
-                       "002.html": {
-                           "results": [[10,"F"]],
-                           "times": [[10,0]]},
-                       "003.html": {
-                           "results": [[190, 'X'], [9, 'N'], [1,"F"]],
-                           "times": [[200,0]]},
-                       }},
+            {
+                "builds": ["2", "1"],
+                "tests": {
+                    "directory": {
+                        "directory": {
+                            "001.html": {
+                                "results": [[200, PASS]],
+                                "times": [[200, 0]]
+                            }
+                        }
+                    },
+                    "002.html": {
+                        "results": [[10, TEXT]],
+                        "times": [[10, 0]]
+                    },
+                    "003.html": {
+                        "results": [[190, PASS], [9, NO_DATA], [1, TEXT]],
+                        "times": [[200, 0]]
+                    },
+                }
+            },
             # Incremental results
-            {"builds": ["3"],
-             "tests": {"001.html": {
-                           "results": [[1,"P"]],
-                           "times": [[1,0]]},
-                       "002.html": {
-                           "results": [[1,"P"]],
-                           "times": [[1,0]]},
-                       "003.html": {
-                           "results": [[1,"P"]],
-                           "times": [[1,0]]},
-                       }},
+            {
+                "builds": ["3"],
+                "tests": {
+                    "directory": {
+                        "directory": {
+                            "001.html": {
+                                "results": [[1, PASS]],
+                                "times": [[1, 0]]
+                            }
+                        }
+                    },
+                    "002.html": {
+                        "results": [[1, PASS]],
+                        "times": [[1, 0]]
+                    },
+                    "003.html": {
+                        "results": [[1, PASS]],
+                        "times": [[1, 0]]
+                    },
+                }
+            },
             # Expected results
-            {"builds": ["3", "2", "1"],
-             "tests": {"002.html": {
-                           "results": [[1,"P"],[10,"F"]],
-                           "times": [[11,0]]}}},
+            {
+                "builds": ["3", "2", "1"],
+                "tests": {
+                    "002.html": {
+                        "results": [[1, PASS], [10, TEXT]],
+                        "times": [[11, 0]]
+                    }
+                }
+            },
             max_builds=200)
 
+    def test_merge_updates_expected(self):
+        self._test_merge(
+            # Aggregated results
+            {
+                "builds": ["2", "1"],
+                "tests": {
+                    "directory": {
+                        "directory": {
+                            "001.html": {
+                                "expected": "FAIL",
+                                "results": [[200, PASS]],
+                                "times": [[200, 0]]
+                            }
+                        }
+                    },
+                    "002.html": {
+                        "bugs": ["crbug.com/1234"],
+                        "expected": "FAIL",
+                        "results": [[10, TEXT]],
+                        "times": [[10, 0]]
+                    },
+                    "003.html": {
+                        "expected": "FAIL",
+                        "results": [[190, PASS], [9, NO_DATA], [1, TEXT]],
+                        "times": [[200, 0]]
+                    },
+                    "004.html": {
+                        "results": [[199, PASS], [1, TEXT]],
+                        "times": [[200, 0]]
+                    },
+                }
+            },
+            # Incremental results
+            {
+                "builds": ["3"],
+                "tests": {
+                    "002.html": {
+                        "expected": "PASS",
+                        "results": [[1, PASS]],
+                        "times": [[1, 0]]
+                    },
+                    "003.html": {
+                        "expected": "TIMEOUT",
+                        "results": [[1, PASS]],
+                        "times": [[1, 0]]
+                    },
+                    "004.html": {
+                        "bugs": ["crbug.com/1234"],
+                        "results": [[1, PASS]],
+                        "times": [[1, 0]]
+                    },
+                }
+            },
+            # Expected results
+            {
+                "builds": ["3", "2", "1"],
+                "tests": {
+                    "002.html": {
+                        "results": [[1, PASS], [10, TEXT]],
+                        "times": [[11, 0]]
+                    },
+                    "003.html": {
+                        "expected": "TIMEOUT",
+                        "results": [[191, PASS], [9, NO_DATA]],
+                        "times": [[200, 0]]
+                    },
+                    "004.html": {
+                        "bugs": ["crbug.com/1234"],
+                        "results": [[200, PASS]],
+                        "times": [[200, 0]]
+                    },
+                }
+            },
+            max_builds=200)
+
+
     def test_merge_keep_test_with_all_pass_but_slow_time(self):
         # Do not remove test where all run pass but max running time >= 5 seconds
         self._test_merge(
             # Aggregated results
             {"builds": ["2", "1"],
              "tests": {"001.html": {
-                           "results": [[200,"P"]],
-                           "times": [[200,jsonresults.JSON_RESULTS_MIN_TIME]]},
+                           "results": [[200, PASS]],
+                           "times": [[200, jsonresults.JSON_RESULTS_MIN_TIME]]},
                        "002.html": {
-                           "results": [[10,"F"]],
-                           "times": [[10,0]]}}},
+                           "results": [[10, TEXT]],
+                           "times": [[10, 0]]}}},
             # Incremental results
             {"builds": ["3"],
              "tests": {"001.html": {
-                           "results": [[1,"P"]],
-                           "times": [[1,1]]},
+                           "results": [[1, PASS]],
+                           "times": [[1, 1]]},
                        "002.html": {
-                           "results": [[1,"P"]],
-                           "times": [[1,0]]}}},
+                           "results": [[1, PASS]],
+                           "times": [[1, 0]]}}},
             # Expected results
             {"builds": ["3", "2", "1"],
              "tests": {"001.html": {
-                           "results": [[201,"P"]],
-                           "times": [[1,1],[200,jsonresults.JSON_RESULTS_MIN_TIME]]},
+                           "results": [[201, PASS]],
+                           "times": [[1, 1], [200, jsonresults.JSON_RESULTS_MIN_TIME]]},
                        "002.html": {
-                           "results": [[1,"P"],[10,"F"]],
-                           "times": [[11,0]]}}})
+                           "results": [[1, PASS], [10, TEXT]],
+                           "times": [[11, 0]]}}})
 
     def test_merge_prune_extra_results(self):
         # Remove items from test results and times that exceed the max number
@@ -503,18 +854,18 @@
             # Aggregated results
             {"builds": ["2", "1"],
              "tests": {"001.html": {
-                           "results": [[max_builds,"F"],[1,"I"]],
-                           "times": [[max_builds,0],[1,1]]}}},
+                           "results": [[max_builds, TEXT], [1, IMAGE]],
+                           "times": [[max_builds, 0], [1, 1]]}}},
             # Incremental results
             {"builds": ["3"],
              "tests": {"001.html": {
-                           "results": [[1,"T"]],
-                           "times": [[1,1]]}}},
+                           "results": [[1, TIMEOUT]],
+                           "times": [[1, 1]]}}},
             # Expected results
             {"builds": ["3", "2", "1"],
              "tests": {"001.html": {
-                           "results": [[1,"T"],[max_builds,"F"]],
-                           "times": [[1,1],[max_builds,0]]}}})
+                           "results": [[1, TIMEOUT], [max_builds, TEXT]],
+                           "times": [[1, 1], [max_builds, 0]]}}})
 
     def test_merge_prune_extra_results_small(self):
         # Remove items from test results and times that exceed the max number
@@ -524,18 +875,18 @@
             # Aggregated results
             {"builds": ["2", "1"],
              "tests": {"001.html": {
-                           "results": [[max_builds,"F"],[1,"I"]],
-                           "times": [[max_builds,0],[1,1]]}}},
+                           "results": [[max_builds, TEXT], [1, IMAGE]],
+                           "times": [[max_builds, 0], [1, 1]]}}},
             # Incremental results
             {"builds": ["3"],
              "tests": {"001.html": {
-                           "results": [[1,"T"]],
-                           "times": [[1,1]]}}},
+                           "results": [[1, TIMEOUT]],
+                           "times": [[1, 1]]}}},
             # Expected results
             {"builds": ["3", "2", "1"],
              "tests": {"001.html": {
-                           "results": [[1,"T"],[max_builds,"F"]],
-                           "times": [[1,1],[max_builds,0]]}}},
+                           "results": [[1, TIMEOUT], [max_builds, TEXT]],
+                           "times": [[1, 1], [max_builds, 0]]}}},
             int(max_builds))
 
     def test_merge_prune_extra_results_with_new_result_of_same_type(self):
@@ -546,93 +897,67 @@
             # Aggregated results
             {"builds": ["2", "1"],
              "tests": {"001.html": {
-                           "results": [[max_builds,"F"],[1,"N"]],
-                           "times": [[max_builds,0],[1,1]]}}},
+                           "results": [[max_builds, TEXT], [1, NO_DATA]],
+                           "times": [[max_builds, 0], [1, 1]]}}},
             # Incremental results
             {"builds": ["3"],
              "tests": {"001.html": {
-                           "results": [[1,"F"]],
-                           "times": [[1,0]]}}},
+                           "results": [[1, TEXT]],
+                           "times": [[1, 0]]}}},
             # Expected results
             {"builds": ["3", "2", "1"],
              "tests": {"001.html": {
-                           "results": [[max_builds,"F"]],
-                           "times": [[max_builds,0]]}}},
+                           "results": [[max_builds, TEXT]],
+                           "times": [[max_builds, 0]]}}},
             int(max_builds))
 
-    # FIXME: Some data got corrupted and has results and times at the directory level.
-    # Once we've purged this from all the data, we should throw an error on this case.
-    def test_merge_directory_hierarchy_extra_results_and_times(self):
-        self._test_merge(
-            # Aggregated results
-            {"builds": ["2", "1"],
-             "tests": {"baz": {
-                            "003.html": {
-                                "results": [[25,"F"]],
-                                "times": [[25,0]]}},
-                        "results": [[25,"F"]],
-                        "times": [[25,0]]}},
-             # Incremental results
-             {"builds": ["3"],
-             "tests": {"baz": {
-                            "003.html": {
-                                "results": [[1,"F"]],
-                                "times": [[1,0]]}}}},
-             # Expected results
-             {"builds": ["3", "2", "1"],
-             "tests": {"baz": {
-                            "003.html": {
-                                "results": [[26,"F"]],
-                                "times": [[26,0]]}}},
-              "version": 4})
-
     def test_merge_build_directory_hierarchy(self):
         self._test_merge(
             # Aggregated results
             {"builds": ["2", "1"],
              "tests": {"bar": {"baz": {
                            "003.html": {
-                                "results": [[25,"F"]],
-                                "times": [[25,0]]}}},
+                                "results": [[25, TEXT]],
+                                "times": [[25, 0]]}}},
                        "foo": {
                            "001.html": {
-                                "results": [[50,"F"]],
-                                "times": [[50,0]]},
+                                "results": [[50, TEXT]],
+                                "times": [[50, 0]]},
                            "002.html": {
-                                "results": [[100,"I"]],
-                                "times": [[100,0]]}}},
+                                "results": [[100, IMAGE]],
+                                "times": [[100, 0]]}}},
               "version": 4},
             # Incremental results
             {"builds": ["3"],
              "tests": {"baz": {
                            "004.html": {
-                               "results": [[1,"I"]],
-                               "times": [[1,0]]}},
+                               "results": [[1, IMAGE]],
+                               "times": [[1, 0]]}},
                        "foo": {
                            "001.html": {
-                               "results": [[1,"F"]],
-                               "times": [[1,0]]},
+                               "results": [[1, TEXT]],
+                               "times": [[1, 0]]},
                            "002.html": {
-                               "results": [[1,"I"]],
-                               "times": [[1,0]]}}},
+                               "results": [[1, IMAGE]],
+                               "times": [[1, 0]]}}},
              "version": 4},
             # Expected results
             {"builds": ["3", "2", "1"],
              "tests": {"bar": {"baz": {
                            "003.html": {
-                               "results": [[1,"N"],[25,"F"]],
-                               "times": [[26,0]]}}},
+                               "results": [[1, NO_DATA], [25, TEXT]],
+                               "times": [[26, 0]]}}},
                        "baz": {
                            "004.html": {
-                               "results": [[1,"I"]],
-                               "times": [[1,0]]}},
+                               "results": [[1, IMAGE]],
+                               "times": [[1, 0]]}},
                        "foo": {
                            "001.html": {
-                               "results": [[51,"F"]],
-                               "times": [[51,0]]},
+                               "results": [[51, TEXT]],
+                               "times": [[51, 0]]},
                            "002.html": {
-                               "results": [[101,"I"]],
-                               "times": [[101,0]]}}},
+                               "results": [[101, IMAGE]],
+                               "times": [[101, 0]]}}},
              "version": 4})
 
     # FIXME(aboxhall): Add some tests for xhtml/svg test results.
@@ -648,49 +973,49 @@
             {"builds": ["3", "2", "1"],
              "tests": {"foo": {
                            "001.html": {
-                               "results": [[200,"P"]],
-                               "times": [[200,0]]},
-                           "results": [[1,"N"]],
-                           "times": [[1,0]]},
+                               "results": [[200, PASS]],
+                               "times": [[200, 0]]},
+                           "results": [[1, NO_DATA]],
+                           "times": [[1, 0]]},
                        "002.html": {
-                           "results": [[10,"F"]],
-                           "times": [[10,0]]}}},
+                           "results": [[10, TEXT]],
+                           "times": [[10, 0]]}}},
             # Expected results
-            {"foo": {"001.html":{}}, "002.html":{}})
+            {"foo": {"001.html": {}}, "002.html": {}})
 
     def test_gtest(self):
         self._test_merge(
             # Aggregated results
             {"builds": ["2", "1"],
              "tests": {"foo.bar": {
-                           "results": [[50,"F"]],
-                           "times": [[50,0]]},
+                           "results": [[50, TEXT]],
+                           "times": [[50, 0]]},
                        "foo.bar2": {
-                           "results": [[100,"I"]],
-                           "times": [[100,0]]},
+                           "results": [[100, IMAGE]],
+                           "times": [[100, 0]]},
                        },
              "version": 3},
             # Incremental results
             {"builds": ["3"],
              "tests": {"foo.bar2": {
-                           "results": [[1,"I"]],
-                           "times": [[1,0]]},
+                           "results": [[1, IMAGE]],
+                           "times": [[1, 0]]},
                        "foo.bar3": {
-                           "results": [[1,"F"]],
-                           "times": [[1,0]]},
+                           "results": [[1, TEXT]],
+                           "times": [[1, 0]]},
                        },
              "version": 4},
             # Expected results
             {"builds": ["3", "2", "1"],
              "tests": {"foo.bar": {
-                           "results": [[1, "N"], [50,"F"]],
-                           "times": [[51,0]]},
+                           "results": [[1, NO_DATA], [50, TEXT]],
+                           "times": [[51, 0]]},
                        "foo.bar2": {
-                           "results": [[101,"I"]],
-                           "times": [[101,0]]},
+                           "results": [[101, IMAGE]],
+                           "times": [[101, 0]]},
                        "foo.bar3": {
-                           "results": [[1,"F"]],
-                           "times": [[1,0]]},
+                           "results": [[1, TEXT]],
+                           "times": [[1, 0]]},
                        },
              "version": 4})
 
diff --git a/Tools/TestResultServer/static-dashboards/aggregate_results.html b/Tools/TestResultServer/static-dashboards/aggregate_results.html
index 2790ddc..a949633 100644
--- a/Tools/TestResultServer/static-dashboards/aggregate_results.html
+++ b/Tools/TestResultServer/static-dashboards/aggregate_results.html
@@ -49,6 +49,22 @@
     margin-right: 5px;
     padding: 2px;
 }
+table {
+    margin-bottom: 1em;
+}
+td {
+    white-space: nowrap;
+}
+td:first-child {
+    font-weight: bold;
+    font-size: 90%;
+}
+tr:nth-child(odd) {
+    background-color: #eeeeee;
+}
+tr:nth-child(even) {
+    background-color: #e0eaf1;
+}
 </style>
 <script src="builders.js"></script>
 <script src="loader.js"></script>
diff --git a/Tools/TestResultServer/static-dashboards/aggregate_results.js b/Tools/TestResultServer/static-dashboards/aggregate_results.js
index 4400302..dd2535c 100644
--- a/Tools/TestResultServer/static-dashboards/aggregate_results.js
+++ b/Tools/TestResultServer/static-dashboards/aggregate_results.js
@@ -36,7 +36,11 @@
 
 function generatePage(historyInstance)
 {
-    var html = ui.html.testTypeSwitcher(true) + '<br>';
+    var html = ui.html.testTypeSwitcher(true);
+    html += '<div>' +
+        ui.html.checkbox('rawValues', 'Show raw values', g_history.dashboardSpecificState.rawValues) +
+        ui.html.checkbox('showOutliers', 'Show outliers', g_history.dashboardSpecificState.showOutliers) +
+    '</div>';
     for (var builder in currentBuilders())
         html += htmlForBuilder(builder);
     document.body.innerHTML = html;
@@ -46,6 +50,7 @@
 {
     switch(key) {
     case 'rawValues':
+    case 'showOutliers':
         historyInstance.dashboardSpecificState[key] = value == 'true';
         return true;
 
@@ -55,10 +60,10 @@
 }
 
 var defaultDashboardSpecificStateValues = {
-    rawValues: false
+    rawValues: false,
+    showOutliers: true
 };
 
-
 var aggregateResultsConfig = {
     defaultStateValues: defaultDashboardSpecificStateValues,
     generatePage: generatePage,
@@ -69,53 +74,44 @@
 var g_history = new history.History(aggregateResultsConfig);
 g_history.parseCrossDashboardParameters();
 
+g_totalFailureCounts = {};
+
+function totalFailureCountFor(builder)
+{
+    if (!g_totalFailureCounts[builder])
+        g_totalFailureCounts[builder] = getTotalTestCounts(g_resultsByBuilder[builder][FAILURES_BY_TYPE_KEY]);
+    return g_totalFailureCounts[builder];
+}
+
 function htmlForBuilder(builder)
 {
-    var results = g_resultsByBuilder[builder];
-    // Some keys were added later than others, so they don't have as many
-    // builds. Use the shortest.
-    // FIXME: Once 500 runs have finished, we can get rid of passing this
-    // around and just assume all keys have the same number of builders for a
-    // given builder.
-    var numColumns = results[ALL_FIXABLE_COUNT_KEY].length;
     var html = '<div class=container><h2>' + builder + '</h2>';
 
-    if (g_history.dashboardSpecificState.rawValues)
-        html += rawValuesHTML(results, numColumns);
-    else {
+    if (g_history.dashboardSpecificState.rawValues) {
+        html += htmlForTestType(builder);
+    } else {
         html += '<a href="timeline_explorer.html' + (location.hash ? location.hash + '&' : '#') + 'builder=' + builder + '">' +
-            chartHTML(results, numColumns) + '</a>';
+            chartHTML(builder) + '</a>';
     }
 
-    html += '</div>';
-    return html;
+    return html + '</div>';
 }
 
-function rawValuesHTML(results, numColumns)
+function chartHTML(builder)
 {
-    var html = htmlForSummaryTable(results, numColumns) +
-        htmlForTestType(results, FIXABLE_COUNTS_KEY, FIXABLE_DESCRIPTION, numColumns);
-    if (g_history.isLayoutTestResults()) {
-        html += htmlForTestType(results, DEFERRED_COUNTS_KEY, DEFERRED_DESCRIPTION, numColumns) +
-            htmlForTestType(results, WONTFIX_COUNTS_KEY, WONTFIX_DESCRIPTION, numColumns);
-    }
-    return html;
-}
-
-function chartHTML(results, numColumns)
-{
+    var results = g_resultsByBuilder[builder];
+    var totalFailingTests = totalFailureCountFor(builder).totalFailingTests;
     var shouldShowBlinkRevisions = isTipOfTreeWebKitBuilder();
     var revisionKey = shouldShowBlinkRevisions ? BLINK_REVISIONS_KEY : CHROME_REVISIONS_KEY;
-    var startRevision = results[revisionKey][numColumns - 1];
+    var startRevision = results[revisionKey][totalFailingTests.length - 1];
     var endRevision = results[revisionKey][0];
-    var revisionLabel = shouldShowBlinkRevisions ? "WebKit Revision" : "Chromium Revision";
+    var revisionLabel = shouldShowBlinkRevisions ? "Blink Revision" : "Chromium Revision";
 
-    var fixable = results[FIXABLE_COUNT_KEY].slice(0, numColumns);
-    var html = chart("Total failing", {"": fixable}, revisionLabel, startRevision, endRevision);
+    var html = chart("Total failing", {"": totalFailingTests}, revisionLabel, startRevision, endRevision);
 
-    var values = valuesPerExpectation(results[FIXABLE_COUNTS_KEY], numColumns);
+    var values = results[FAILURES_BY_TYPE_KEY];
     // Don't care about number of passes for the charts.
-    delete(values['P']);
+    delete(values[PASS]);
 
     return html + chart("Detailed breakdown", values, revisionLabel, startRevision, endRevision);
 }
@@ -124,23 +120,25 @@
 
 // FIXME: Find a better way to exclude outliers. This is just so we exclude
 // runs where every test failed.
-var MAX_VALUE = 10000;
+var MAX_VALUE = 2000;
 
 function filteredValues(values, desiredNumberOfPoints)
 {
     // Filter out values to make the graph a bit more readable and to keep URLs
     // from exceeding the browsers max length restriction.
     var filterAmount = Math.floor(values.length / desiredNumberOfPoints);
-    if (filterAmount < 1)
-        return values;
-
     return values.filter(function(element, index, array) {
-        // Include the most recent and oldest values and exclude outliers.
-        return (index % filterAmount == 0 || index == array.length - 1) && (array[index] < MAX_VALUE && array[index] != 0);
+        if (!g_history.dashboardSpecificState.showOutliers && element > MAX_VALUE)
+            return false;
+        if (filterAmount <= 1)
+            return true;
+        // Include the most recent and oldest values.
+        return index % filterAmount == 0 || index == array.length - 1;
     });
 }
 
-function chartUrl(title, values, revisionLabel, startRevision, endRevision, desiredNumberOfPoints) {
+function chartUrl(title, values, revisionLabel, startRevision, endRevision, desiredNumberOfPoints)
+{
     var maxValue = 0;
     for (var expectation in values)
         maxValue = Math.max(maxValue, Math.max.apply(null, filteredValues(values[expectation], desiredNumberOfPoints)));
@@ -149,22 +147,19 @@
     var labels = '';
     var numLabels = 0;
 
-    var first = true;
     for (var expectation in values) {
-        chartData += (first ? 'e:' : ',') + extendedEncode(filteredValues(values[expectation], desiredNumberOfPoints).reverse(), maxValue);
+        chartData += (chartData ? ',' : 'e:') + extendedEncode(filteredValues(values[expectation], desiredNumberOfPoints).reverse(), maxValue);
 
         if (expectation) {
             numLabels++;
-            labels += (first ? '' : '|') + expectationsMap()[expectation];
+            labels += (labels ? '|' : '') + expectation;
         }
-        first = false;
     }
 
     var url = "http://chart.apis.google.com/chart?cht=lc&chs=600x400&chd=" +
             chartData + "&chg=15,15,1,3&chxt=x,x,y&chxl=1:||" + revisionLabel +
             "|&chxr=0," + startRevision + "," + endRevision + "|2,0," + maxValue + "&chtt=" + title;
 
-
     if (labels)
         url += "&chdl=" + labels + "&chco=" + LABEL_COLORS.slice(0, numLabels).join(',');
     return url;
@@ -187,56 +182,35 @@
 
 function htmlForRevisionRows(results, numColumns)
 {
-    return htmlForTableRow('WebKit Revision', results[BLINK_REVISIONS_KEY].slice(0, numColumns)) +
+    return htmlForTableRow('Blink Revision', results[BLINK_REVISIONS_KEY].slice(0, numColumns)) +
         htmlForTableRow('Chrome Revision', results[CHROME_REVISIONS_KEY].slice(0, numColumns));
 }
 
-function wrapHTMLInTable(description, html)
+function htmlForTestType(builder)
 {
-    return '<h3>' + description + '</h3><table><tbody>' + html + '</tbody></table>';
-}
+    var counts = totalFailureCountFor(builder);
+    var totalFailing = counts.totalFailingTests;
+    var totalTests = counts.totalTests;
 
-function htmlForSummaryTable(results, numColumns)
-{
     var percent = [];
-    var fixable = results[FIXABLE_COUNT_KEY].slice(0, numColumns);
-    var allFixable = results[ALL_FIXABLE_COUNT_KEY].slice(0, numColumns);
-    for (var i = 0; i < numColumns; i++) {
-        var percentage = 100 * (allFixable[i] - fixable[i]) / allFixable[i];
+    for (var i = 0; i < totalTests.length; i++) {
+        var percentage = 100 * (totalTests[i] - totalFailing[i]) / totalTests[i];
         // Round to the nearest tenth of a percent.
         percent.push(Math.round(percentage * 10) / 10 + '%');
     }
-    var html = htmlForRevisionRows(results, numColumns) +
+
+    var results = g_resultsByBuilder[builder];
+    html = '<table><tbody>' +
+        htmlForRevisionRows(results, totalTests.length) +
         htmlForTableRow('Percent passed', percent) +
-        htmlForTableRow('Failures (deduped)', fixable) +
-        htmlForTableRow('Fixable Tests', allFixable);
-    return wrapHTMLInTable('Summary', html);
-}
+        htmlForTableRow('Failures', totalFailing) +
+        htmlForTableRow('Total Tests', totalTests);
 
-function valuesPerExpectation(counts, numColumns)
-{
-    var values = {};
-    for (var i = 0; i < numColumns; i++) {
-        for (var expectation in expectationsMap()) {
-            if (expectation in counts[i]) {
-                var count = counts[i][expectation];
-                if (!values[expectation])
-                    values[expectation] = [];
-                values[expectation].push(count);
-            }
-        }
-    }
-    return values;
-}
-
-function htmlForTestType(results, key, description, numColumns)
-{
-    var counts = results[key];
-    var html = htmlForRevisionRows(results, numColumns);
-    var values = valuesPerExpectation(counts, numColumns);
+    var values = results[FAILURES_BY_TYPE_KEY];
     for (var expectation in values)
-        html += htmlForTableRow(expectationsMap()[expectation], values[expectation]);
-    return wrapHTMLInTable(description, html);
+        html += htmlForTableRow(expectation, values[expectation]);
+
+    return html + '</tbody></table>';
 }
 
 function htmlForTableRow(columnName, values)
diff --git a/Tools/TestResultServer/static-dashboards/aggregate_results_unittest.js b/Tools/TestResultServer/static-dashboards/aggregate_results_unittest.js
new file mode 100644
index 0000000..39d8bfe
--- /dev/null
+++ b/Tools/TestResultServer/static-dashboards/aggregate_results_unittest.js
@@ -0,0 +1,108 @@
+// Copyright (C) 2013 Google Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//         * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//         * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//         * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+module('aggregate_results');
+
+function setupAggregateResultsData()
+{
+    var historyInstance = new history.History(flakinessConfig);
+    // FIXME(jparent): Remove this once global isn't used.
+    g_history = historyInstance;
+    for (var key in history.DEFAULT_CROSS_DASHBOARD_STATE_VALUES)
+        historyInstance.crossDashboardState[key] = history.DEFAULT_CROSS_DASHBOARD_STATE_VALUES[key];
+
+    var builderName = 'Blink Linux';
+    LOAD_BUILDBOT_DATA([{
+        name: 'ChromiumWebkit',
+        url: 'dummyurl',
+        tests: {'layout-tests': {'builders': [builderName]}}
+    }]);
+    for (var group in LAYOUT_TESTS_BUILDER_GROUPS)
+        LAYOUT_TESTS_BUILDER_GROUPS[group] = null;
+
+    loadBuildersList('@ToT - chromium.org', 'layout-tests');
+
+    g_resultsByBuilder[builderName] = {
+        "num_failures_by_type": {
+            "CRASH": [ 13, 10 ],
+            "MISSING": [ 6, 8 ],
+            "IMAGE+TEXT": [ 17, 17 ],
+            "IMAGE": [ 81, 68 ],
+            "SKIP": [ 1167, 748 ],
+            "TEXT": [ 89, 60 ],
+            "TIMEOUT": [ 72, 48 ],
+            "PASS": [ 28104, 28586 ],
+            "AUDIO": [ 0, 0 ]
+        },
+        blinkRevision: [1234, 1233],
+        chromeRevision: [4567, 4566]
+    }
+    g_totalFailureCounts = {};
+}
+
+test('htmlForBuilder', 1, function() {
+    setupAggregateResultsData();
+    g_history.dashboardSpecificState.rawValues = false;
+
+    var expectedHtml = '<div class=container>' +
+        '<h2>Blink Linux</h2>' +
+        '<a href="timeline_explorer.html#useTestData=true&builder=Blink Linux">' +
+            '<img src="http://chart.apis.google.com/chart?cht=lc&chs=600x400&chd=e:qe..&chg=15,15,1,3&chxt=x,x,y&chxl=1:||Blink Revision|&chxr=0,1233,1234|2,0,1445&chtt=Total failing">' +
+            '<img src="http://chart.apis.google.com/chart?cht=lc&chs=600x400&chd=e:AjAt,AcAV,A7A7,DuEc,pB..,DSE4,CoD8,AAAA&chg=15,15,1,3&chxt=x,x,y&chxl=1:||Blink Revision|&chxr=0,1233,1234|2,0,1167&chtt=Detailed breakdown&chdl=CRASH|MISSING|IMAGE+TEXT|IMAGE|SKIP|TEXT|TIMEOUT|AUDIO&chco=FF0000,00FF00,0000FF,000000,FF6EB4,FFA812,9B30FF,00FFCC">' +
+        '</a>' +
+    '</div>';
+    equal(expectedHtml, htmlForBuilder('Blink Linux'));
+});
+
+test('htmlForBuilderRawResults', 1, function() {
+    setupAggregateResultsData();
+    g_history.dashboardSpecificState.rawValues = true;
+
+    var expectedHtml = '<div class=container>' +
+        '<h2>Blink Linux</h2>' +
+        '<table>' +
+            '<tbody>' +
+                '<tr><td>Blink Revision</td><td>1234</td><td>1233</td></tr>' +
+                '<tr><td>Chrome Revision</td><td>4567</td><td>4566</td></tr>' +
+                '<tr><td>Percent passed</td><td>95.1%</td><td>96.8%</td></tr>' +
+                '<tr><td>Failures</td><td>1445</td><td>959</td></tr>' +
+                '<tr><td>Total Tests</td><td>29549</td><td>29545</td></tr>' +
+                '<tr><td>CRASH</td><td>13</td><td>10</td></tr>' +
+                '<tr><td>MISSING</td><td>6</td><td>8</td></tr>' +
+                '<tr><td>IMAGE+TEXT</td><td>17</td><td>17</td></tr>' +
+                '<tr><td>IMAGE</td><td>81</td><td>68</td></tr>' +
+                '<tr><td>SKIP</td><td>1167</td><td>748</td></tr>' +
+                '<tr><td>TEXT</td><td>89</td><td>60</td></tr>' +
+                '<tr><td>TIMEOUT</td><td>72</td><td>48</td></tr>' +
+                '<tr><td>PASS</td><td>28104</td><td>28586</td></tr>' +
+                '<tr><td>AUDIO</td><td>0</td><td>0</td></tr>' +
+            '</tbody>' +
+        '</table>' +
+    '</div>';
+    equal(expectedHtml, htmlForBuilder('Blink Linux'));
+});
diff --git a/Tools/TestResultServer/static-dashboards/builders_unittests.js b/Tools/TestResultServer/static-dashboards/builders_unittests.js
index 29c02f1..0b2518c 100644
--- a/Tools/TestResultServer/static-dashboards/builders_unittests.js
+++ b/Tools/TestResultServer/static-dashboards/builders_unittests.js
@@ -26,6 +26,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+module('builders');
+
 test('loading steps', 4, function() {
     var tests = {}
     var baseUrl = 'http://dummyurl';
diff --git a/Tools/TestResultServer/static-dashboards/dashboard_base.js b/Tools/TestResultServer/static-dashboards/dashboard_base.js
index 6f394a9..cc2fa1b 100644
--- a/Tools/TestResultServer/static-dashboards/dashboard_base.js
+++ b/Tools/TestResultServer/static-dashboards/dashboard_base.js
@@ -26,50 +26,21 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// @fileoverview Base JS file for pages that want to parse the results JSON
-// from the testing bots. This deals with generic utility functions, visible
-// history, popups and appending the script elements for the JSON files.
-
-
-//////////////////////////////////////////////////////////////////////////////
-// CONSTANTS
-//////////////////////////////////////////////////////////////////////////////
-var GTEST_EXPECTATIONS_MAP_ = {
-    'P': 'PASS',
-    'F': 'FAIL',
-    'N': 'NO DATA',
-    'X': 'SKIPPED'
-};
-
-var LAYOUT_TEST_EXPECTATIONS_MAP_ = {
-    'P': 'PASS',
-    'N': 'NO DATA',
-    'X': 'SKIP',
-    'T': 'TIMEOUT',
-    'F': 'TEXT',
-    'C': 'CRASH',
-    'I': 'IMAGE',
-    'Z': 'IMAGE+TEXT',
-    // We used to glob a bunch of expectations into "O" as OTHER. Expectations
-    // are more precise now though and it just means MISSING.
-    'O': 'MISSING'
-};
-
-
 // Keys in the JSON files.
-var WONTFIX_COUNTS_KEY = 'wontfixCounts';
-var FIXABLE_COUNTS_KEY = 'fixableCounts';
-var DEFERRED_COUNTS_KEY = 'deferredCounts';
-var WONTFIX_DESCRIPTION = 'Tests never to be fixed (WONTFIX)';
-var FIXABLE_DESCRIPTION = 'All tests for this release';
-var DEFERRED_DESCRIPTION = 'All deferred tests (DEFER)';
-var FIXABLE_COUNT_KEY = 'fixableCount';
-var ALL_FIXABLE_COUNT_KEY = 'allFixableCount';
+var FAILURES_BY_TYPE_KEY = 'num_failures_by_type';
+var FAILURE_MAP_KEY = 'failure_map';
 var CHROME_REVISIONS_KEY = 'chromeRevision';
 var BLINK_REVISIONS_KEY = 'blinkRevision';
 var TIMESTAMPS_KEY = 'secondsSinceEpoch';
 var BUILD_NUMBERS_KEY = 'buildNumbers';
 var TESTS_KEY = 'tests';
+
+// Failure types.
+var PASS = 'PASS';
+var NO_DATA = 'NO DATA';
+var SKIP = 'SKIP';
+var NOTRUN = 'NOTRUN';
+
 var ONE_DAY_SECONDS = 60 * 60 * 24;
 var ONE_WEEK_SECONDS = ONE_DAY_SECONDS * 7;
 
@@ -122,9 +93,11 @@
     VALUE: 1
 }
 
-function isFailingResult(value)
+var _NON_FAILURE_TYPES = [PASS, NO_DATA, SKIP, NOTRUN];
+
+function isFailingResult(failureMap, failureType)
 {
-    return 'FSTOCIZ'.indexOf(value) != -1;
+    return _NON_FAILURE_TYPES.indexOf(failureMap[failureType]) == -1;
 }
 
 // Generic utility functions.
@@ -176,12 +149,6 @@
 }
 
 var g_resultsByBuilder = {};
-var g_expectationsByPlatform = {};
-
-function isFlakinessDashboard()
-{
-    return string.endsWith(window.location.pathname, 'flakiness_dashboard.html');
-}
 
 // Create a new function with some of its arguements
 // pre-filled.
@@ -202,8 +169,23 @@
     };
 };
 
-// Returns the appropriate expectations map for the current testType.
-function expectationsMap()
+function getTotalTestCounts(failuresByType)
 {
-    return g_history.isLayoutTestResults() ? LAYOUT_TEST_EXPECTATIONS_MAP_ : GTEST_EXPECTATIONS_MAP_;
-}
\ No newline at end of file
+    var countData;
+    for (var failureType in failuresByType) {
+        var failures = failuresByType[failureType];
+        if (countData) {
+            failures.forEach(function(count, index) {
+                countData.totalTests[index] += count;
+                if (failureType != PASS)
+                    countData.totalFailingTests[index] += count;
+            });
+        } else {
+            countData = {
+                totalTests: failures.slice(),
+                totalFailingTests: failures.slice(),
+            };
+        }
+    }
+    return countData;
+}
diff --git a/Tools/TestResultServer/static-dashboards/flakiness_dashboard.js b/Tools/TestResultServer/static-dashboards/flakiness_dashboard.js
index 750cd3a..1cc98e7 100644
--- a/Tools/TestResultServer/static-dashboards/flakiness_dashboard.js
+++ b/Tools/TestResultServer/static-dashboards/flakiness_dashboard.js
@@ -29,109 +29,23 @@
 //////////////////////////////////////////////////////////////////////////////
 // CONSTANTS
 //////////////////////////////////////////////////////////////////////////////
-var ALL = 'ALL';
 var FORWARD = 'forward';
 var BACKWARD = 'backward';
 var GTEST_MODIFIERS = ['FLAKY', 'FAILS', 'MAYBE', 'DISABLED'];
-var TEST_URL_BASE_PATH_IN_VERSION_CONTROL = 'http://src.chromium.org/viewvc/blink/trunk/LayoutTests/';
-var TEST_URL_BASE_PATH = "http://svn.webkit.org/repository/webkit/trunk/LayoutTests/";
-var EXPECTATIONS_URL_BASE_PATH = TEST_URL_BASE_PATH + "platform/";
+var TEST_URL_BASE_PATH_FOR_BROWSING = 'http://src.chromium.org/viewvc/blink/trunk/LayoutTests/';
+var TEST_URL_BASE_PATH_FOR_XHR = 'http://src.chromium.org/blink/trunk/LayoutTests/';
 var TEST_RESULTS_BASE_PATH = 'http://build.chromium.org/f/chromium/layout_test_results/';
 var GPU_RESULTS_BASE_PATH = 'http://chromium-browser-gpu-tests.commondatastorage.googleapis.com/runs/'
 
-var PLATFORMS = {
-    'CHROMIUM': {
-        expectationsDirectory:  null, /* FIXME: cleanup post blink split 'chromium', */
-        subPlatforms: {
-            'LION': { fallbackPlatforms: ['CHROMIUM'] },
-            'SNOWLEOPARD': { fallbackPlatforms: ['CHROMIUM'] },
-            'XP': { fallbackPlatforms: ['CHROMIUM'] },
-            'VISTA': { fallbackPlatforms: ['CHROMIUM'] },
-            'WIN7': { fallbackPlatforms: ['CHROMIUM'] },
-            'LUCID': { fallbackPlatforms: ['CHROMIUM'] },
-            'ANDROID': { fallbackPlatforms: ['CHROMIUM'], expectationsDirectory: null /* 'chromium-android' */ }
-        },
-        platformModifierUnions: {
-            'MAC': ['CHROMIUM_LION', 'CHROMIUM_SNOWLEOPARD'],
-            'WIN': ['CHROMIUM_XP', 'CHROMIUM_VISTA', 'CHROMIUM_WIN7'],
-            'LINUX': ['CHROMIUM_LUCID']
-        }
-    },
-    'APPLE': {
-        subPlatforms: {
-            'MAC': {
-                expectationsDirectory: 'mac',
-                subPlatforms: {
-                    'LION': {
-                        expectationsDirectory: 'mac-lion',
-                        subPlatforms: {
-                            'WK1': { fallbackPlatforms: ['APPLE_MAC_LION', 'APPLE_MAC'] },
-                            'WK2': { fallbackPlatforms: ['APPLE_MAC_LION', 'APPLE_MAC', 'WK2'] }
-                        }
-                    },
-                    'SNOWLEOPARD': {
-                        expectationsDirectory: null,
-                        subPlatforms: {
-                            'WK1': { fallbackPlatforms: ['APPLE_MAC_SNOWLEOPARD', 'APPLE_MAC'] },
-                            'WK2': { fallbackPlatforms: ['APPLE_MAC_SNOWLEOPARD', 'APPLE_MAC', 'WK2'] }
-                        }
-                    }
-                }
-            },
-            'WIN': {
-                expectationsDirectory: 'win',
-                subPlatforms: {
-                    'XP': { fallbackPlatforms: ['APPLE_WIN'] },
-                    'WIN7': { fallbackPlatforms: ['APPLE_WIN'] }
-                }
-            }
-        }
-    },
-    'GTK': {
-        expectationsDirectory: 'gtk',
-        subPlatforms: {
-            'LINUX': {
-                subPlatforms: {
-                    'WK1': { fallbackPlatforms: ['GTK'] },
-                    'WK2': { fallbackPlatforms: ['GTK', 'WK2'], expectationsDirectory: 'gtk-wk2' }
-                }
-            }
-        }
-    },
-    'QT': {
-        expectationsDirectory: 'qt',
-        subPlatforms: {
-            'LINUX': { fallbackPlatforms: ['QT'] }
-        }
-    },
-    'EFL': {
-        expectationsDirectory: 'efl',
-        subPlatforms: {
-            'LINUX': {
-                subPlatforms: {
-                    'WK1': { fallbackPlatforms: ['EFL'], expectationsDirectory: 'efl-wk1' },
-                    'WK2': { fallbackPlatforms: ['EFL', 'WK2'], expectationsDirectory: 'efl-wk2' }
-                }
-            }
-        }
-    },
-    'WK2': {
-        basePlatform: true,
-        expectationsDirectory: 'wk2'
-    }
-};
-
-var BUILD_TYPES = {'DEBUG': 'DBG', 'RELEASE': 'RELEASE'};
-var MIN_SECONDS_FOR_SLOW_TEST = 4;
-var MIN_SECONDS_FOR_SLOW_TEST_DEBUG = 2 * MIN_SECONDS_FOR_SLOW_TEST;
-var FAIL_RESULTS = ['IMAGE', 'IMAGE+TEXT', 'TEXT', 'MISSING'];
+var RELEASE_TIMEOUT = 6;
+var DEBUG_TIMEOUT = 12;
+var SLOW_MULTIPLIER = 5;
 var CHUNK_SIZE = 25;
-var MAX_RESULTS = 1500;
 
 // FIXME: Figure out how to make this not be hard-coded.
 var VIRTUAL_SUITES = {
-    'platform/chromium/virtual/gpu/fast/canvas': 'fast/canvas',
-    'platform/chromium/virtual/gpu/canvas/philip': 'canvas/philip'
+    'virtual/gpu/fast/canvas': 'fast/canvas',
+    'virtual/gpu/canvas/philip': 'canvas/philip'
 };
 
 var resourceLoader;
@@ -148,8 +62,6 @@
     // result expands to all tests that ever have the given result
     if (historyInstance.dashboardSpecificState.tests || historyInstance.dashboardSpecificState.result)
         generatePageForIndividualTests(individualTests());
-    else if (historyInstance.dashboardSpecificState.expectationsUpdate)
-        generatePageForExpectationsUpdate();
     else
         generatePageForBuilder(historyInstance.dashboardSpecificState.builder || currentBuilderGroup().defaultBuilder());
 
@@ -162,6 +74,7 @@
 function handleValidHashParameter(historyInstance, key, value)
 {
     switch(key) {
+    case 'result':
     case 'tests':
         history.validateParameter(historyInstance.dashboardSpecificState, key, value,
             function() {
@@ -169,18 +82,6 @@
             });
         return true;
 
-    case 'result':
-        value = value.toUpperCase();
-        history.validateParameter(historyInstance.dashboardSpecificState, key, value,
-            function() {
-                for (var result in LAYOUT_TEST_EXPECTATIONS_MAP_) {
-                    if (value == LAYOUT_TEST_EXPECTATIONS_MAP_[result])
-                        return true;
-                }
-                return false;
-            });
-        return true;
-
     case 'builder':
         history.validateParameter(historyInstance.dashboardSpecificState, key, value,
             function() {
@@ -211,7 +112,6 @@
         return true;
 
     case 'resultsHeight':
-    case 'updateIndex':
     case 'revision':
         history.validateParameter(historyInstance.dashboardSpecificState, key, Number(value),
             function() {
@@ -220,17 +120,14 @@
         return true;
 
     case 'showChrome':
-    case 'showCorrectExpectations':
-    case 'showWrongExpectations':
     case 'showExpectations':
     case 'showFlaky':
     case 'showLargeExpectations':
-    case 'legacyExpectationsSemantics':
-    case 'showSkipped':
+    case 'showNonFlaky':
     case 'showSlow':
+    case 'showSkip':
     case 'showUnexpectedPasses':
-    case 'showWontFixSkip':
-    case 'expectationsUpdate':
+    case 'showWontFix':
         historyInstance.dashboardSpecificState[key] = value == 'true';
         return true;
 
@@ -263,18 +160,17 @@
     sortOrder: BACKWARD,
     sortColumn: 'flakiness',
     showExpectations: false,
-    showFlaky: true,
+    // FIXME: Show flaky tests by default if you have a builder picked.
+    // Ideally, we'd fix the dashboard to not pick a default builder and have
+    // you pick one. In the interim, this is a good way to make the default
+    // page load faster since we don't need to generate/layout a large table.
+    showFlaky: false,
     showLargeExpectations: false,
-    legacyExpectationsSemantics: true,
     showChrome: true,
-    showCorrectExpectations: false,
-    showWrongExpectations: false,
-    showWontFixSkip: false,
-    showSlow: false,
-    showSkipped: false,
+    showWontFix: false,
+    showNonFlaky: false,
+    showSkip: false,
     showUnexpectedPasses: false,
-    expectationsUpdate: false,
-    updateIndex: 0,
     resultsHeight: 300,
     revision: null,
     tests: '',
@@ -288,7 +184,6 @@
     'group': 'builder'
 };
 
-
 var flakinessConfig = {
     defaultStateValues: defaultDashboardSpecificStateValues,
     generatePage: generatePage,
@@ -305,35 +200,9 @@
 // GLOBALS
 //////////////////////////////////////////////////////////////////////////////
 
-var g_perBuilderPlatformAndBuildType = {};
 var g_perBuilderFailures = {};
-// Map of builder to arrays of tests that are listed in the expectations file
-// but have no failures for that builder.
-var g_perBuilderWithExpectationsButNoFailures = {};
-// Map of builder to arrays of paths that are skipped. This shows the raw
-// path used in TestExpectations rather than the test path since we
-// don't actually have any data here for skipped tests.
-var g_perBuilderSkippedPaths = {};
 // Maps test path to an array of {builder, testResults} objects.
 var g_testToResultsMap = {};
-// Tests that the user wants to update expectations for.
-var g_confirmedTests = {};
-
-function traversePlatformsTree(callback)
-{
-    function traverse(platformObject, parentPlatform) {
-        Object.keys(platformObject).forEach(function(platformName) {
-            var platform = platformObject[platformName];
-            platformName = parentPlatform ? parentPlatform + platformName : platformName;
-
-            if (platform.subPlatforms)
-                traverse(platform.subPlatforms, platformName + '_');
-            else if (!platform.basePlatform)
-                callback(platform, platformName);
-        });
-    }
-    traverse(PLATFORMS, null);
-}
 
 function createResultsObjectForTest(test, builder)
 {
@@ -344,16 +213,8 @@
         html: '',
         flips: 0,
         slowestTime: 0,
-        slowestNonTimeoutCrashTime: 0,
-        meetsExpectations: true,
-        isWontFixSkip: false,
         isFlaky: false,
-        // Sorted string of missing expectations
-        missing: '',
-        // String of extra expectations (i.e. expectations that never occur).
-        extra: '',
-        modifiers: '',
-        bugs: '',
+        bugs: [],
         expectations : '',
         rawResults: '',
         // List of all the results the test actually has.
@@ -361,79 +222,6 @@
     };
 }
 
-function matchingElement(stringToMatch, elementsMap)
-{
-    for (var element in elementsMap) {
-        if (string.contains(stringToMatch, elementsMap[element]))
-            return element;
-    }
-}
-
-function chromiumPlatform(builderNameUpperCase)
-{
-    if (string.contains(builderNameUpperCase, 'MAC')) {
-        if (string.contains(builderNameUpperCase, '10.7'))
-            return 'CHROMIUM_LION';
-        // The webkit.org 'Chromium Mac Release (Tests)' bot runs SnowLeopard.
-        return 'CHROMIUM_SNOWLEOPARD';
-    }
-    if (string.contains(builderNameUpperCase, 'WIN7'))
-        return 'CHROMIUM_WIN7';
-    if (string.contains(builderNameUpperCase, 'VISTA'))
-        return 'CHROMIUM_VISTA';
-    if (string.contains(builderNameUpperCase, 'WIN') || string.contains(builderNameUpperCase, 'XP'))
-        return 'CHROMIUM_XP';
-    if (string.contains(builderNameUpperCase, 'LINUX'))
-        return 'CHROMIUM_LUCID';
-    if (string.contains(builderNameUpperCase, 'ANDROID'))
-        return 'CHROMIUM_ANDROID';
-    // The interactive bot is XP, but doesn't have an OS in its name.
-    if (string.contains(builderNameUpperCase, 'INTERACTIVE'))
-        return 'CHROMIUM_XP';
-}
-
-
-function platformAndBuildType(builderName)
-{
-    if (!g_perBuilderPlatformAndBuildType[builderName]) {
-        var builderNameUpperCase = builderName.toUpperCase();
-        
-        var platform = chromiumPlatform(builderNameUpperCase);
-        
-        if (!platform)
-            console.error('Could not resolve platform for builder: ' + builderName);
-
-        var buildType = string.contains(builderNameUpperCase, 'DBG') || string.contains(builderNameUpperCase, 'DEBUG') ? 'DEBUG' : 'RELEASE';
-        g_perBuilderPlatformAndBuildType[builderName] = {platform: platform, buildType: buildType};
-    }
-    return g_perBuilderPlatformAndBuildType[builderName];
-}
-
-function isDebug(builderName)
-{
-    return platformAndBuildType(builderName).buildType == 'DEBUG';
-}
-
-// Returns the expectation string for the given single character result.
-// This string should match the expectations that are put into
-// test_expectations.py.
-//
-// For example, if we start explicitly listing IMAGE result failures,
-// this function should start returning 'IMAGE'.
-function expectationsFileStringForResult(result)
-{
-    // For the purposes of comparing against the expectations of a test,
-    // consider simplified diff failures as just text failures since
-    // the test_expectations file doesn't treat them specially.
-    if (result == 'S')
-        return 'TEXT';
-
-    if (result == 'N')
-        return '';
-
-    return expectationsMap()[result];
-}
-
 var TestTrie = function(builders, resultsByBuilder)
 {
     this._trie = {};
@@ -562,22 +350,6 @@
     return testsArray;
 }
 
-// Returns whether this test's slowest time is above the cutoff for
-// being a slow test.
-function isSlowTest(resultsForTest)
-{
-    var maxTime = isDebug(resultsForTest.builder) ? MIN_SECONDS_FOR_SLOW_TEST_DEBUG : MIN_SECONDS_FOR_SLOW_TEST;
-    return resultsForTest.slowestNonTimeoutCrashTime > maxTime;
-}
-
-// Returns whether this test's slowest time is *well* below the cutoff for
-// being a slow test.
-function isFastTest(resultsForTest)
-{
-    var maxTime = isDebug(resultsForTest.builder) ? MIN_SECONDS_FOR_SLOW_TEST_DEBUG : MIN_SECONDS_FOR_SLOW_TEST;
-    return resultsForTest.slowestNonTimeoutCrashTime < maxTime / 2;
-}
-
 function allTestsWithResult(result)
 {
     processTestRunsForAllBuilders();
@@ -585,7 +357,7 @@
 
     getAllTestsTrie().forEach(function(triePath) {
         for (var i = 0; i < g_testToResultsMap[triePath].length; i++) {
-            if (g_testToResultsMap[triePath][i].actualResults.indexOf(result) != -1) {
+            if (g_testToResultsMap[triePath][i].actualResults.indexOf(result.toUpperCase()) != -1) {
                 retVal.push(triePath);
                 break;
             }
@@ -595,372 +367,6 @@
     return retVal;
 }
 
-
-// Adds all the tests for the given builder to the testMapToPopulate.
-function addTestsForBuilder(builder, testMapToPopulate)
-{
-    var tests = g_resultsByBuilder[builder].tests;
-    for (var test in tests) {
-        testMapToPopulate[test] = true;
-    }
-}
-
-// Map of all tests to true values by platform and build type.
-// e.g. g_allTestsByPlatformAndBuildType['XP']['DEBUG'] will have the union
-// of all tests run on the xp-debug builders.
-var g_allTestsByPlatformAndBuildType = {};
-traversePlatformsTree(function(platform, platformName) {
-    g_allTestsByPlatformAndBuildType[platformName] = {};
-});
-
-// Map of all tests to true values by platform and build type.
-// e.g. g_allTestsByPlatformAndBuildType['WIN']['DEBUG'] will have the union
-// of all tests run on the win-debug builders.
-function allTestsWithSamePlatformAndBuildType(platform, buildType)
-{
-    if (!g_allTestsByPlatformAndBuildType[platform][buildType]) {
-        var tests = {};
-        for (var thisBuilder in currentBuilders()) {
-            var thisBuilderBuildInfo = platformAndBuildType(thisBuilder);
-            if (thisBuilderBuildInfo.buildType == buildType && thisBuilderBuildInfo.platform == platform) {
-                addTestsForBuilder(thisBuilder, tests);
-            }
-        }
-        g_allTestsByPlatformAndBuildType[platform][buildType] = tests;
-    }
-
-    return g_allTestsByPlatformAndBuildType[platform][buildType];
-}
-
-function getExpectations(test, platform, buildType)
-{
-    var testObject = g_allExpectations[test];
-    if (!testObject)
-        return null;
-
-    var platformObject = testObject[platform];
-    if (!platformObject)
-        return null;
-        
-    return platformObject[buildType];
-}
-
-function filterBugs(modifiers)
-{
-    var bugs = modifiers.match(/\b(Bug|webkit.org|crbug.com|code.google.com)\S*/g);
-    if (!bugs)
-        return {bugs: '', modifiers: modifiers};
-    for (var j = 0; j < bugs.length; j++)
-        modifiers = modifiers.replace(bugs[j], '');
-    return {bugs: bugs.join(' '), modifiers: string.collapseWhitespace(string.trimString(modifiers))};
-}
-
-function populateExpectationsData(resultsObject)
-{
-    var buildInfo = platformAndBuildType(resultsObject.builder);
-    var expectations = getExpectations(resultsObject.test, buildInfo.platform, buildInfo.buildType);
-    if (!expectations)
-        return;
-
-    resultsObject.expectations = expectations.expectations;
-    var filteredModifiers = filterBugs(expectations.modifiers);
-    resultsObject.modifiers = filteredModifiers.modifiers;
-    resultsObject.bugs = filteredModifiers.bugs;
-    resultsObject.isWontFixSkip = string.contains(expectations.modifiers, 'WONTFIX') || string.contains(expectations.modifiers, 'SKIP'); 
-}
-
-function platformObjectForName(platformName)
-{
-    var platformsList = platformName.split("_");
-    var platformObject = PLATFORMS[platformsList.shift()];
-    platformsList.forEach(function(platformName) {
-        platformObject = platformObject.subPlatforms[platformName];
-    });
-    return platformObject;
-}
-
-// Data structure to hold the processed expectations.
-// g_allExpectations[testPath][platform][buildType] gets the object that has
-// expectations and modifiers properties for this platform/buildType.
-//
-// platform and buildType both go through fallback sets of keys from most
-// specific key to least specific. For example, on Windows XP, we first
-// check the platform WIN-XP, if there's no such object, we check WIN,
-// then finally we check ALL. For build types, we check the current
-// buildType, then ALL.
-var g_allExpectations;
-
-function getParsedExpectations(data)
-{
-    var expectations = [];
-    var lines = data.split('\n');
-    lines.forEach(function(line) {
-        line = string.trimString(line);
-        if (!line || string.startsWith(line, '#'))
-            return;
-
-        // This code mimics _tokenize_line_using_new_format() in
-        // Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
-        //
-        // FIXME: consider doing more error checking here.
-        //
-        // FIXME: Clean this all up once we've fully cut over to the new syntax.
-        var tokens = line.split(/\s+/)
-        var parsed_bugs = [];
-        var parsed_modifiers = [];
-        var parsed_path;
-        var parsed_expectations = [];
-        var state = 'start';
-
-        // This clones _modifier_tokens_list in test_expectations.py.
-        // FIXME: unify with the platforms constants at the top of the file.
-        var modifier_tokens = {
-            'Release': 'RELEASE',
-            'Debug': 'DEBUG',
-            'Mac': 'MAC',
-            'Win': 'WIN',
-            'Linux': 'LINUX',
-            'SnowLeopard': 'SNOWLEOPARD',
-            'Lion': 'LION',
-            'MountainLion': 'MOUNTAINLION',
-            'Win7': 'WIN7',
-            'XP': 'XP',
-            'Vista': 'VISTA',
-            'Android': 'ANDROID',
-        };
-
-        var expectation_tokens = {
-            'Crash': 'CRASH',
-            'Failure': 'FAIL',
-            'ImageOnlyFailure': 'IMAGE',
-            'Missing': 'MISSING',
-            'Pass': 'PASS',
-            'Rebaseline': 'REBASELINE',
-            'Skip': 'SKIP',
-            'Slow': 'SLOW',
-            'Timeout': 'TIMEOUT',
-            'WontFix': 'WONTFIX',
-        };
-
-        var reachedEol = false;
-
-        // States
-        // - start: Next tokens are bugs or a path.
-        // - modifier: Parsed bugs and a '['. Next token is a modifier.
-        // - path: Parsed modifiers and a ']'. Next token is a path.
-        // - path_found: Parsed a path. Next token is '[' or EOL.
-        // - expectations: Parsed a path and a '['. Next tokens are
-        //                 expectations.
-        // - done: Parsed expectations and a ']'. Next is EOL.
-        // - error: Error occurred. Ignore this line.
-        tokens.forEach(function(token) {
-          if (reachedEol)
-              return;
-
-          if (state == 'start' &&
-              (token.indexOf('Bug') == 0 ||
-               token.indexOf('webkit.org') == 0 ||
-               token.indexOf('crbug.com') == 0 ||
-               token.indexOf('code.google.com') == 0)) {
-              parsed_bugs.push(token);
-          } else if (token == '[') {
-              if (state == 'start') {
-                  state = 'modifier';
-              } else if (state == 'path_found') {
-                  state = 'expectations';
-              } else {
-                  console.error('Unexpected \'[\' (state = ' + state + '): ' + line);
-                  state = 'error';
-                  return;
-              }
-          } else if (token == ']') {
-              if (state == 'modifier') {
-                  state = 'path';
-              } else if (state == 'expectations') {
-                  state = 'done';
-              } else {
-                  state = 'error';
-                  return;
-              }
-          } else if (state == 'modifier') {
-              var modifier = modifier_tokens[token];
-              if (!modifier) {
-                  console.error('Unknown modifier: ' + modifier);
-                  state = 'error';
-                  return;
-              }
-              parsed_modifiers.push(modifier);
-          } else if (state == 'expectations') {
-              if (token == 'Rebaseline' || token == 'Skip' || token == 'Slow' || token == 'WontFix') {
-                  parsed_modifiers.push(token.toUpperCase());
-              } else {
-                  var expectation = expectation_tokens[token];
-                  if (!expectation) {
-                      console.error('Unknown expectation: ' + expectation);
-                      state = 'error';
-                      return;
-                  }
-                  parsed_expectations.push(expectation);
-              }
-          } else if (token == '#') {
-              reachedEol = true;
-          } else if (state == 'path' || state == 'start') {
-              parsed_path = token;
-              state = 'path_found';
-          } else {
-              console.error('Unexpected token (state = ' + state + '): ' + token);
-              state = 'error';
-          }
-        });
-
-        if (state != 'path_found' && state != 'done')
-            return;
-
-        if (!parsed_expectations.length) {
-            if (parsed_modifiers.indexOf('Slow') == -1) {
-                parsed_modifiers.push('Skip');
-                parsed_expectations = ['Pass'];
-            }
-        }
-
-        // FIXME: Should we include line number and comment lines here?
-        expectations.push({
-            modifiers: parsed_bugs.concat(parsed_modifiers).join(' '),
-            path: parsed_path,
-            expectations: parsed_expectations.join(' '),
-        });
-    });
-    return expectations;
-}
-
-
-function addTestToAllExpectationsForPlatform(test, platformName, expectations, modifiers)
-{
-    if (!g_allExpectations[test])
-        g_allExpectations[test] = {};
-
-    if (!g_allExpectations[test][platformName])
-        g_allExpectations[test][platformName] = {};
-
-    var allBuildTypes = [];
-    modifiers.split(' ').forEach(function(modifier) {
-        if (modifier in BUILD_TYPES) {
-            allBuildTypes.push(modifier);
-            return;
-        }
-    });
-    if (!allBuildTypes.length)
-        allBuildTypes = Object.keys(BUILD_TYPES);
-
-    allBuildTypes.forEach(function(buildType) {
-        g_allExpectations[test][platformName][buildType] = {modifiers: modifiers, expectations: expectations};
-    });
-}
-
-function processExpectationsForPlatform(platformObject, platformName, expectationsArray)
-{
-    if (!g_allExpectations)
-        g_allExpectations = {};
-
-    if (!expectationsArray)
-        return;
-
-    // Sort the array to hit more specific paths last. More specific
-    // paths (e.g. foo/bar/baz.html) override entries for less-specific ones (e.g. foo/bar).
-    expectationsArray.sort(alphanumericCompare('path'));
-
-    for (var i = 0; i < expectationsArray.length; i++) {
-        var path = expectationsArray[i].path;
-        var modifiers = expectationsArray[i].modifiers;
-        var expectations = expectationsArray[i].expectations;
-
-        var shouldProcessExpectation = false;
-        var hasPlatformModifierUnions = false;
-        if (platformObject.fallbackPlatforms) {
-            platformObject.fallbackPlatforms.forEach(function(fallbackPlatform) {
-                if (shouldProcessExpectation)
-                    return;
-
-                var fallbackPlatformObject = platformObjectForName(fallbackPlatform);
-                if (!fallbackPlatformObject.platformModifierUnions)
-                    return;
-
-                modifiers.split(' ').forEach(function(modifier) {
-                    if (modifier in fallbackPlatformObject.platformModifierUnions) {
-                        hasPlatformModifierUnions = true;
-                        if (fallbackPlatformObject.platformModifierUnions[modifier].indexOf(platformName) != -1)
-                            shouldProcessExpectation = true;
-                    }
-                });
-            });
-        }
-
-        if (!hasPlatformModifierUnions)
-            shouldProcessExpectation = true;
-
-        if (!shouldProcessExpectation)
-            continue;
-
-        getAllTestsTrie().forEach(function(triePath) {
-            addTestToAllExpectationsForPlatform(triePath, platformName, expectations, modifiers);
-        }, path);
-    }
-}
-
-function processExpectations()
-{
-    // FIXME: An expectations-by-platform object should be passed into this function rather than checking
-    // for a global object. That way this function can be better tested and meaningful errors can
-    // be reported when expectations for a given platform are not found in that object.
-    if (!g_expectationsByPlatform)
-        return;
-
-    traversePlatformsTree(function(platform, platformName) {
-        if (platform.fallbackPlatforms) {
-            platform.fallbackPlatforms.forEach(function(fallbackPlatform) {
-                if (fallbackPlatform in g_expectationsByPlatform)
-                    processExpectationsForPlatform(platform, platformName, g_expectationsByPlatform[fallbackPlatform]);
-            });
-        }
-
-        if (platformName in g_expectationsByPlatform)
-            processExpectationsForPlatform(platform, platformName, g_expectationsByPlatform[platformName]);
-    });
-
-    g_expectationsByPlatform = undefined;
-}
-
-function processMissingTestsWithExpectations(builder, platform, buildType)
-{
-    var noFailures = [];
-    var skipped = [];
-
-    var allTestsForPlatformAndBuildType = allTestsWithSamePlatformAndBuildType(platform, buildType);
-    for (var test in g_allExpectations) {
-        var expectations = getExpectations(test, platform, buildType);
-
-        if (!expectations)
-            continue;
-
-        // Test has expectations, but no result in the builders results.
-        // This means it's either SKIP or passes on all builds.
-        if (!allTestsForPlatformAndBuildType[test] && !string.contains(expectations.modifiers, 'WONTFIX')) {
-            if (string.contains(expectations.modifiers, 'SKIP'))
-                skipped.push(test);
-            else if (!expectations.expectations.match(/^\s*PASS\s*$/)) {
-                // Don't show tests expected to always pass. This is used in ways like
-                // the following:
-                // foo/bar = FAIL
-                // foo/bar/baz.html = PASS
-                noFailures.push({test: test, expectations: expectations.expectations, modifiers: expectations.modifiers});
-            }
-        }
-    }
-
-    g_perBuilderSkippedPaths[builder] = skipped.sort();
-    g_perBuilderWithExpectationsButNoFailures[builder] = noFailures.sort();
-}
-
 function processTestResultsForBuilderAsync(builder)
 {
     setTimeout(function() { processTestRunsForBuilder(builder); }, 0);
@@ -982,26 +388,24 @@
         g_perBuilderFailures[builderName] = [];
         return;
     }
-
-    processExpectations();
    
-    var buildInfo = platformAndBuildType(builderName);
-    var platform = buildInfo.platform;
-    var buildType = buildInfo.buildType;
-    processMissingTestsWithExpectations(builderName, platform, buildType);
-
     var failures = [];
     var allTestsForThisBuilder = g_resultsByBuilder[builderName].tests;
 
     for (var test in allTestsForThisBuilder) {
         var resultsForTest = createResultsObjectForTest(test, builderName);
-        populateExpectationsData(resultsForTest);
 
         var rawTest = g_resultsByBuilder[builderName].tests[test];
         resultsForTest.rawTimes = rawTest.times;
         var rawResults = rawTest.results;
         resultsForTest.rawResults = rawResults;
 
+        if (rawTest.expected)
+            resultsForTest.expectations = rawTest.expected;
+
+        if (rawTest.bugs)
+            resultsForTest.bugs = rawTest.bugs;
+
         // FIXME: Switch to resultsByBuild
         var times = resultsForTest.rawTimes;
         var numTimesSeen = 0;
@@ -1019,16 +423,10 @@
             if (rawResults && rawResults[resultsIndex])
                 currentResult = rawResults[resultsIndex][RLE.VALUE];
 
-            var time = times[i][RLE.VALUE]
-
-            // Ignore times for crashing/timeout runs for the sake of seeing if
-            // a test should be marked slow.
-            if (currentResult != 'C' && currentResult != 'T')
-                resultsForTest.slowestNonTimeoutCrashTime = Math.max(resultsForTest.slowestNonTimeoutCrashTime, time);
-            resultsForTest.slowestTime = Math.max(resultsForTest.slowestTime, time);
+            resultsForTest.slowestTime = Math.max(resultsForTest.slowestTime, times[i][RLE.VALUE]);
         }
 
-        processMissingAndExtraExpectations(resultsForTest);
+        determineFlakiness(g_resultsByBuilder[builderName][FAILURE_MAP_KEY], resultsForTest);
         failures.push(resultsForTest);
 
         if (!g_testToResultsMap[test])
@@ -1039,7 +437,7 @@
     g_perBuilderFailures[builderName] = failures;
 }
 
-function processMissingAndExtraExpectations(resultsForTest)
+function determineFlakiness(failureMap, resultsForTest)
 {
     // Heuristic for determining whether expectations apply to a given test:
     // -If a test result happens < MIN_RUNS_FOR_FLAKE, then consider it a flaky
@@ -1051,12 +449,6 @@
     // a few runs, then being fixed or otherwise modified in a non-flaky way.
     var rawResults = resultsForTest.rawResults;
 
-    // If the first result is no-data that means the test is skipped or is
-    // being run on a different builder (e.g. moved from one shard to another).
-    // Ignore these results since we have no real data about what's going on.
-    if (rawResults[0][RLE.VALUE] == 'N')
-        return;
-
     // Only consider flake if it doesn't happen twice in a row.
     var MIN_RUNS_FOR_FLAKE = 2;
     var resultsMap = {}
@@ -1084,109 +476,14 @@
             continue;
         }
 
-        var expectation = expectationsFileStringForResult(result);
+        var expectation = failureMap[result];
         resultsMap[expectation] = true;
         numRealResults++;
     }
 
+    resultsForTest.actualResults = Object.keys(resultsMap);
     resultsForTest.flips = i - 1;
     resultsForTest.isFlaky = numRealResults > 1;
-
-    var missingExpectations = [];
-    var extraExpectations = [];
-
-    if (g_history.isLayoutTestResults()) {
-        var expectationsArray = resultsForTest.expectations ? resultsForTest.expectations.split(' ') : [];
-        extraExpectations = expectationsArray.filter(
-            function(element) {
-                // FIXME: Once all the FAIL lines are removed from
-                // TestExpectations, delete all the legacyExpectationsSemantics
-                // code.
-                if (g_history.dashboardSpecificState.legacyExpectationsSemantics) {
-                    if (element == 'FAIL') {
-                        for (var i = 0; i < FAIL_RESULTS.length; i++) {
-                            if (resultsMap[FAIL_RESULTS[i]])
-                                return false;
-                        }
-                        return true;
-                    }
-                }
-
-                return element && !resultsMap[element] && !string.contains(element, 'BUG');
-            });
-
-        for (var result in resultsMap) {
-            resultsForTest.actualResults.push(result);
-            var hasExpectation = false;
-            for (var i = 0; i < expectationsArray.length; i++) {
-                var expectation = expectationsArray[i];
-                // FIXME: Once all the FAIL lines are removed from
-                // TestExpectations, delete all the legacyExpectationsSemantics
-                // code.
-                if (g_history.dashboardSpecificState.legacyExpectationsSemantics) {
-                    if (expectation == 'FAIL') {
-                        for (var j = 0; j < FAIL_RESULTS.length; j++) {
-                            if (result == FAIL_RESULTS[j]) {
-                                hasExpectation = true;
-                                break;
-                            }
-                        }
-                    }
-                }
-
-                if (result == expectation)
-                    hasExpectation = true;
-
-                if (hasExpectation)
-                    break;
-            }
-            // If we have no expectations for a test and it only passes, then don't
-            // list PASS as a missing expectation. We only want to list PASS if it
-            // flaky passes, so there would be other expectations.
-            if (!hasExpectation && !(!expectationsArray.length && result == 'PASS' && numRealResults == 1))
-                missingExpectations.push(result);
-        }
-
-        // Only highlight tests that take > 2 seconds as needing to be marked as
-        // slow. There are too many tests that take ~2 seconds every couple
-        // hundred runs. It's not worth the manual maintenance effort.
-        // Also, if a test times out, then it should not be marked as slow.
-        var minTimeForNeedsSlow = isDebug(resultsForTest.builder) ? 2 : 1;
-        if (isSlowTest(resultsForTest) && !resultsMap['TIMEOUT'] && (!resultsForTest.modifiers || !string.contains(resultsForTest.modifiers, 'SLOW')))
-            missingExpectations.push('SLOW');
-        else if (isFastTest(resultsForTest) && resultsForTest.modifiers && string.contains(resultsForTest.modifiers, 'SLOW'))
-            extraExpectations.push('SLOW');
-
-        // If there are no missing results and no modifiers besides build
-        // type, platform, or bug, and the expectations are all extra
-        // (that is, expectations - extraExpectations = PASS),
-        // include PASS as extra, since that means this line in
-        // test_expectations can be deleted.
-        if (!missingExpectations.length && !(resultsForTest.modifiers && realModifiers(resultsForTest.modifiers))) {
-            var extraPlusPass = extraExpectations.concat(['PASS']);
-            if (extraPlusPass.sort().toString() == expectationsArray.slice(0).sort().toString())
-                extraExpectations.push('PASS');
-        }
-
-    }
-
-    resultsForTest.meetsExpectations = !missingExpectations.length && !extraExpectations.length;
-    resultsForTest.missing = missingExpectations.sort().join(' ');
-    resultsForTest.extra = extraExpectations.sort().join(' ');
-}
-
-var BUG_URL_PREFIX = '<a href="http://';
-var BUG_URL_POSTFIX = '/$1">crbug.com/$1</a> ';
-var WEBKIT_BUG_URL_POSTFIX = '/$1">webkit.org/b/$1</a> ';
-var INTERNAL_BUG_REPLACE_VALUE = BUG_URL_PREFIX + 'b' + BUG_URL_POSTFIX;
-var EXTERNAL_BUG_REPLACE_VALUE = BUG_URL_PREFIX + 'crbug.com' + BUG_URL_POSTFIX;
-var WEBKIT_BUG_REPLACE_VALUE = BUG_URL_PREFIX + 'webkit.org/b' + WEBKIT_BUG_URL_POSTFIX;
-
-function htmlForBugs(bugs)
-{
-    bugs = bugs.replace(/crbug.com\/(\d+)(\ |$)/g, EXTERNAL_BUG_REPLACE_VALUE);
-    bugs = bugs.replace(/webkit.org\/b\/(\d+)(\ |$)/g, WEBKIT_BUG_REPLACE_VALUE);
-    return bugs;
 }
 
 function linkHTMLToOpenWindow(url, text)
@@ -1200,10 +497,11 @@
 {
     var currentIndex = 0;
     var rawResults = g_resultsByBuilder[builder].tests[testName].results;
+    var failureMap = g_resultsByBuilder[builder][FAILURE_MAP_KEY];
     for (var i = 0; i < rawResults.length; i++) {
         currentIndex += rawResults[i][RLE.LENGTH];
         if (currentIndex > index)
-            return isFailingResult(rawResults[i][RLE.VALUE]);
+            return isFailingResult(failureMap, rawResults[i][RLE.VALUE]);
     }
     console.error('Index exceeds number of results: ' + index);
 }
@@ -1213,11 +511,12 @@
 {
     var rawResults = g_resultsByBuilder[builder].tests[testName].results;
     var buildNumbers = g_resultsByBuilder[builder].buildNumbers;
+    var failureMap = g_resultsByBuilder[builder][FAILURE_MAP_KEY];
     var index = 0;
     var failures = [];
     for (var i = 0; i < rawResults.length; i++) {
         var numResults = rawResults[i][RLE.LENGTH];
-        if (isFailingResult(rawResults[i][RLE.VALUE])) {
+        if (isFailingResult(failureMap, rawResults[i][RLE.VALUE])) {
             for (var j = 0; j < numResults; j++)
                 failures.push(index + j);
         }
@@ -1246,8 +545,10 @@
     var master = builderMaster(builder);
     var buildBasePath = master.logPath(builder, buildNumber);
 
-    html += '<ul><li>' + linkHTMLToOpenWindow(buildBasePath, 'Build log') +
-        '</li><li>Blink: ' + ui.html.blinkRevisionLink(g_resultsByBuilder[builder], index) + '</li>';
+    html += '<ul><li>' + linkHTMLToOpenWindow(buildBasePath, 'Build log');
+
+    if (g_resultsByBuilder[builder][BLINK_REVISIONS_KEY])
+        html += '</li><li>Blink: ' + ui.html.blinkRevisionLink(g_resultsByBuilder[builder], index) + '</li>';
 
     html += '</li><li>Chromium: ' + ui.html.chromiumRevisionLink(g_resultsByBuilder[builder], index) + '</li>';
 
@@ -1264,6 +565,11 @@
     ui.popup.show(e.target, html);
 }
 
+function classNameForFailureString(failure)
+{
+    return failure.replace(/(\+|\ )/, '');
+}
+
 function htmlForTestResults(test)
 {
     var html = '';
@@ -1275,21 +581,18 @@
 
     var indexToReplaceCurrentResult = -1;
     var indexToReplaceCurrentTime = -1;
-    var currentResultArray, currentTimeArray, currentResult, innerHTML, resultString;
     for (var i = 0; i < buildNumbers.length; i++) {
+        var currentResultArray, currentTimeArray, innerHTML, resultString;
+
         if (i > indexToReplaceCurrentResult) {
             currentResultArray = results.shift();
             if (currentResultArray) {
-                currentResult = currentResultArray[RLE.VALUE];
-                // Treat simplified diff failures as just text failures.
-                if (currentResult == 'S')
-                    currentResult = 'F';
+                resultString = g_resultsByBuilder[builder][FAILURE_MAP_KEY][currentResultArray[RLE.VALUE]];
                 indexToReplaceCurrentResult += currentResultArray[RLE.LENGTH];
             } else {
-                currentResult = 'N';
+                resultString = NO_DATA;
                 indexToReplaceCurrentResult += buildNumbers.length;
             }
-            resultString = expectationsFileStringForResult(currentResult);
         }
 
         if (i > indexToReplaceCurrentTime) {
@@ -1304,70 +607,27 @@
             innerHTML = currentTime || '&nbsp;';
         }
 
-        html += '<td title="' + (resultString || 'NO DATA') + '. Click for more info." class="results ' + currentResult +
+        html += '<td title="' + resultString + '. Click for more info." class="results ' + classNameForFailureString(resultString) +
           '" onclick=\'showPopupForBuild(event, "' + builder + '",' + i + ',"' + test.test + '")\'>' + innerHTML;
     }
     return html;
 }
 
-function htmlForTestsWithExpectationsButNoFailures(builder)
+function shouldShowTest(testResult)
 {
-    var tests = g_perBuilderWithExpectationsButNoFailures[builder];
-    var skippedPaths = g_perBuilderSkippedPaths[builder];
-    var showUnexpectedPassesLink =  linkHTMLToToggleState('showUnexpectedPasses', 'tests that have not failed in last ' + g_resultsByBuilder[builder].buildNumbers.length + ' runs');
-    var showSkippedLink = linkHTMLToToggleState('showSkipped', 'skipped tests in TestExpectations');
-    
-    var html = '';
-    if (g_history.isLayoutTestResults() && (tests.length || skippedPaths.length)) {
-        var buildInfo = platformAndBuildType(builder);
-        html += '<h2 style="display:inline-block">Expectations for ' + buildInfo.platform + '-' + buildInfo.buildType + '</h2> ';
-        if (!g_history.dashboardSpecificState.showUnexpectedPasses && tests.length)
-            html += showUnexpectedPassesLink;
-        html += ' ';
-        if (!g_history.dashboardSpecificState.showSkipped && skippedPaths.length)
-            html += showSkippedLink;
-    }
-
-    var open = '<div onclick="selectContents(this)">';
-
-    if (g_history.dashboardSpecificState.showUnexpectedPasses && tests.length) {
-        html += '<div id="passing-tests">' + showUnexpectedPassesLink;
-        for (var i = 0; i < tests.length; i++)
-            html += open + tests[i].test + '</div>';
-        html += '</div>';
-    }
-
-    if (g_history.dashboardSpecificState.showSkipped && skippedPaths.length)
-        html += '<div id="skipped-tests">' + showSkippedLink + open + skippedPaths.join('</div>' + open) + '</div></div>';
-    return html + '<br>';
-}
-
-// Returns whether we should exclude test results from the test table.
-function shouldHideTest(testResult)
-{
-    // For non-layout tests, we always show everything.
     if (!g_history.isLayoutTestResults())
-        return false;
+        return true;
 
-    if (testResult.isWontFixSkip)
-        return !g_history.dashboardSpecificState.showWontFixSkip;
+    if (testResult.expectations == 'WONTFIX')
+        return g_history.dashboardSpecificState.showWontFix;
+
+    if (testResult.expectations == 'SKIP')
+        return g_history.dashboardSpecificState.showSkip;
 
     if (testResult.isFlaky)
-        return !g_history.dashboardSpecificState.showFlaky;
+        return g_history.dashboardSpecificState.showFlaky;
 
-    if (isSlowTest(testResult))
-        return !g_history.dashboardSpecificState.showSlow;
-
-    if (testResult.meetsExpectations)
-        return !g_history.dashboardSpecificState.showCorrectExpectations;
-
-    return !g_history.dashboardSpecificState.showWrongExpectations;
-}
-
-// Sets the browser's selection to the element's contents.
-function selectContents(element)
-{
-    window.getSelection().selectAllChildren(element);
+    return g_history.dashboardSpecificState.showNonFlaky;
 }
 
 function createBugHTML(test)
@@ -1379,12 +639,12 @@
         '[insert probable cause]');
     
     url = 'https://code.google.com/p/chromium/issues/entry?template=Layout%20Test%20Failure&summary=' + title + '&comment=' + description;
-    return '<a href="' + url + '" class="file-bug">FILE BUG</a>';
+    return '<a href="' + url + '">File new bug</a>';
 }
 
 function isCrossBuilderView()
 {
-    return g_history.dashboardSpecificState.tests || g_history.dashboardSpecificState.result || g_history.dashboardSpecificState.expectationsUpdate;
+    return g_history.dashboardSpecificState.tests || g_history.dashboardSpecificState.result;
 }
 
 function tableHeaders(opt_getAll)
@@ -1397,22 +657,28 @@
         headers.push('test');
 
     if (g_history.isLayoutTestResults() || opt_getAll)
-        headers.push('bugs', 'modifiers', 'expectations');
+        headers.push('bugs', 'expectations');
 
     headers.push('slowest run', 'flakiness (numbers are runtimes in seconds)');
     return headers;
 }
 
+function linkifyBugs(bugs)
+{
+    var html = '';
+    bugs.forEach(function(bug) {
+        var bugHtml;
+        if (string.startsWith(bug, 'Bug('))
+            bugHtml = bug;
+        else
+            bugHtml = '<a href="http://' + bug + '">' + bug + '</a>';
+        html += '<div>' + bugHtml + '</div>';
+    });
+    return html;
+}
+
 function htmlForSingleTestRow(test)
 {
-    if (!isCrossBuilderView() && shouldHideTest(test)) {
-        // The innerHTML call is considerably faster if we exclude the rows for
-        // items we're not showing than if we hide them using display:none.
-        // For the crossBuilderView, we want to show all rows the user is
-        // explicitly listing tests to view.
-        return '';
-    }
-
     var headers = tableHeaders();
     var html = '';
     for (var i = 0; i < headers.length; i++) {
@@ -1426,9 +692,8 @@
 
             html += '<tr><td class="' + testCellClassName + '">' + testCellHTML;
         } else if (string.startsWith(header, 'bugs'))
-            html += '<td class=options-container>' + (test.bugs ? htmlForBugs(test.bugs) : createBugHTML(test));
-        else if (string.startsWith(header, 'modifiers'))
-            html += '<td class=options-container>' + test.modifiers;
+            // FIXME: linkify bugs.
+            html += '<td class=options-container>' + (linkifyBugs(test.bugs) || createBugHTML(test));
         else if (string.startsWith(header, 'expectations'))
             html += '<td class=options-container>' + test.expectations;
         else if (string.startsWith(header, 'slowest'))
@@ -1550,243 +815,6 @@
     tests.sort(sortFunctionGetter(resultsProperty, order == BACKWARD));
 }
 
-// Sorts a space separated expectations string in alphanumeric order.
-// @param {string} str The expectations string.
-// @return {string} The sorted string.
-function sortExpectationsString(str)
-{
-    return str.split(' ').sort().join(' ');
-}
-
-function addUpdate(testsNeedingUpdate, test, builderName, missing, extra)
-{
-    if (!testsNeedingUpdate[test])
-        testsNeedingUpdate[test] = {};
-
-    var buildInfo = platformAndBuildType(builderName);
-    var builder = buildInfo.platform + ' ' + buildInfo.buildType;
-    if (!testsNeedingUpdate[test][builder])
-        testsNeedingUpdate[test][builder] = {};
-
-    if (missing)
-        testsNeedingUpdate[test][builder].missing = sortExpectationsString(missing);
-
-    if (extra)
-        testsNeedingUpdate[test][builder].extra = sortExpectationsString(extra);
-}
-
-
-// From a string of modifiers, returns a string of modifiers that
-// are for real result changes, like SLOW, and excludes modifiers
-// that specify things like platform, build_type, bug.
-// @param {string} modifierString String containing all modifiers.
-// @return {string} String containing only modifiers that affect the results.
-function realModifiers(modifierString)
-{
-    var modifiers = modifierString.split(' ');
-    return modifiers.filter(function(modifier) {
-        if (modifier in BUILD_TYPES || string.startsWith(modifier, 'BUG'))
-            return false;
-
-        var matchesPlatformOrUnion = false;
-        traversePlatformsTree(function(platform, platformName) {
-            if (matchesPlatformOrUnion)
-                return;
-
-            if (platform.fallbackPlatforms) {
-                platform.fallbackPlatforms.forEach(function(fallbackPlatform) {
-                    if (matchesPlatformOrUnion)
-                        return;
-
-                    var fallbackPlatformObject = platformObjectForName(fallbackPlatform);
-                    if (!fallbackPlatformObject.platformModifierUnions)
-                        return;
-
-                    matchesPlatformOrUnion = modifier in fallbackPlatformObject.subPlatforms || modifier in fallbackPlatformObject.platformModifierUnions;
-                });
-            }
-        });
-
-        return !matchesPlatformOrUnion;
-    }).join(' ');
-}
-
-function generatePageForExpectationsUpdate()
-{
-    // Always show all runs when auto-updating expectations.
-    if (!g_history.crossDashboardState.showAllRuns)
-        g_history.setQueryParameter('showAllRuns', true);
-
-    processTestRunsForAllBuilders();
-    var testsNeedingUpdate = {};
-    for (var test in g_testToResultsMap) {
-        var results = g_testToResultsMap[test];
-        for (var i = 0; i < results.length; i++) {
-            var thisResult = results[i];
-            
-            if (!thisResult.missing && !thisResult.extra)
-                continue;
-
-            var allPassesOrNoDatas = thisResult.rawResults.filter(function (x) { return x[1] != "P" && x[1] != "N"; }).length == 0;
-
-            if (allPassesOrNoDatas)
-                continue;
-
-            addUpdate(testsNeedingUpdate, test, thisResult.builder, thisResult.missing, thisResult.extra);
-        }
-    }
-
-    for (var builder in currentBuilders()) {
-        var tests = g_perBuilderWithExpectationsButNoFailures[builder]
-        for (var i = 0; i < tests.length; i++) {
-            // Anything extra in this case is what is listed in expectations
-            // plus modifiers other than bug, platform, build type.
-            var modifiers = realModifiers(tests[i].modifiers);
-            var extras = tests[i].expectations;
-            extras += modifiers ? ' ' + modifiers : '';
-            addUpdate(testsNeedingUpdate, tests[i].test, builder, null, extras);
-        }
-    }
-
-    // Get the keys in alphabetical order, so it is easy to process groups
-    // of tests.
-    var keys = Object.keys(testsNeedingUpdate).sort();
-    showUpdateInfoForTest(testsNeedingUpdate, keys);
-}
-
-// Show the test results and the json for differing expectations, and
-// allow the user to include or exclude this update.
-//
-// @param {Object} testsNeedingUpdate Tests that need updating.
-// @param {Array.<string>} keys Keys into the testNeedingUpdate object.
-function showUpdateInfoForTest(testsNeedingUpdate, keys)
-{
-    var test = keys[g_history.dashboardSpecificState.updateIndex];
-    document.body.innerHTML = '';
-
-    // FIXME: Make this DOM creation less verbose.
-    var index = document.createElement('div');
-    index.style.cssFloat = 'right';
-    index.textContent = (g_history.dashboardSpecificState.updateIndex + 1) + ' of ' + keys.length + ' tests';
-    document.body.appendChild(index);
-
-    var buttonRegion = document.createElement('div');
-    var includeBtn = document.createElement('input');
-    includeBtn.type = 'button';
-    includeBtn.value = 'include selected';
-    includeBtn.addEventListener('click', partial(handleUpdate, testsNeedingUpdate, keys), false);
-    buttonRegion.appendChild(includeBtn);
-
-    var previousBtn = document.createElement('input');
-    previousBtn.type = 'button';
-    previousBtn.value = 'previous';
-    previousBtn.addEventListener('click',
-        function() {
-          setUpdateIndex(g_history.dashboardSpecificState.updateIndex - 1, testsNeedingUpdate, keys);
-        },
-        false);
-    buttonRegion.appendChild(previousBtn);
-
-    var nextBtn = document.createElement('input');
-    nextBtn.type = 'button';
-    nextBtn.value = 'next';
-    nextBtn.addEventListener('click', partial(nextUpdate, testsNeedingUpdate, keys), false);
-    buttonRegion.appendChild(nextBtn);
-
-    var doneBtn = document.createElement('input');
-    doneBtn.type = 'button';
-    doneBtn.value = 'done';
-    doneBtn.addEventListener('click', finishUpdate, false);
-    buttonRegion.appendChild(doneBtn);
-
-    document.body.appendChild(buttonRegion);
-
-    var updates = testsNeedingUpdate[test];
-    var checkboxes = document.createElement('div');
-    for (var builder in updates) {
-        // Create a checkbox for each builder.
-        var checkboxRegion = document.createElement('div');
-        var checkbox = document.createElement('input');
-        checkbox.type = 'checkbox';
-        checkbox.id = builder;
-        checkbox.checked = true;
-        checkboxRegion.appendChild(checkbox);
-        checkboxRegion.appendChild(document.createTextNode(builder + ' : ' + JSON.stringify(updates[builder])));
-        checkboxes.appendChild(checkboxRegion);
-    }
-    document.body.appendChild(checkboxes);
-
-    var div = document.createElement('div');
-    div.innerHTML = htmlForIndividualTestOnAllBuildersWithResultsLinks(test);
-    document.body.appendChild(div);
-    appendExpectations();
-}
-
-
-// When the user has finished selecting expectations to update, provide them
-// with json to copy over.
-function finishUpdate()
-{
-    document.body.innerHTML = 'The next step is to copy the output below ' +
-        'into a local file and save it.  Then, run<br><code>python ' +
-        'src/webkit/tools/layout_tests/webkitpy/layout_tests/update_expectat' +
-        'ions_from_dashboard.py path/to/local/file</code><br>in order to ' +
-        'update the expectations file.<br><textarea id="results" '+
-        'style="width:600px;height:600px;"> ' +
-        JSON.stringify(g_confirmedTests) + '</textarea>';
-    results.focus();
-    document.execCommand('SelectAll');
-}
-
-// Handle user click on "include selected" button.
-// Includes the tests that are selected and exclude the rest.
-// @param {Object} testsNeedingUpdate Tests that need updating.
-// @param {Array.<string>} keys Keys into the testNeedingUpdate object.
-function handleUpdate(testsNeedingUpdate, keys)
-{
-    var test = keys[g_history.dashboardSpecificState.updateIndex];
-    var updates = testsNeedingUpdate[test];
-    for (var builder in updates) {
-        // Add included tests, and delete excluded tests if
-        // they were previously included.
-        if ($(builder).checked) {
-            if (!g_confirmedTests[test])
-                g_confirmedTests[test] = {};
-            g_confirmedTests[test][builder] = testsNeedingUpdate[test][builder];
-        } else if (g_confirmedTests[test] && g_confirmedTests[test][builder]) {
-            delete g_confirmedTests[test][builder];
-            if (!Object.keys(g_confirmedTests[test]).length)
-                delete g_confirmedTests[test];
-        }
-    }
-    nextUpdate(testsNeedingUpdate, keys);
-}
-
-
-// Move to the next item to update.
-// @param {Object} testsNeedingUpdate Tests that need updating.
-// @param {Array.<string>} keys Keys into the testNeedingUpdate object.
-function nextUpdate(testsNeedingUpdate, keys)
-{
-    setUpdateIndex(g_history.dashboardSpecificState.updateIndex + 1, testsNeedingUpdate, keys);
-}
-
-
-// Advance the index we are updating at.  If we walk over the end
-// or beginning, just loop.
-// @param {string} newIndex The index into the keys to move to.
-// @param {Object} testsNeedingUpdate Tests that need updating.
-// @param {Array.<string>} keys Keys into the testNeedingUpdate object.
-function setUpdateIndex(newIndex, testsNeedingUpdate, keys)
-{
-    if (newIndex == -1)
-        newIndex = keys.length - 1;
-    else if (newIndex == keys.length)
-        newIndex = 0;
-    g_history.setQueryParameter("updateIndex", newIndex);
-    showUpdateInfoForTest(testsNeedingUpdate, keys);
-}
-
 function htmlForIndividualTestOnAllBuilders(test)
 {
     processTestRunsForAllBuilders();
@@ -1932,15 +960,14 @@
     };
 
     var url = base + platformPart + path;
-    if (isImage || !string.startsWith(base, 'http://svn.webkit.org')) {
+    if (isImage) {
         var dummyNode = document.createElement(isImage ? 'img' : 'script');
         dummyNode.src = url;
         dummyNode.onload = function() {
             var item;
             if (isImage) {
                 item = dummyNode;
-                if (string.startsWith(base, 'http://svn.webkit.org'))
-                    maybeAddPngChecksum(item, url);
+                maybeAddPngChecksum(item, url);
             } else {
                 item = document.createElement('iframe');
                 item.src = url;
@@ -2191,27 +1218,6 @@
     container.appendChild(dummyNode);
 }
 
-function buildInfoForRevision(builder, revision)
-{
-    var revisions = g_resultsByBuilder[builder][BLINK_REVISION_KEY];
-    var revisionStart = 0, revisionEnd = 0, buildNumber = 0;
-    for (var i = 0; i < revisions.length; i++) {
-        if (revision > revisions[i]) {
-            revisionStart = revisions[i - 1];
-            revisionEnd = revisions[i];
-            buildNumber = g_resultsByBuilder[builder].buildNumbers[i - 1];
-            break;
-        }
-    }
-
-    if (revisionEnd)
-      revisionEnd++;
-    else
-      revisionEnd = '';
-
-    return {revisionStart: revisionStart, revisionEnd: revisionEnd, buildNumber: buildNumber};
-}
-
 function lookupVirtualTestSuite(test) {
     for (var suite in VIRTUAL_SUITES) {
         if (test.indexOf(suite) != -1)
@@ -2234,15 +1240,15 @@
     var suite = lookupVirtualTestSuite(test);
 
     if (!suite)
-        addExpectationItem(expectationsContainers, expectationsContainer, null, test, TEST_URL_BASE_PATH);
+        addExpectationItem(expectationsContainers, expectationsContainer, null, test, TEST_URL_BASE_PATH_FOR_XHR);
 
     addExpectations(expectationsContainers, expectationsContainer,
-        TEST_URL_BASE_PATH, '', text, png, reftest_html_file, reftest_mismatch_html_file, suite);
+        TEST_URL_BASE_PATH_FOR_XHR, '', text, png, reftest_html_file, reftest_mismatch_html_file, suite);
 
     var fallbacks = allFallbacks();
     for (var i = 0; i < fallbacks.length; i++) {
       var fallback = 'platform/' + fallbacks[i];
-      addExpectations(expectationsContainers, expectationsContainer, TEST_URL_BASE_PATH, fallback, text, png,
+      addExpectations(expectationsContainers, expectationsContainer, TEST_URL_BASE_PATH_FOR_XHR, fallback, text, png,
           reftest_html_file, reftest_mismatch_html_file, suite);
     }
 
@@ -2362,7 +1368,7 @@
             if (g_history.isLayoutTestResults()) {
                 var suite = lookupVirtualTestSuite(test);
                 var base = suite ? baseTest(test, suite) : test;
-                var versionControlUrl = TEST_URL_BASE_PATH_IN_VERSION_CONTROL + base;
+                var versionControlUrl = TEST_URL_BASE_PATH_FOR_BROWSING + base;
                 testNameHtml += '<h2>' + linkHTMLToOpenWindow(versionControlUrl, test) + '</h2>';
             } else
                 testNameHtml += '<h2>' + test + '</h2>';
@@ -2407,18 +1413,17 @@
 function headerForTestTableHtml()
 {
     return '<h2 style="display:inline-block">Failing tests</h2>' +
-        checkBoxToToggleState('showWontFixSkip', 'WONTFIX/SKIP') +
-        checkBoxToToggleState('showCorrectExpectations', 'tests with correct expectations') +
-        checkBoxToToggleState('showWrongExpectations', 'tests with wrong expectations') +
-        checkBoxToToggleState('showFlaky', 'flaky') +
-        checkBoxToToggleState('showSlow', 'slow');
+        checkBoxToToggleState('showFlaky', 'Show flaky') +
+        checkBoxToToggleState('showNonFlaky', 'Show non-flaky') +
+        checkBoxToToggleState('showSkip', 'Show Skip') +
+        checkBoxToToggleState('showWontFix', 'Show WontFix');
 }
 
 function generatePageForBuilder(builderName)
 {
     processTestRunsForBuilder(builderName);
 
-    var results = g_perBuilderFailures[builderName];
+    var results = g_perBuilderFailures[builderName].filter(shouldShowTest);
     sortTests(results, g_history.dashboardSpecificState.sortColumn, g_history.dashboardSpecificState.sortOrder);
 
     var testsHTML = '';
@@ -2428,17 +1433,16 @@
             tableRowsHTML += htmlForSingleTestRow(results[i])
         testsHTML = htmlForTestTable(tableRowsHTML);
     } else {
-        testsHTML = '<div>No tests found. ';
         if (g_history.isLayoutTestResults())
-            testsHTML += 'Try showing tests with correct expectations.</div>';
+            testsHTML += '<div>Fill in one of the text inputs or checkboxes above to show failures.</div>';
         else
-            testsHTML += 'This means no tests have failed!</div>';
+            testsHTML += '<div>No tests have failed!</div>';
     }
 
     var html = htmlForNavBar();
 
     if (g_history.isLayoutTestResults())
-        html += htmlForTestsWithExpectationsButNoFailures(builderName) + headerForTestTableHtml();
+        html += headerForTestTableHtml();
 
     html += '<br>' + testsHTML;
     appendHTML(html);
@@ -2458,7 +1462,6 @@
     showChrome: 1,
     showExpectations: 1,
     showLargeExpectations: 1,
-    legacyExpectationsSemantics: 1,
     resultsHeight: 1,
     revision: 1
 };
@@ -2499,20 +1502,26 @@
 
     var html = '<div id=legend-toggle onclick="hideLegend()">Hide ' +
         'legend [type esc]</div><div id=legend-contents>';
-    for (var expectation in expectationsMap())
-        html += '<div class=' + expectation + '>' + expectationsMap()[expectation] + '</div>';
+
+    // Just grab the first failureMap. Technically, different builders can have different maps if they
+    // haven't all cycled after the map was changed, but meh.
+    var failureMap = g_resultsByBuilder[Object.keys(g_resultsByBuilder)[0]][FAILURE_MAP_KEY];
+    for (var expectation in failureMap) {
+        var failureString = failureMap[expectation];
+        html += '<div class=' + classNameForFailureString(failureString) + '>' + failureString + '</div>';
+    }
 
     if (g_history.isLayoutTestResults()) {
       html += '</div><br style="clear:both">' +
-          '</div><h3>Test expectatons fallback order.</h3>';
+          '</div><h3>Test expectations fallback order.</h3>';
 
       for (var platform in g_fallbacksMap)
           html += '<div class=fallback-header>' + platform + '</div>' + htmlForFallbackHelp(g_fallbacksMap[platform]);
 
-      html += '<div>TIMES:</div>' +
-          htmlForSlowTimes(MIN_SECONDS_FOR_SLOW_TEST) +
-          '<div>DEBUG TIMES:</div>' +
-          htmlForSlowTimes(MIN_SECONDS_FOR_SLOW_TEST_DEBUG);
+      html += '<div>RELEASE TIMEOUTS:</div>' +
+          htmlForSlowTimes(RELEASE_TIMEOUT) +
+          '<div>DEBUG TIMEOUTS:</div>' +
+          htmlForSlowTimes(DEBUG_TIMEOUT);
     }
 
     legend.innerHTML = html;
@@ -2520,9 +1529,8 @@
 
 function htmlForSlowTimes(minTime)
 {
-    return '<ul><li>&lt;1 second == !SLOW</li><li>&gt;1 second && &lt;' +
-        minTime + ' seconds == SLOW || !SLOW is fine</li><li>&gt;' +
-        minTime + ' seconds == SLOW</li></ul>';
+    return '<ul><li>' + minTime + ' seconds</li><li>' +
+        SLOW_MULTIPLIER * minTime + ' seconds if marked Slow in TestExpectations</li></ul>';
 }
 
 function postHeightChangedMessage()
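
The hunks above replace the dashboard's hard-coded single-letter result decoding (expectationsFileStringForResult and the per-letter CSS classes) with a per-builder failure map read from the results JSON under FAILURE_MAP_KEY, and derive CSS class names from the mapped failure strings. A minimal sketch of that flow, assuming FAILURE_MAP_KEY resolves to 'failure_map' and a results object of roughly the shape below; sampleResults and decodeResults are illustrative names, not dashboard APIs:

// Run-length-encoded results: each entry is [run length, single-character result code].
var RLE = { LENGTH: 0, VALUE: 1 };
// Assumed key under which each builder's code -> failure-string map lives in the results JSON.
var FAILURE_MAP_KEY = 'failure_map';

// Copied from the patch: CSS class names drop '+' and spaces, e.g. 'IMAGE+TEXT' -> 'IMAGETEXT'.
function classNameForFailureString(failure)
{
    return failure.replace(/(\+|\ )/, '');
}

// Hypothetical per-builder results data in roughly the shape the dashboard consumes.
var sampleResults = {
    'WebKit Linux': {
        'failure_map': { 'P': 'PASS', 'F': 'TEXT', 'C': 'CRASH', 'T': 'TIMEOUT', 'N': 'NO DATA' },
        'tests': { 'fast/canvas/example.html': { 'results': [[3, 'P'], [1, 'F']] } }
    }
};

// Expands one test's run-length-encoded results into failure strings via the builder's map.
function decodeResults(builderData, testName)
{
    var failureMap = builderData[FAILURE_MAP_KEY];
    var decoded = [];
    builderData.tests[testName].results.forEach(function(run) {
        for (var i = 0; i < run[RLE.LENGTH]; i++)
            decoded.push(failureMap[run[RLE.VALUE]]);
    });
    return decoded;
}

// decodeResults(sampleResults['WebKit Linux'], 'fast/canvas/example.html')
//   -> ['PASS', 'PASS', 'PASS', 'TEXT']
// classNameForFailureString('NO DATA') -> 'NODATA', matching the renamed CSS classes below.
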
diff --git a/Tools/TestResultServer/static-dashboards/flakiness_dashboard_tests.css b/Tools/TestResultServer/static-dashboards/flakiness_dashboard_tests.css
index 55d912c..edabb5a 100644
--- a/Tools/TestResultServer/static-dashboards/flakiness_dashboard_tests.css
+++ b/Tools/TestResultServer/static-dashboards/flakiness_dashboard_tests.css
@@ -103,36 +103,39 @@
     float: left;
     border: 1px solid grey;
 }
-.P {
+.PASS {
     background-color: #3f3;
 }
-.N {
+.NODATA, .NOTRUN {
     background-color: #fff;
 }
-.X {
+.SKIP {
     background-color: lightgray;
 }
-.C {
+.CRASH {
     background-color: #c90;
 }
-.T {
+.TIMEOUT {
     background-color: #fffc6c;
 }
-.I {
+.IMAGE {
     background-color: #69f;
 }
-.S {
-    background-color: #c6c;
-}
-.F {
+.TEXT {
     background-color: #e98080;
 }
-.O {
+.MISSING {
     background-color: #8a7700;
 }
-.Z {
+.IMAGETEXT {
     background-color: #96f;
 }
+.AUDIO {
+    background-color: lightblue;
+}
+.FLAKY {
+    background-color: turquoise;
+}
 .separator {
     border: 1px solid lightgray;
     height: 0px;
@@ -233,10 +236,6 @@
     right: 0;
     z-index: 1;
 }
-.file-bug {
-    font-weight: bold;
-    font-size: 11px;
-}
 .pngchecksum {
     position: absolute;
     right: 0;
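
The dashboard changes above also turn the per-test bugs field from a modifier string into an array; linkifyBugs (copied from the patch) renders each entry as its own link unless it is a Bug(owner)-style annotation. A rough usage sketch, with string.startsWith stubbed as a plain prefix check and a hypothetical input:

// Stub of the dashboard's string utility; assumed to be a simple prefix check.
var string = {
    startsWith: function(str, prefix) { return str.indexOf(prefix) == 0; }
};

// Copied from the patch: one <div> per bug, linkified unless it is a Bug(owner) annotation.
function linkifyBugs(bugs)
{
    var html = '';
    bugs.forEach(function(bug) {
        var bugHtml;
        if (string.startsWith(bug, 'Bug('))
            bugHtml = bug;
        else
            bugHtml = '<a href="http://' + bug + '">' + bug + '</a>';
        html += '<div>' + bugHtml + '</div>';
    });
    return html;
}

// linkifyBugs(['crbug.com/123456', 'Bug(owner)']) ->
//   '<div><a href="http://crbug.com/123456">crbug.com/123456</a></div><div>Bug(owner)</div>'
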
diff --git a/Tools/TestResultServer/static-dashboards/flakiness_dashboard_unittests.js b/Tools/TestResultServer/static-dashboards/flakiness_dashboard_unittests.js
index a7a792b..1cef5fb 100644
--- a/Tools/TestResultServer/static-dashboards/flakiness_dashboard_unittests.js
+++ b/Tools/TestResultServer/static-dashboards/flakiness_dashboard_unittests.js
@@ -26,14 +26,13 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+module('flakiness_dashboard');
+
 // FIXME(jparent): Rename this once it isn't globals.
 function resetGlobals()
 {
     allExpectations = null;
-    allTests = null;
-    g_expectationsByPlatform = {};
     g_resultsByBuilder = {};
-    g_allExpectations = null;
     g_allTestsTrie = null;
     var historyInstance = new history.History(flakinessConfig);
     // FIXME(jparent): Remove this once global isn't used.
@@ -55,346 +54,8 @@
     return historyInstance;
 }
 
-test('getParsedExpectationsCommentLine', 1, function() {
-    var expectations  = getParsedExpectations('# Comment line crbug.com/12345 [ Release ] tests/foobar.html [ Failure ]');
-    equal(expectations.length, 0, 'Number of expectations');
-});
-
-test('getParsedExpectationsSimpleInput', 4, function() {
-    var expectations  = getParsedExpectations('crbug.com/12345 [ Release ] tests/foobar.html [ Failure ]');
-    equal(expectations.length, 1, 'Number of expectations');
-    equal(expectations[0].modifiers, 'crbug.com/12345 RELEASE', 'modifiers');
-    equal(expectations[0].path, 'tests/foobar.html', 'path');
-    equal(expectations[0].expectations, 'FAIL', 'expectations');
-});
-
-test('getParsedExpectationsSimpleInputWithComment', 4, function() {
-    var expectations  = getParsedExpectations('crbug.com/12345 [ Release ] tests/foobar.html [ Failure ] # Comment');
-    equal(expectations.length, 1, 'Number of expectations');
-    equal(expectations[0].modifiers, 'crbug.com/12345 RELEASE', 'modifiers');
-    equal(expectations[0].path, 'tests/foobar.html', 'path');
-    equal(expectations[0].expectations, 'FAIL', 'expectations');
-});
-
-test('getParsedExpectationsOnlyBug', 1, function() {
-    var expectations  = getParsedExpectations('crbug.com/12345');
-    equal(expectations.length, 0, 'Number of expectations');
-});
-
-test('getParsedExpectationsTwoBugs', 4, function() {
-    var expectations  = getParsedExpectations('crbug.com/12345 tests/foobar.html [ Failure ]');
-    equal(expectations.length, 1, 'Number of expectations');
-    equal(expectations[0].modifiers, 'crbug.com/12345', 'modifiers');
-    equal(expectations[0].path, 'tests/foobar.html', 'path');
-    equal(expectations[0].expectations, 'FAIL', 'expectations');
-});
-
-test('getParsedExpectationsNoBug', 4, function() {
-    var expectations  = getParsedExpectations('tests/foobar.html [ Failure ]');
-    equal(expectations.length, 1, 'Number of expectations');
-    equal(expectations[0].modifiers, '', 'modifiers');
-    equal(expectations[0].path, 'tests/foobar.html', 'path');
-    equal(expectations[0].expectations, 'FAIL', 'expectations');
-});
-
-test('getParsedExpectationsBugPrefixInPath', 4, function() {
-    var expectations = getParsedExpectations('Bug12345 Bug67890 tests/Bug.html [ Failure ]');
-    equal(expectations.length, 1, 'Number of expectations');
-    equal(expectations[0].modifiers, 'Bug12345 Bug67890', 'modifiers');
-    equal(expectations[0].path, 'tests/Bug.html', 'path');
-    equal(expectations[0].expectations, 'FAIL', 'expectations');
-});
-
-test('getParsedExpectationsTwoModifiers', 4, function() {
-    var expectations  = getParsedExpectations('crbug.com/12345 [ Release Debug ] tests/foobar.html [ Failure ]');
-    equal(expectations.length, 1, 'Number of expectations');
-    equal(expectations[0].modifiers, 'crbug.com/12345 RELEASE DEBUG', 'modifiers');
-    equal(expectations[0].path, 'tests/foobar.html', 'path');
-    equal(expectations[0].expectations, 'FAIL', 'expectations');
-});
-
-test('getParsedExpectationsUnknownModifier', 1, function() {
-    var expectations  = getParsedExpectations('crbug.com/12345 [ ImaginaryOS ] tests/foobar.html');
-    equal(expectations.length, 0, 'Number of expectations');
-});
-
-test('getParsedExpectationsTwoPaths', 1, function() {
-    var expectations  = getParsedExpectations('crbug.com/12345 tests/foo.html tests/bar.html [ Failure ]');
-    equal(expectations.length, 0, 'Number of expectations');
-});
-
-test('getParsedExpectationsNoPath', 1, function() {
-    var expectations  = getParsedExpectations('crbug.com/12345 [ Failure ]');
-    equal(expectations.length, 0, 'Number of expectations');
-});
-
-test('getParsedExpectationsHashInPath', 1, function() {
-    var expectations = getParsedExpectations('crbug.com/12345 # [ Failure ]');
-    equal(expectations.length, 0, 'Number of expectations');
-});
-
-test('getParsedExpectationsTwoExpectations', 4, function() {
-    expectations  = getParsedExpectations('crbug.com/12345 tests/foobar.html [ Pass Failure ]');
-    equal(expectations.length, 1, 'Number of expectations');
-    equal(expectations[0].modifiers, 'crbug.com/12345', 'modifiers');
-    equal(expectations[0].path, 'tests/foobar.html', 'path');
-    equal(expectations[0].expectations, 'PASS FAIL', 'expectations');
-});
-
-test('getParsedExpectationsNoExpectation', 4, function() {
-    var expectations  = getParsedExpectations('crbug.com/12345 tests/foobar.html');
-    equal(expectations.length, 1, 'Number of expectations');
-    equal(expectations[0].modifiers, 'crbug.com/12345 Skip', 'modifiers');
-    equal(expectations[0].path, 'tests/foobar.html', 'path');
-    equal(expectations[0].expectations, 'Pass', 'expectations');
-});
-
-test('getParsedExpectationsNoExpectationWithComment', 4, function() {
-    var expectations  = getParsedExpectations('crbug.com/12345 tests/foobar.html # Comment');
-    equal(expectations.length, 1, 'Number of expectations');
-    equal(expectations[0].modifiers, 'crbug.com/12345 Skip', 'modifiers');
-    equal(expectations[0].path, 'tests/foobar.html', 'path');
-    equal(expectations[0].expectations, 'Pass', 'expectations');
-});
-
-test('getParsedExpectationsExpectationConversionToModifier', 4, function() {
-    var expectations  = getParsedExpectations('crbug.com/12345 tests/foobar.html [ Rebaseline ]');
-    equal(expectations.length, 1, 'Number of expectations');
-    equal(expectations[0].modifiers, 'crbug.com/12345 REBASELINE Skip', 'modifiers');
-    equal(expectations[0].path, 'tests/foobar.html', 'path');
-    equal(expectations[0].expectations, 'Pass', 'expectations');
-});
-
-test('getParsedExpectationsUnknownExpectation', 1, function() {
-    var expectations  = getParsedExpectations('crbug.com/12345 tests/foobar.html [ PANIC ]');
-    equal(expectations.length, 0, 'Number of expectations');
-});
-
-function stubResultsByBuilder(data)
-{
-    for (var builder in currentBuilders())
-    {
-        g_resultsByBuilder[builder] = data[builder] || {'tests': []};
-    };
-}
-
-function runExpectationsTest(builder, test, expectations, modifiers)
-{
-    // Put in some dummy results. processExpectations expects the test to be
-    // there.
-    var tests = {};
-    tests[test] = {'results': [[100, 'F']], 'times': [[100, 0]]};
-    var results = {};
-    results[builder] = {'tests': tests};
-    stubResultsByBuilder(results);
-
-    processExpectations();
-    var resultsForTest = createResultsObjectForTest(test, builder);
-    populateExpectationsData(resultsForTest);
-
-    var message = 'Builder: ' + resultsForTest.builder + ' test: ' + resultsForTest.test;
-    equal(resultsForTest.expectations, expectations, message);
-    equal(resultsForTest.modifiers, modifiers, message);
-}
-
-test('releaseFail', 2, function() {
-    resetGlobals();
-    loadBuildersList('@ToT - chromium.org', 'layout-tests');
-
-    var builder = 'WebKit Win';
-    var test = 'foo/1.html';
-    var expectationsArray = [
-        {'modifiers': 'RELEASE', 'expectations': 'FAIL'}
-    ];
-    g_expectationsByPlatform['CHROMIUM'] = getParsedExpectations('[ Release ] ' + test + ' [ Failure ]');
-    runExpectationsTest(builder, test, 'FAIL', 'RELEASE');
-});
-
-test('releaseFailDebugCrashReleaseBuilder', 2, function() {
-    resetGlobals();
-    loadBuildersList('@ToT - chromium.org', 'layout-tests');
-    var builder = 'WebKit Win';
-    var test = 'foo/1.html';
-    var expectationsArray = [
-        {'modifiers': 'RELEASE', 'expectations': 'FAIL'},
-        {'modifiers': 'DEBUG', 'expectations': 'CRASH'}
-    ];
-    g_expectationsByPlatform['CHROMIUM'] = getParsedExpectations('[ Release ] ' + test + ' [ Failure ]\n' +
-        '[ Debug ] ' + test + ' [ Crash ]');
-    runExpectationsTest(builder, test, 'FAIL', 'RELEASE');
-});
-
-test('releaseFailDebugCrashDebugBuilder', 2, function() {
-    resetGlobals();
-    loadBuildersList('@ToT - chromium.org', 'layout-tests');
-    var builder = 'WebKit Win (dbg)';
-    var test = 'foo/1.html';
-    var expectationsArray = [
-        {'modifiers': 'RELEASE', 'expectations': 'FAIL'},
-        {'modifiers': 'DEBUG', 'expectations': 'CRASH'}
-    ];
-    g_expectationsByPlatform['CHROMIUM'] = getParsedExpectations('[ Release ] ' + test + ' [ Failure ]\n' +
-        '[ Debug ] ' + test + ' [ Crash ]');
-    runExpectationsTest(builder, test, 'CRASH', 'DEBUG');
-});
-
-test('overrideJustBuildType', 12, function() {
-    resetGlobals();
-    loadBuildersList('@ToT - chromium.org', 'layout-tests');
-    var test = 'bar/1.html';
-    g_expectationsByPlatform['CHROMIUM'] = getParsedExpectations('bar [ WontFix Failure Pass Timeout ]\n' +
-        '[ Mac ] ' + test + ' [ WontFix Failure ]\n' +
-        '[ Linux Debug ] ' + test + ' [ Crash ]');
-    
-    runExpectationsTest('WebKit Win', test, 'FAIL PASS TIMEOUT', 'WONTFIX');
-    runExpectationsTest('WebKit Win (dbg)(3)', test, 'FAIL PASS TIMEOUT', 'WONTFIX');
-    runExpectationsTest('WebKit Linux', test, 'FAIL PASS TIMEOUT', 'WONTFIX');
-    runExpectationsTest('WebKit Linux (dbg)(3)', test, 'CRASH', 'LINUX DEBUG');
-    runExpectationsTest('WebKit Mac10.7', test, 'FAIL', 'MAC WONTFIX');
-    runExpectationsTest('WebKit Mac10.7 (dbg)(3)', test, 'FAIL', 'MAC WONTFIX');
-});
-
-test('platformAndBuildType', 42, function() {
-    var historyInstance = new history.History(flakinessConfig);
-    // FIXME(jparent): Change to use the flakiness_db's history object
-    // once it exists, rather than tracking global.
-    g_history = historyInstance;
-
-    var runPlatformAndBuildTypeTest = function(builder, expectedPlatform, expectedBuildType) {
-        g_perBuilderPlatformAndBuildType = {};
-        buildInfo = platformAndBuildType(builder);
-        var message = 'Builder: ' + builder;
-        equal(buildInfo.platform, expectedPlatform, message);
-        equal(buildInfo.buildType, expectedBuildType, message);
-    }
-    runPlatformAndBuildTypeTest('WebKit Win (deps)', 'CHROMIUM_XP', 'RELEASE');
-    runPlatformAndBuildTypeTest('WebKit Win (deps)(dbg)(1)', 'CHROMIUM_XP', 'DEBUG');
-    runPlatformAndBuildTypeTest('WebKit Win (deps)(dbg)(2)', 'CHROMIUM_XP', 'DEBUG');
-    runPlatformAndBuildTypeTest('WebKit Linux (deps)', 'CHROMIUM_LUCID', 'RELEASE');
-    runPlatformAndBuildTypeTest('WebKit Linux (deps)(dbg)(1)', 'CHROMIUM_LUCID', 'DEBUG');
-    runPlatformAndBuildTypeTest('WebKit Linux (deps)(dbg)(2)', 'CHROMIUM_LUCID', 'DEBUG');
-    runPlatformAndBuildTypeTest('WebKit Mac10.6 (deps)', 'CHROMIUM_SNOWLEOPARD', 'RELEASE');
-    runPlatformAndBuildTypeTest('WebKit Mac10.6 (deps)(dbg)(1)', 'CHROMIUM_SNOWLEOPARD', 'DEBUG');
-    runPlatformAndBuildTypeTest('WebKit Mac10.6 (deps)(dbg)(2)', 'CHROMIUM_SNOWLEOPARD', 'DEBUG');
-    runPlatformAndBuildTypeTest('WebKit Win', 'CHROMIUM_XP', 'RELEASE');
-    runPlatformAndBuildTypeTest('WebKit Win7', 'CHROMIUM_WIN7', 'RELEASE');
-    runPlatformAndBuildTypeTest('WebKit Win (dbg)(1)', 'CHROMIUM_XP', 'DEBUG');
-    runPlatformAndBuildTypeTest('WebKit Win (dbg)(2)', 'CHROMIUM_XP', 'DEBUG');
-    runPlatformAndBuildTypeTest('WebKit Linux', 'CHROMIUM_LUCID', 'RELEASE');
-    runPlatformAndBuildTypeTest('WebKit Linux 32', 'CHROMIUM_LUCID', 'RELEASE');
-    runPlatformAndBuildTypeTest('WebKit Linux (dbg)(1)', 'CHROMIUM_LUCID', 'DEBUG');
-    runPlatformAndBuildTypeTest('WebKit Linux (dbg)(2)', 'CHROMIUM_LUCID', 'DEBUG');
-    runPlatformAndBuildTypeTest('WebKit Mac10.6', 'CHROMIUM_SNOWLEOPARD', 'RELEASE');
-    runPlatformAndBuildTypeTest('WebKit Mac10.6 (dbg)', 'CHROMIUM_SNOWLEOPARD', 'DEBUG');
-    runPlatformAndBuildTypeTest('XP Tests', 'CHROMIUM_XP', 'RELEASE');
-    runPlatformAndBuildTypeTest('Interactive Tests (dbg)', 'CHROMIUM_XP', 'DEBUG');
-});
-
-test('realModifiers', 3, function() {
-    equal(realModifiers('BUG(Foo) LINUX LION WIN DEBUG SLOW'), 'SLOW');
-    equal(realModifiers('BUG(Foo) LUCID MAC XP RELEASE SKIP'), 'SKIP');
-    equal(realModifiers('BUG(Foo)'), '');
-});
-
-test('allTestsWithSamePlatformAndBuildType', 1, function() {
-    // FIXME: test that allTestsWithSamePlatformAndBuildType actually returns the right set of tests.
-    var expectedPlatformsList = ['CHROMIUM_LION', 'CHROMIUM_SNOWLEOPARD', 'CHROMIUM_XP', 'CHROMIUM_VISTA', 'CHROMIUM_WIN7', 'CHROMIUM_LUCID',
-                                 'CHROMIUM_ANDROID', 'APPLE_MAC_LION_WK1', 'APPLE_MAC_LION_WK2', 'APPLE_MAC_SNOWLEOPARD_WK1', 'APPLE_MAC_SNOWLEOPARD_WK2',
-                                 'APPLE_WIN_XP', 'APPLE_WIN_WIN7',  'GTK_LINUX_WK1', 'GTK_LINUX_WK2', 'QT_LINUX', 'EFL_LINUX_WK1', 'EFL_LINUX_WK2'];
-    var actualPlatformsList = Object.keys(g_allTestsByPlatformAndBuildType);
-    deepEqual(expectedPlatformsList, actualPlatformsList);
-});
-
-test('filterBugs',4, function() {
-    var filtered = filterBugs('Skip crbug.com/123 webkit.org/b/123 Slow Bug(Tony) Debug')
-    equal(filtered.modifiers, 'Skip Slow Debug');
-    equal(filtered.bugs, 'crbug.com/123 webkit.org/b/123 Bug(Tony)');
-
-    filtered = filterBugs('Skip Slow Debug')
-    equal(filtered.modifiers, 'Skip Slow Debug');
-    equal(filtered.bugs, '');
-});
-
-test('getExpectations', 16, function() {
-    resetGlobals();
-    loadBuildersList('@ToT - chromium.org', 'layout-tests');
- 
-    stubResultsByBuilder({
-        'WebKit Win' : {
-            'tests': {
-                'foo/test1.html': {'results': [[100, 'F']], 'times': [[100, 0]]},
-                'foo/test2.html': {'results': [[100, 'F']], 'times': [[100, 0]]},
-                'foo/test3.html': {'results': [[100, 'F']], 'times': [[100, 0]]},
-                'test1.html': {'results': [[100, 'F']], 'times': [[100, 0]]}
-            }
-        }
-    });
-
-    g_expectationsByPlatform['CHROMIUM'] = getParsedExpectations('Bug(123) foo [ Failure Pass Crash ]\n' +
-        'Bug(Foo) [ Release ] foo/test1.html [ Failure ]\n' +
-        '[ Debug ] foo/test1.html [ Crash ]\n' +
-        'Bug(456) foo/test2.html [ Failure ]\n' +
-        '[ Linux Debug ] foo/test2.html [ Crash ]\n' +
-        '[ Release ] test1.html [ Failure ]\n' +
-        '[ Debug ] test1.html [ Crash ]\n');
-    g_expectationsByPlatform['CHROMIUM_ANDROID'] = getParsedExpectations('Bug(654) foo/test2.html [ Crash ]\n');
-
-    g_expectationsByPlatform['GTK'] = getParsedExpectations('Bug(42) foo/test2.html [ ImageOnlyFailure ]\n' +
-        '[ Debug ] test1.html [ Crash ]\n');
-    g_expectationsByPlatform['GTK_LINUX_WK1'] = getParsedExpectations('[ Release ] foo/test1.html [ ImageOnlyFailure ]\n' +
-        'Bug(789) foo/test2.html [ Crash ]\n');
-    g_expectationsByPlatform['GTK_LINUX_WK2'] = getParsedExpectations('Bug(987) foo/test2.html [ Failure ]\n');
-
-    processExpectations();
-    
-    var expectations = getExpectations('foo/test1.html', 'CHROMIUM_XP', 'DEBUG');
-    equal(JSON.stringify(expectations), '{"modifiers":"DEBUG","expectations":"CRASH"}');
-
-    var expectations = getExpectations('foo/test1.html', 'CHROMIUM_LUCID', 'RELEASE');
-    equal(JSON.stringify(expectations), '{"modifiers":"Bug(Foo) RELEASE","expectations":"FAIL"}');
-
-    var expectations = getExpectations('foo/test2.html', 'CHROMIUM_LUCID', 'RELEASE');
-    equal(JSON.stringify(expectations), '{"modifiers":"Bug(456)","expectations":"FAIL"}');
-
-    var expectations = getExpectations('foo/test2.html', 'CHROMIUM_LION', 'DEBUG');
-    equal(JSON.stringify(expectations), '{"modifiers":"Bug(456)","expectations":"FAIL"}');
-
-    var expectations = getExpectations('foo/test2.html', 'CHROMIUM_LUCID', 'DEBUG');
-    equal(JSON.stringify(expectations), '{"modifiers":"LINUX DEBUG","expectations":"CRASH"}');
-
-    var expectations = getExpectations('foo/test2.html', 'CHROMIUM_ANDROID', 'RELEASE');
-    equal(JSON.stringify(expectations), '{"modifiers":"Bug(654)","expectations":"CRASH"}');
-
-    var expectations = getExpectations('test1.html', 'CHROMIUM_ANDROID', 'RELEASE');
-    equal(JSON.stringify(expectations), '{"modifiers":"RELEASE","expectations":"FAIL"}');
-
-    var expectations = getExpectations('foo/test3.html', 'CHROMIUM_LUCID', 'DEBUG');
-    equal(JSON.stringify(expectations), '{"modifiers":"Bug(123)","expectations":"FAIL PASS CRASH"}');
-
-    var expectations = getExpectations('test1.html', 'CHROMIUM_XP', 'DEBUG');
-    equal(JSON.stringify(expectations), '{"modifiers":"DEBUG","expectations":"CRASH"}');
-
-    var expectations = getExpectations('test1.html', 'CHROMIUM_LUCID', 'RELEASE');
-    equal(JSON.stringify(expectations), '{"modifiers":"RELEASE","expectations":"FAIL"}');
-
-    var expectations = getExpectations('foo/test1.html', 'GTK_LINUX_WK1', 'RELEASE');
-    equal(JSON.stringify(expectations), '{"modifiers":"RELEASE","expectations":"IMAGE"}');
-
-    var expectations = getExpectations('foo/test2.html', 'GTK_LINUX_WK1', 'DEBUG');
-    equal(JSON.stringify(expectations), '{"modifiers":"Bug(789)","expectations":"CRASH"}');
-
-    var expectations = getExpectations('test1.html', 'GTK_LINUX_WK1', 'DEBUG');
-    equal(JSON.stringify(expectations), '{"modifiers":"DEBUG","expectations":"CRASH"}');
-
-    var expectations = getExpectations('foo/test2.html', 'GTK_LINUX_WK2', 'DEBUG');
-    equal(JSON.stringify(expectations), '{"modifiers":"Bug(987)","expectations":"FAIL"}');
-
-    var expectations = getExpectations('foo/test2.html', 'GTK_LINUX_WK2', 'RELEASE');
-    equal(JSON.stringify(expectations), '{"modifiers":"Bug(987)","expectations":"FAIL"}');
-
-    var expectations = getExpectations('test1.html', 'GTK_LINUX_WK2', 'DEBUG');
-    equal(JSON.stringify(expectations), '{"modifiers":"DEBUG","expectations":"CRASH"}');
-});
+var FAILURE_MAP = {"A": "AUDIO", "C": "CRASH", "F": "TEXT", "I": "IMAGE", "O": "MISSING",
+    "N": "NO DATA", "P": "PASS", "T": "TIMEOUT", "Y": "NOTRUN", "X": "SKIP", "Z": "IMAGE+TEXT"}
 
 test('substringList', 2, function() {
     var historyInstance = new history.History(flakinessConfig);
@@ -409,40 +70,10 @@
     equal(substringList().toString(), 'foo/bar.FLAKY_foo.html');
 });
 
-test('htmlForTestsWithExpectationsButNoFailures', 4, function() {
-    var historyInstance = new history.History(defaultDashboardSpecificStateValues, generatePage, handleValidHashParameter);
-    // FIXME(jparent): Remove this once global isn't used.
-    g_history = historyInstance;
-    loadBuildersList('@ToT - chromium.org', 'layout-tests');
-    var builder = 'WebKit Win';
-    g_perBuilderWithExpectationsButNoFailures[builder] = ['passing-test1.html', 'passing-test2.html'];
-    g_perBuilderSkippedPaths[builder] = ['skipped-test1.html'];
-    g_resultsByBuilder[builder] = { buildNumbers: [5, 4, 3, 1] };
-
-    historyInstance.dashboardSpecificState.showUnexpectedPasses = true;
-    historyInstance.dashboardSpecificState.showSkipped = true;
-
-    historyInstance.crossDashboardState.group = '@ToT - chromium.org';
-    historyInstance.crossDashboardState.testType = 'layout-tests';
-    
-    var container = document.createElement('div');
-    container.innerHTML = htmlForTestsWithExpectationsButNoFailures(builder);
-    equal(container.querySelectorAll('#passing-tests > div').length, 2);
-    equal(container.querySelectorAll('#skipped-tests > div').length, 1);
-    
-    historyInstance.dashboardSpecificState.showUnexpectedPasses = false;
-    historyInstance.dashboardSpecificState.showSkipped = false;
-    
-    var container = document.createElement('div');
-    container.innerHTML = htmlForTestsWithExpectationsButNoFailures(builder);
-    equal(container.querySelectorAll('#passing-tests > div').length, 0);
-    equal(container.querySelectorAll('#skipped-tests > div').length, 0);
-});
-
 test('headerForTestTableHtml', 1, function() {
     var container = document.createElement('div');
     container.innerHTML = headerForTestTableHtml();
-    equal(container.querySelectorAll('input').length, 5);
+    equal(container.querySelectorAll('input').length, 4);
 });
 
 test('htmlForTestTypeSwitcherGroup', 6, function() {
@@ -492,19 +123,31 @@
     loadBuildersList('@ToT - chromium.org', 'layout-tests');
 
     var builderName = 'WebKit Linux';
+    g_resultsByBuilder[builderName] = {buildNumbers: [2, 1], blinkRevision: [1234, 1233], failure_map: FAILURE_MAP};
+
     var test = 'dummytest.html';
-    g_testToResultsMap[test] = [createResultsObjectForTest(test, builderName)];
+    var resultsObject = createResultsObjectForTest(test, builderName);
+    resultsObject.rawResults = [[1, 'F']];
+    resultsObject.rawTimes = [[1, 0]];
+    resultsObject.bugs = ["crbug.com/1234", "webkit.org/5678"];
+    g_testToResultsMap[test] = [resultsObject];
 
     equal(htmlForIndividualTestOnAllBuildersWithResultsLinks(test),
         '<table class=test-table><thead><tr>' +
                 '<th sortValue=test><div class=table-header-content><span></span><span class=header-text>test</span></div></th>' +
                 '<th sortValue=bugs><div class=table-header-content><span></span><span class=header-text>bugs</span></div></th>' +
-                '<th sortValue=modifiers><div class=table-header-content><span></span><span class=header-text>modifiers</span></div></th>' +
                 '<th sortValue=expectations><div class=table-header-content><span></span><span class=header-text>expectations</span></div></th>' +
                 '<th sortValue=slowest><div class=table-header-content><span></span><span class=header-text>slowest run</span></div></th>' +
                 '<th sortValue=flakiness colspan=10000><div class=table-header-content><span></span><span class=header-text>flakiness (numbers are runtimes in seconds)</span></div></th>' +
             '</tr></thead>' +
-            '<tbody></tbody>' +
+            '<tbody><tr>' +
+                '<td class="test-link"><span class="link" onclick="g_history.setQueryParameter(\'tests\',\'dummytest.html\');">dummytest.html</span>' +
+                '<td class=options-container>' +
+                    '<div><a href="http://crbug.com/1234">crbug.com/1234</a></div>' +
+                    '<div><a href="http://webkit.org/5678">webkit.org/5678</a></div>' +
+                '<td class=options-container><td><td title="TEXT. Click for more info." class="results TEXT" onclick=\'showPopupForBuild(event, "WebKit Linux",0,"dummytest.html")\'>&nbsp;' +
+                '<td title="NO DATA. Click for more info." class="results NODATA" onclick=\'showPopupForBuild(event, "WebKit Linux",1,"dummytest.html")\'>&nbsp;' +
+            '</tbody>' +
         '</table>' +
         '<div>The following builders either don\'t run this test (e.g. it\'s skipped) or all runs passed:</div>' +
         '<div class=skipped-builder-list>' +
@@ -527,7 +170,7 @@
 
     var tests = [test1, test2];
     equal(htmlForIndividualTests(tests),
-        '<h2><a href="' + TEST_URL_BASE_PATH_IN_VERSION_CONTROL + 'foo/nonexistant.html" target="_blank">foo/nonexistant.html</a></h2>' +
+        '<h2><a href="' + TEST_URL_BASE_PATH_FOR_BROWSING + 'foo/nonexistant.html" target="_blank">foo/nonexistant.html</a></h2>' +
         htmlForIndividualTestOnAllBuilders(test1) + 
         '<div class=expectations test=foo/nonexistant.html>' +
             '<div><span class=link onclick=\"g_history.setQueryParameter(\'showExpectations\', true)\">Show results</span> | ' +
@@ -535,7 +178,7 @@
             '<b>Only shows actual results/diffs from the most recent *failure* on each bot.</b></div>' +
         '</div>' +
         '<hr>' +
-        '<h2><a href="' + TEST_URL_BASE_PATH_IN_VERSION_CONTROL + 'bar/nonexistant.html" target="_blank">bar/nonexistant.html</a></h2>' +
+        '<h2><a href="' + TEST_URL_BASE_PATH_FOR_BROWSING + 'bar/nonexistant.html" target="_blank">bar/nonexistant.html</a></h2>' +
         htmlForIndividualTestOnAllBuilders(test2) +
         '<div class=expectations test=bar/nonexistant.html>' +
             '<div><span class=link onclick=\"g_history.setQueryParameter(\'showExpectations\', true)\">Show results</span> | ' +
@@ -554,45 +197,52 @@
     historyInstance.dashboardSpecificState.showChrome = true;
 
     equal(htmlForIndividualTests(tests),
-        '<h2><a href="' + TEST_URL_BASE_PATH_IN_VERSION_CONTROL + 'foo/nonexistant.html" target="_blank">foo/nonexistant.html</a></h2>' +
+        '<h2><a href="' + TEST_URL_BASE_PATH_FOR_BROWSING + 'foo/nonexistant.html" target="_blank">foo/nonexistant.html</a></h2>' +
         htmlForIndividualTestOnAllBuildersWithResultsLinks(test1));
 
     tests = [test1, test2];
     equal(htmlForIndividualTests(tests),
-        '<h2><a href="' + TEST_URL_BASE_PATH_IN_VERSION_CONTROL + 'foo/nonexistant.html" target="_blank">foo/nonexistant.html</a></h2>' +
+        '<h2><a href="' + TEST_URL_BASE_PATH_FOR_BROWSING + 'foo/nonexistant.html" target="_blank">foo/nonexistant.html</a></h2>' +
         htmlForIndividualTestOnAllBuildersWithResultsLinks(test1) + '<hr>' +
-        '<h2><a href="' + TEST_URL_BASE_PATH_IN_VERSION_CONTROL + 'bar/nonexistant.html" target="_blank">bar/nonexistant.html</a></h2>' +
+        '<h2><a href="' + TEST_URL_BASE_PATH_FOR_BROWSING + 'bar/nonexistant.html" target="_blank">bar/nonexistant.html</a></h2>' +
         htmlForIndividualTestOnAllBuildersWithResultsLinks(test2));
 });
 
+test('linkifyBugs', 4, function() {
+    equal(linkifyBugs(["crbug.com/1234", "webkit.org/5678"]),
+        '<div><a href="http://crbug.com/1234">crbug.com/1234</a></div><div><a href="http://webkit.org/5678">webkit.org/5678</a></div>');
+    equal(linkifyBugs(["crbug.com/1234"]), '<div><a href="http://crbug.com/1234">crbug.com/1234</a></div>');
+    equal(linkifyBugs(["Bug(nick)"]), '<div>Bug(nick)</div>');
+    equal(linkifyBugs([]), '');
+});
+
 test('htmlForSingleTestRow', 1, function() {
     var historyInstance = resetGlobals();
     var builder = 'dummyBuilder';
     BUILDER_TO_MASTER[builder] = CHROMIUM_WEBKIT_BUILDER_MASTER;
     var test = createResultsObjectForTest('foo/exists.html', builder);
-    historyInstance.dashboardSpecificState.showCorrectExpectations = true;
-    g_resultsByBuilder[builder] = {buildNumbers: [2, 1], blinkRevision: [1234, 1233]};
+    historyInstance.dashboardSpecificState.showNonFlaky = true;
+    g_resultsByBuilder[builder] = {buildNumbers: [2, 1], blinkRevision: [1234, 1233], failure_map: FAILURE_MAP};
     test.rawResults = [[1, 'F'], [2, 'I']];
     test.rawTimes = [[1, 0], [2, 5]];
     var expected = '<tr>' +
         '<td class="test-link"><span class="link" onclick="g_history.setQueryParameter(\'tests\',\'foo/exists.html\');">foo/exists.html</span>' +
-        '<td class=options-container><a href="https://bugs.webkit.org/enter_bug.cgi?assigned_to=webkit-unassigned%40lists.webkit.org&product=WebKit&form_name=enter_bug&component=Tools%20%2F%20Tests&short_desc=Layout%20Test%20foo%2Fexists.html%20is%20failing&comment=The%20following%20layout%20test%20is%20failing%20on%20%5Binsert%20platform%5D%0A%0Afoo%2Fexists.html%0A%0AProbable%20cause%3A%0A%0A%5Binsert%20probable%20cause%5D" class="file-bug">FILE BUG</a>' +
+        '<td class=options-container><a href="https://code.google.com/p/chromium/issues/entry?template=Layout%20Test%20Failure&summary=Layout%20Test%20foo%2Fexists.html%20is%20failing&comment=The%20following%20layout%20test%20is%20failing%20on%20%5Binsert%20platform%5D%0A%0Afoo%2Fexists.html%0A%0AProbable%20cause%3A%0A%0A%5Binsert%20probable%20cause%5D">File new bug</a>' +
         '<td class=options-container>' +
-            '<td class=options-container>' +
-                '<td><td title="TEXT. Click for more info." class="results F" onclick=\'showPopupForBuild(event, "dummyBuilder",0,"foo/exists.html")\'>&nbsp;' +
-                '<td title="IMAGE. Click for more info." class="results I" onclick=\'showPopupForBuild(event, "dummyBuilder",1,"foo/exists.html")\'>5';
-
+        '<td>' +
+        '<td title="TEXT. Click for more info." class="results TEXT" onclick=\'showPopupForBuild(event, "dummyBuilder",0,"foo/exists.html")\'>&nbsp;' +
+        '<td title="IMAGE. Click for more info." class="results IMAGE" onclick=\'showPopupForBuild(event, "dummyBuilder",1,"foo/exists.html")\'>5';
     equal(htmlForSingleTestRow(test), expected);
 });
 
 test('lookupVirtualTestSuite', 2, function() {
     equal(lookupVirtualTestSuite('fast/canvas/foo.html'), '');
-    equal(lookupVirtualTestSuite('platform/chromium/virtual/gpu/fast/canvas/foo.html'), 'platform/chromium/virtual/gpu/fast/canvas');
+    equal(lookupVirtualTestSuite('virtual/gpu/fast/canvas/foo.html'), 'virtual/gpu/fast/canvas');
 });
 
 test('baseTest', 2, function() {
     equal(baseTest('fast/canvas/foo.html', ''), 'fast/canvas/foo.html');
-    equal(baseTest('platform/chromium/virtual/gpu/fast/canvas/foo.html', 'platform/chromium/virtual/gpu/fast/canvas'), 'fast/canvas/foo.html');
+    equal(baseTest('virtual/gpu/fast/canvas/foo.html', 'virtual/gpu/fast/canvas'), 'fast/canvas/foo.html');
 });
 
 // FIXME: Create builders_tests.js and move this there.
@@ -642,14 +292,14 @@
     var test1 = createResultsObjectForTest('foo/test1.html', 'dummyBuilder');
     var test2 = createResultsObjectForTest('foo/test2.html', 'dummyBuilder');
     var test3 = createResultsObjectForTest('foo/test3.html', 'dummyBuilder');
-    test1.modifiers = 'b';
-    test2.modifiers = 'a';
-    test3.modifiers = '';
+    test1.expectations = 'b';
+    test2.expectations = 'a';
+    test3.expectations = '';
 
     var tests = [test1, test2, test3];
-    sortTests(tests, 'modifiers', FORWARD);
+    sortTests(tests, 'expectations', FORWARD);
     deepEqual(tests, [test2, test1, test3]);
-    sortTests(tests, 'modifiers', BACKWARD);
+    sortTests(tests, 'expectations', BACKWARD);
     deepEqual(tests, [test3, test1, test2]);
 
     test1.bugs = 'b';
@@ -752,7 +402,7 @@
     notEqual(historyInstance.crossDashboardState.group, originalGroup, "group should have been invalidated");   
 });
 
-test('shouldHideTest', 10, function() {
+test('shouldShowTest', 9, function() {
     var historyInstance = new history.History(flakinessConfig);
     historyInstance.parseParameters();
     // FIXME(jparent): Change to use the flakiness_dashboard's history object
@@ -760,35 +410,33 @@
     g_history = historyInstance;
     var test = createResultsObjectForTest('foo/test.html', 'dummyBuilder');
 
-    equal(shouldHideTest(test), true, 'default layout test, hide it.');
-    historyInstance.dashboardSpecificState.showCorrectExpectations = true;
-    equal(shouldHideTest(test), false, 'show correct expectations.');
-    historyInstance.dashboardSpecificState.showCorrectExpectations = false;
+    equal(shouldShowTest(test), false, 'default layout test, hide it.');
+    historyInstance.dashboardSpecificState.showNonFlaky = true;
+    equal(shouldShowTest(test), true, 'show correct expectations.');
+    historyInstance.dashboardSpecificState.showNonFlaky = false;
 
     test = createResultsObjectForTest('foo/test.html', 'dummyBuilder');
-    test.isWontFixSkip = true;
-    equal(shouldHideTest(test), true, 'by default hide these too');
-    historyInstance.dashboardSpecificState.showWontFixSkip = true;
-    equal(shouldHideTest(test), false, 'now we should show it');
-    historyInstance.dashboardSpecificState.showWontFixSkip = false;
+    test.expectations = "WONTFIX";
+    equal(shouldShowTest(test), false, 'by default hide wontfix');
+    historyInstance.dashboardSpecificState.showWontFix = true;
+    equal(shouldShowTest(test), true, 'show wontfix');
+    historyInstance.dashboardSpecificState.showWontFix = false;
+
+    test = createResultsObjectForTest('foo/test.html', 'dummyBuilder');
+    test.expectations = "SKIP";
+    equal(shouldShowTest(test), false, 'we hide skip tests by default');
+    historyInstance.dashboardSpecificState.showSkip = true;
+    equal(shouldShowTest(test), true, 'show skip test');
+    historyInstance.dashboardSpecificState.showSkip = false;
 
     test = createResultsObjectForTest('foo/test.html', 'dummyBuilder');
     test.isFlaky = true;
-    equal(shouldHideTest(test), false, 'we show flaky tests by default');
-    historyInstance.dashboardSpecificState.showFlaky = false;
-    equal(shouldHideTest(test), true, 'do not show flaky test');
+    equal(shouldShowTest(test), false, 'hide flaky tests by default');
     historyInstance.dashboardSpecificState.showFlaky = true;
-
-    test = createResultsObjectForTest('foo/test.html', 'dummyBuilder');
-    test.slowestNonTimeoutCrashTime = MIN_SECONDS_FOR_SLOW_TEST + 1;
-    equal(shouldHideTest(test), true, 'we hide slow tests by default');
-    historyInstance.dashboardSpecificState.showSlow = true;
-    equal(shouldHideTest(test), false, 'now show slow test');
-    historyInstance.dashboardSpecificState.showSlow = false;
+    equal(shouldShowTest(test), true, 'show flaky test');
+    historyInstance.dashboardSpecificState.showFlaky = false;
 
     test = createResultsObjectForTest('foo/test.html', 'dummyBuilder');
     historyInstance.crossDashboardState.testType = 'not layout tests';
-    equal(shouldHideTest(test), false, 'show all non layout tests');
-    test.isWontFixSkip = true;
-    equal(shouldHideTest(test), false, 'show all non layout tests, even if wont fix');
+    equal(shouldShowTest(test), true, 'show all non layout tests');
 });
diff --git a/Tools/TestResultServer/static-dashboards/history_unittests.js b/Tools/TestResultServer/static-dashboards/history_unittests.js
index 7559453..4594a61 100644
--- a/Tools/TestResultServer/static-dashboards/history_unittests.js
+++ b/Tools/TestResultServer/static-dashboards/history_unittests.js
@@ -26,6 +26,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+module('history');
+
 test('queryHashAsMap', 2, function() {
     equal(window.location.hash, '#useTestData=true');
     deepEqual(history.queryHashAsMap(), {useTestData: 'true'});
diff --git a/Tools/TestResultServer/static-dashboards/loader.js b/Tools/TestResultServer/static-dashboards/loader.js
index 8d84682..9fd00b8 100644
--- a/Tools/TestResultServer/static-dashboards/loader.js
+++ b/Tools/TestResultServer/static-dashboards/loader.js
@@ -32,7 +32,6 @@
 (function() {
 
 var TEST_RESULTS_SERVER = 'http://test-results.appspot.com/';
-var CHROMIUM_EXPECTATIONS_URL = 'http://svn.webkit.org/repository/webkit/trunk/LayoutTests/platform/chromium/TestExpectations';
 
 function pathToBuilderResultsFile(builderName) {
     return TEST_RESULTS_SERVER + 'testfile?builder=' + builderName +
@@ -62,7 +61,6 @@
     this._loadingSteps = [
         this._loadBuildersList,
         this._loadResultsFiles,
-        this._loadExpectationsFiles,
     ];
 
     this._buildersThatFailedToLoad = [];
@@ -165,26 +163,19 @@
     {
         var builds = JSON.parse(fileData);
 
-        var json_version = builds['version'];
-        for (var builderName in builds) {
-            if (builderName == 'version')
-                continue;
+        // If a test suite stops being run on a given builder, we don't want to show it.
+        // Assume any builder without a run in the last week (ONE_WEEK_SECONDS) for a
+        // given test suite isn't running that suite anymore.
+        // FIXME: Grab which bots run which tests directly from the buildbot JSON instead.
+        var lastRunSeconds = builds[builderName].secondsSinceEpoch[0];
+        if ((Date.now() / 1000) - lastRunSeconds > ONE_WEEK_SECONDS)
+            return;
 
-            // If a test suite stops being run on a given builder, we don't want to show it.
-            // Assume any builder without a run in two weeks for a given test suite isn't
-            // running that suite anymore.
-            // FIXME: Grab which bots run which tests directly from the buildbot JSON instead.
-            var lastRunSeconds = builds[builderName].secondsSinceEpoch[0];
-            if ((Date.now() / 1000) - lastRunSeconds > ONE_WEEK_SECONDS)
-                continue;
+        if ((Date.now() / 1000) - lastRunSeconds > ONE_DAY_SECONDS)
+            this._staleBuilders.push(builderName);
 
-            if ((Date.now() / 1000) - lastRunSeconds > ONE_DAY_SECONDS)
-                this._staleBuilders.push(builderName);
-
-            if (json_version >= 4)
-                builds[builderName][TESTS_KEY] = loader.Loader._flattenTrie(builds[builderName][TESTS_KEY]);
-            g_resultsByBuilder[builderName] = builds[builderName];
-        }
+        builds[builderName][TESTS_KEY] = loader.Loader._flattenTrie(builds[builderName][TESTS_KEY]);
+        g_resultsByBuilder[builderName] = builds[builderName];
     },
     _handleResultsFileLoadError: function(builderName)
     {
@@ -213,39 +204,6 @@
         }
         return true;
     },
-    _loadExpectationsFiles: function()
-    {
-        if (!isFlakinessDashboard() && !this._history.crossDashboardState.useTestData) {
-            this._loadNext();
-            return;
-        }
-
-        var expectationsFilesToRequest = {};
-        traversePlatformsTree(function(platform, platformName) {
-            if (platform.fallbackPlatforms)
-                platform.fallbackPlatforms.forEach(function(fallbackPlatform) {
-                    var fallbackPlatformObject = platformObjectForName(fallbackPlatform);
-                    if (fallbackPlatformObject.expectationsDirectory && !(fallbackPlatform in expectationsFilesToRequest))
-                        expectationsFilesToRequest[fallbackPlatform] = EXPECTATIONS_URL_BASE_PATH + fallbackPlatformObject.expectationsDirectory + '/TestExpectations';
-                });
-
-            if (platform.expectationsDirectory)
-                expectationsFilesToRequest[platformName] = EXPECTATIONS_URL_BASE_PATH + platform.expectationsDirectory + '/TestExpectations';
-        });
-
-        for (platformWithExpectations in expectationsFilesToRequest)
-            loader.request(expectationsFilesToRequest[platformWithExpectations],
-                    partial(function(loader, platformName, xhr) {
-                        g_expectationsByPlatform[platformName] = getParsedExpectations(xhr.responseText);
-
-                        delete expectationsFilesToRequest[platformName];
-                        if (!Object.keys(expectationsFilesToRequest).length)
-                            loader._loadNext();
-                    }, this, platformWithExpectations),
-                    partial(function(platformName, xhr) {
-                        console.error('Could not load expectations file for ' + platformName);
-                    }, platformWithExpectations));
-    },
     _addErrors: function()
     {
         if (this._buildersThatFailedToLoad.length)
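
A sketch of the per-builder results JSON that _handleResultsFileLoaded now consumes; the field names mirror the mocked response in loader_unittests.js below, while the literal values are invented for illustration.

var ONE_WEEK_SECONDS = 60 * 60 * 24 * 7;  // assumed definition; the constant lives elsewhere in the dashboard code
var builds = {
    "version": 4,
    "WebKit Linux": {
        "failure_map": {"A": "AUDIO", "C": "CRASH", "F": "TEXT"},
        "secondsSinceEpoch": [1368000000],  // newest run first
        "tests": {}
    }
};
// The loader keys into builds[builderName] directly and drops the builder when its
// newest run is older than ONE_WEEK_SECONDS:
var isStale = (Date.now() / 1000) - builds["WebKit Linux"].secondsSinceEpoch[0] > ONE_WEEK_SECONDS;
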
diff --git a/Tools/TestResultServer/static-dashboards/loader_unittests.js b/Tools/TestResultServer/static-dashboards/loader_unittests.js
index 186067a..cfeccd6 100644
--- a/Tools/TestResultServer/static-dashboards/loader_unittests.js
+++ b/Tools/TestResultServer/static-dashboards/loader_unittests.js
@@ -67,7 +67,7 @@
     loader.request = function(url, successCallback, errorCallback) {
         var builderName = /builder=([\w ().]+)&/.exec(url)[1];
         loadedBuilders.push(builderName);
-        successCallback({responseText: '{"version": 4, "' + builderName + '": {"secondsSinceEpoch": [' + Date.now() + '], "tests": {}}}'});
+        successCallback({responseText: '{"version":4,"' + builderName + '":{"failure_map":{"A":"AUDIO","C":"CRASH","F":"TEXT"},"secondsSinceEpoch":[' + Date.now() + '],"tests":{}}}'});
     }
 
     loadBuildersList('@ToT - chromium.org', 'layout-tests');
@@ -79,31 +79,6 @@
     }
 });
 
-test('expectations files loading', 1, function() {
-    resetGlobals();
-    g_history.parseCrossDashboardParameters();
-    // FIXME: re-enable once added back in flakiness_dashboard.js
-    var expectedLoadedPlatforms = [/* "chromium", "chromium-android", */"efl", "efl-wk1", "efl-wk2", "gtk",
-                                   "gtk-wk2", "mac", "mac-lion", /*"mac-snowleopard", */"qt", "win", "wk2"];
-    var loadedPlatforms = [];
-    var resourceLoader = new loader.Loader();
-    resourceLoader._loadNext = function() {
-        deepEqual(loadedPlatforms.sort(), expectedLoadedPlatforms);
-    }
-
-    var requestFunction = loader.request;
-    loader.request = function(url, successCallback, errorCallback) {
-        loadedPlatforms.push(/LayoutTests\/platform\/(.+)\/TestExpectations/.exec(url)[1]);
-        successCallback({responseText: ''});
-    }
-
-    try {
-        resourceLoader._loadExpectationsFiles();
-    } finally {
-        loader.request = requestFunction;
-    }
-});
-
 test('results file failing to load', 2, function() {
     resetGlobals();
     loadBuildersList('@ToT - chromium.org', 'layout-tests');
diff --git a/Tools/TestResultServer/static-dashboards/run-embedded-unittests.html b/Tools/TestResultServer/static-dashboards/run-embedded-unittests.html
index 10f058a..08c7b29 100644
--- a/Tools/TestResultServer/static-dashboards/run-embedded-unittests.html
+++ b/Tools/TestResultServer/static-dashboards/run-embedded-unittests.html
@@ -52,6 +52,7 @@
 <script src="history_unittests.js"></script>
 <script src="dashboard_base.js"></script>
 <script src="ui.js"></script>
+<script src="ui_unittests.js"></script>
 <script src="loader.js"></script>
 <script src="loader_unittests.js"></script>
 <script>
diff --git a/Tools/TestResultServer/static-dashboards/run-unittests.html b/Tools/TestResultServer/static-dashboards/run-unittests.html
index 15bc212..fb42134 100644
--- a/Tools/TestResultServer/static-dashboards/run-unittests.html
+++ b/Tools/TestResultServer/static-dashboards/run-unittests.html
@@ -46,6 +46,7 @@
 <script src="history_unittests.js"></script>
 <script src="dashboard_base.js"></script>
 <script src="ui.js"></script>
+<script src="ui_unittests.js"></script>
 <script src="loader.js"></script>
 <script src="loader_unittests.js"></script>
 <script>
@@ -60,5 +61,8 @@
 
 <!-- FIXME: Split this up into multiple unittest.js, e.g. one for builders.js and one for dashboard_base.js. -->
 <script src="flakiness_dashboard_unittests.js"></script>
+
+<script src="aggregate_results.js"></script>
+<script src="aggregate_results_unittest.js"></script>
 </body>
 </html>
diff --git a/Tools/TestResultServer/static-dashboards/string.js b/Tools/TestResultServer/static-dashboards/string.js
index 3e1f7f9..6424dbf 100644
--- a/Tools/TestResultServer/static-dashboards/string.js
+++ b/Tools/TestResultServer/static-dashboards/string.js
@@ -53,7 +53,7 @@
 
 string.isValidName = function(str)
 {
-    return str.match(/[A-Za-z0-9\-\_,]/);
+    return str.match(/[A-Za-z0-9\-\_,\+]/);
 }
 
 string.trimString = function(str)
diff --git a/Tools/TestResultServer/static-dashboards/timeline_explorer.js b/Tools/TestResultServer/static-dashboards/timeline_explorer.js
index 3ff7f92..2be0fcd 100644
--- a/Tools/TestResultServer/static-dashboards/timeline_explorer.js
+++ b/Tools/TestResultServer/static-dashboards/timeline_explorer.js
@@ -45,10 +45,12 @@
 
 function generatePage(historyInstance)
 {
-    g_buildIndicesByTimestamp = {};
     var results = g_resultsByBuilder[historyInstance.dashboardSpecificState.builder || currentBuilderGroup().defaultBuilder()];
 
-    for (var i = 0; i < results[FIXABLE_COUNTS_KEY].length; i++) {
+    g_totalFailureCount = getTotalTestCounts(results[FAILURES_BY_TYPE_KEY]).totalFailingTests;
+
+    g_buildIndicesByTimestamp = {};
+    for (var i = 0; i < g_totalFailureCount.length; i++) {
         var buildDate = new Date(results[TIMESTAMPS_KEY][i] * 1000);
         g_buildIndicesByTimestamp[buildDate.getTime()] = i;
     }
@@ -106,9 +108,7 @@
 
 function initCurrentBuilderTestResults()
 {
-    var startTime = Date.now();
     g_currentBuilderTestResults = _decompressResults(g_resultsByBuilder[g_history.dashboardSpecificState.builder || currentBuilderGroup().defaultBuilder()]);
-    console.log( 'Time to get test results by build: ' + (Date.now() - startTime));
 }
 
 function shouldShowBlinkRevisionsOnly()
@@ -125,11 +125,11 @@
     var annotations = [];
 
     // Dygraph prefers to be handed data in chronological order.
-    for (var i = results[FIXABLE_COUNTS_KEY].length - 1; i >= 0; i--) {
+    for (var i = g_totalFailureCount.length - 1; i >= 0; i--) {
         var buildDate = new Date(results[TIMESTAMPS_KEY][i] * 1000);
         // FIXME: Find a better way to exclude outliers. This is just so we
         // exclude runs where every test failed.
-        var failureCount = Math.min(results[FIXABLE_COUNT_KEY][i], 10000);
+        var failureCount = Math.min(g_totalFailureCount[i], 10000);
 
         if (g_history.dashboardSpecificState.ignoreFlakyTests)
             failureCount -= g_currentBuilderTestResults.flakyDeltasByBuild[i].total || 0;
@@ -254,22 +254,23 @@
         addRow(label, currentValue + deltaText);
     }
 
-    var expectations = expectationsMap();
     var flakyDeltasByBuild = g_currentBuilderTestResults.flakyDeltasByBuild;
-    for (var expectationKey in expectations) {
-        if (expectationKey in results[FIXABLE_COUNTS_KEY][index]) {
-            var currentCount = results[FIXABLE_COUNTS_KEY][index][expectationKey];
-            var previousCount = results[FIXABLE_COUNTS_KEY][index + 1][expectationKey];
-            if (g_history.dashboardSpecificState.ignoreFlakyTests) {
-                currentCount -= flakyDeltasByBuild[index][expectationKey] || 0;
-                previousCount -= flakyDeltasByBuild[index + 1][expectationKey] || 0;
-            }
-            addNumberRow(expectations[expectationKey], currentCount, previousCount);
+    var failures_by_type = results[FAILURES_BY_TYPE_KEY];
+    for (var failureType in failures_by_type) {
+        var failureCount = failures_by_type[failureType];
+        var currentCount = failureCount[index];
+        var previousCount = failureCount[index + 1];
+        if (!currentCount && !previousCount)
+            continue;
+        if (g_history.dashboardSpecificState.ignoreFlakyTests) {
+            currentCount -= flakyDeltasByBuild[index][failureType] || 0;
+            previousCount -= flakyDeltasByBuild[index + 1][failureType] || 0;
         }
+        addNumberRow(failureType, currentCount, previousCount);
     }
 
-    var currentTotal = results[FIXABLE_COUNT_KEY][index];
-    var previousTotal = results[FIXABLE_COUNT_KEY][index + 1];
+    var currentTotal = g_totalFailureCount[index];
+    var previousTotal = g_totalFailureCount[index + 1];
     if (g_history.dashboardSpecificState.ignoreFlakyTests) {
         currentTotal -= flakyDeltasByBuild[index].total || 0;
         previousTotal -= flakyDeltasByBuild[index + 1].total || 0;
@@ -306,7 +307,7 @@
     var currentResults = g_currentBuilderTestResults.resultsByBuild[index];
     var testNames = g_currentBuilderTestResults.testNames;
     var previousResults = g_currentBuilderTestResults.resultsByBuild[index + 1];
-    var expectations = expectationsMap();
+    var expectations = g_currentBuilderTestResults.failureMap;
 
     var deltas = {};
     function addDelta(category, testIndex)
@@ -363,14 +364,6 @@
     deltaWindow.document.write(html);
 }
 
-var _FAILURE_EXPECTATIONS = {
-    'T': 1,
-    'F': 1,
-    'C': 1,
-    'I': 1,
-    'Z': 1
-};
-
 // "Decompresses" the RLE-encoding of test results so that we can query it
 // by build index and test name.
 //
@@ -383,7 +376,7 @@
 function _decompressResults(builderResults)
 {
     var builderTestResults = builderResults[TESTS_KEY];
-    var buildCount = builderResults[FIXABLE_COUNTS_KEY].length;
+    var buildCount = g_totalFailureCount.length;
     var resultsByBuild = new Array(buildCount);
     var flakyDeltasByBuild = new Array(buildCount);
 
@@ -403,6 +396,8 @@
     var testNames = new Array(testCount);
     var flakyTests = new Array(testCount);
 
+    var failureMap = builderResults[FAILURE_MAP_KEY];
+
     // Decompress and "invert" test results (by build instead of by test) and
     // determine which are flaky.
     for (var testName in builderTestResults) {
@@ -414,7 +409,7 @@
             var count = rleResult[RLE.LENGTH];
             var value = rleResult[RLE.VALUE];
 
-            if (count == 1 && value in _FAILURE_EXPECTATIONS)
+            if (count == 1 && isFailingResult(failureMap, value))
                 oneBuildFailureCount++;
 
             for (var j = 0; j < count; j++) {
@@ -451,7 +446,7 @@
                     buildTestResults[key]++;
                 }
                 addFlakyDelta(value);
-                if (value != 'P' && value != 'N')
+                if (isFailingResult(failureMap, value))
                     addFlakyDelta('total');
                 if (currentBuildIndex == buildCount)
                     break;
@@ -463,7 +458,8 @@
         testNames: testNames,
         resultsByBuild: resultsByBuild,
         flakyTests: flakyTests,
-        flakyDeltasByBuild: flakyDeltasByBuild
+        flakyDeltasByBuild: flakyDeltasByBuild,
+        failureMap: failureMap
     };
 }
 
diff --git a/Tools/TestResultServer/static-dashboards/treemap.js b/Tools/TestResultServer/static-dashboards/treemap.js
index db99b8a..f72b0bb 100644
--- a/Tools/TestResultServer/static-dashboards/treemap.js
+++ b/Tools/TestResultServer/static-dashboards/treemap.js
@@ -97,7 +97,7 @@
 var g_history = new history.History(treemapConfig);
 g_history.parseCrossDashboardParameters();
 
-var TEST_URL_BASE_PATH = "http://svn.webkit.org/repository/webkit/trunk/";
+var TEST_URL_BASE_PATH = "http://src.chromium.org/blink/trunk/";
 
 function humanReadableTime(milliseconds)
 {
diff --git a/Tools/TestResultServer/static-dashboards/ui.js b/Tools/TestResultServer/static-dashboards/ui.js
index 120e2a9..abce48d 100644
--- a/Tools/TestResultServer/static-dashboards/ui.js
+++ b/Tools/TestResultServer/static-dashboards/ui.js
@@ -169,7 +169,7 @@
 
     var rangeUrl = 'http://build.chromium.org/f/chromium/perf/dashboard/ui/changelog' +
         (isChrome ? '' : '_blink') + '.html?url=/trunk' + (isChrome ? '/src' : '') +
-        '&range=' + previousRevision + ':' + currentRevision + '&mode=html';
+        '&range=' + (previousRevision + 1) + ':' + currentRevision + '&mode=html';
     return '<a href="' + rangeUrl + '">r' + (previousRevision + 1) + ' to r' + currentRevision + '</a>';
 }
 
diff --git a/Tools/TestResultServer/static-dashboards/ui_unittests.js b/Tools/TestResultServer/static-dashboards/ui_unittests.js
new file mode 100644
index 0000000..3a3ff7d
--- /dev/null
+++ b/Tools/TestResultServer/static-dashboards/ui_unittests.js
@@ -0,0 +1,71 @@
+// Copyright (C) 2013 Google Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//         * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//         * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//         * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+module('ui');
+
+test('chromiumRevisionLinkOneRevision', 1, function() {
+    var results = {};
+    results[CHROME_REVISIONS_KEY] = [3, 2, 1];
+    var html = ui.html.chromiumRevisionLink(results, 1);
+    equal(html, '<a href="http://src.chromium.org/viewvc/chrome?view=rev&revision=2">r2</a>');
+});
+
+test('chromiumRevisionLinkAtRevision', 1, function() {
+    var results = {};
+    results[CHROME_REVISIONS_KEY] = [3, 2, 2];
+    var html = ui.html.chromiumRevisionLink(results, 1);
+    equal(html, 'At <a href="http://src.chromium.org/viewvc/chrome?view=rev&revision=2">r2</a>');
+});
+
+test('chromiumRevisionLinkRevisionRange', 1, function() {
+    var results = {};
+    results[CHROME_REVISIONS_KEY] = [5, 2];
+    var html = ui.html.chromiumRevisionLink(results, 0);
+    equal(html, '<a href="http://build.chromium.org/f/chromium/perf/dashboard/ui/changelog.html?url=/trunk/src&range=3:5&mode=html">r3 to r5</a>');
+});
+
+test('blinkRevisionLinkOneRevision', 1, function() {
+    var results = {};
+    results[BLINK_REVISIONS_KEY] = [3, 2, 1];
+    var html = ui.html.blinkRevisionLink(results, 1);
+    equal(html, '<a href="http://src.chromium.org/viewvc/blink?view=rev&revision=2">r2</a>');
+});
+
+test('blinkRevisionLinkAtRevision', 1, function() {
+    var results = {};
+    results[BLINK_REVISIONS_KEY] = [3, 2, 2];
+    var html = ui.html.blinkRevisionLink(results, 1);
+    equal(html, 'At <a href="http://src.chromium.org/viewvc/blink?view=rev&revision=2">r2</a>');
+});
+
+test('blinkRevisionLinkRevisionRange', 1, function() {
+    var results = {};
+    results[BLINK_REVISIONS_KEY] = [5, 2];
+    var html = ui.html.blinkRevisionLink(results, 0);
+    equal(html, '<a href="http://build.chromium.org/f/chromium/perf/dashboard/ui/changelog_blink.html?url=/trunk&range=3:5&mode=html">r3 to r5</a>');
+});
diff --git a/Tools/TestResultServer/templates/uploadform.html b/Tools/TestResultServer/templates/uploadform.html
index 9974a24..e4389f2 100644
--- a/Tools/TestResultServer/templates/uploadform.html
+++ b/Tools/TestResultServer/templates/uploadform.html
@@ -23,8 +23,6 @@
     </tr>
     </table>
     <br>
-    <div><input class=button type="checkbox" name="incremental">Incremental results, merge with server file.</div>
-    <br>
     <div><input class=button type="file" name="file" multiple></div>
     <br>
     <div><input class=button type="submit" value="Upload"></div>