Merge from Chromium at DEPS revision r202854
This commit was generated by merge_to_master.py.
Change-Id: I5b225b76b2157384357897051ee5866dd4bb23a8
diff --git a/Tools/TestResultServer/handlers/testfilehandler.py b/Tools/TestResultServer/handlers/testfilehandler.py
index fd17202..0f18abb 100644
--- a/Tools/TestResultServer/handlers/testfilehandler.py
+++ b/Tools/TestResultServer/handlers/testfilehandler.py
@@ -46,7 +46,6 @@
PARAM_NAME = "name"
PARAM_KEY = "key"
PARAM_TEST_TYPE = "testtype"
-PARAM_INCREMENTAL = "incremental"
PARAM_TEST_LIST_JSON = "testlistjson"
PARAM_CALLBACK = "callback"
@@ -226,7 +225,6 @@
master = self.request.get(PARAM_MASTER)
test_type = self.request.get(PARAM_TEST_TYPE)
- incremental = self.request.get(PARAM_INCREMENTAL)
logging.debug(
"Processing upload request, master: %s, builder: %s, test_type: %s.",
@@ -244,14 +242,18 @@
errors = []
for file in files:
- filename = file.filename.lower()
- if ((incremental and filename == "results.json") or
- (filename == "incremental_results.json")):
- # Merge incremental json results.
- update_succeeded = JsonResults.update(master, builder, test_type, file.value)
+ if file.filename == "incremental_results.json":
+ # FIXME: Remove this check once we stop uploading incremental_results.json files for layout tests.
+ if test_type == "layout-tests":
+ update_succeeded = True
+ else:
+ update_succeeded = JsonResults.update(master, builder, test_type, file.value, is_full_results_format=False)
else:
- update_succeeded = TestFile.add_file(
- master, builder, test_type, file.filename, file.value)
+ update_succeeded = bool(TestFile.add_file(master, builder, test_type, file.filename, file.value))
+ # FIXME: Upload full_results.json files for non-layout tests as well and stop supporting the
+ # incremental_results.json file format.
+ if file.filename == "full_results.json" and test_type == "layout-tests":
+ update_succeeded |= JsonResults.update(master, builder, test_type, file.value, is_full_results_format=True)
if not update_succeeded:
errors.append(
diff --git a/Tools/TestResultServer/main.py b/Tools/TestResultServer/main.py
index 2fa61e5..a9b00cf 100644
--- a/Tools/TestResultServer/main.py
+++ b/Tools/TestResultServer/main.py
@@ -28,7 +28,7 @@
# Request a modern Django
from google.appengine.dist import use_library
-use_library('django', '1.1')
+use_library('django', '1.3')
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
diff --git a/Tools/TestResultServer/model/datastorefile.py b/Tools/TestResultServer/model/datastorefile.py
index ac28d64..84642b8 100755
--- a/Tools/TestResultServer/model/datastorefile.py
+++ b/Tools/TestResultServer/model/datastorefile.py
@@ -29,6 +29,7 @@
from datetime import datetime
import logging
+from google.appengine.ext import blobstore
from google.appengine.ext import db
MAX_DATA_ENTRY_PER_FILE = 10
@@ -65,9 +66,20 @@
data = None
+ # FIXME: Remove this once all the bots have cycled after converting to the high-replication database.
+ def _convert_blob_keys(self, keys):
+ converted_keys = []
+ for key in keys:
+ new_key = blobstore.BlobMigrationRecord.get_new_blob_key(key)
+ if new_key:
+ converted_keys.append(new_key)
+ else:
+ converted_keys.append(key)
+ return converted_keys
+
def delete_data(self, keys=None):
if not keys:
- keys = self.data_keys
+ keys = self._convert_blob_keys(self.data_keys)
for key in keys:
data_entry = DataEntry.get(key)
@@ -91,9 +103,11 @@
# reason, only the data pointed by new_data_keys may be corrupted,
# the existing data_keys data remains untouched. The corrupted data
# in new_data_keys will be overwritten in next update.
- keys = self.new_data_keys
+ keys = self._convert_blob_keys(self.new_data_keys)
self.new_data_keys = []
+ # FIXME: is all this complexity with storing the file in chunks really needed anymore?
+ # Can we just store it in a single blob?
while start < len(data):
if keys:
key = keys[0]
@@ -123,7 +137,7 @@
if keys:
self.delete_data(keys)
- temp_keys = self.data_keys
+ temp_keys = self._convert_blob_keys(self.data_keys)
self.data_keys = self.new_data_keys
self.new_data_keys = temp_keys
self.data = data
@@ -136,7 +150,7 @@
return None
data = []
- for key in self.data_keys:
+ for key in self._convert_blob_keys(self.data_keys):
logging.info("Loading data for key: %s.", key)
data_entry = DataEntry.get(key)
if not data_entry:
diff --git a/Tools/TestResultServer/model/jsonresults.py b/Tools/TestResultServer/model/jsonresults.py
index 16316f3..4b3ad73 100755
--- a/Tools/TestResultServer/model/jsonresults.py
+++ b/Tools/TestResultServer/model/jsonresults.py
@@ -27,81 +27,92 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from datetime import datetime
-from django.utils import simplejson
import logging
import sys
import traceback
+# FIXME: Once we're on python 2.7, just use json directly.
+try:
+ from django.utils import simplejson
+except ImportError:
+ import json as simplejson
+
from model.testfile import TestFile
JSON_RESULTS_FILE = "results.json"
JSON_RESULTS_FILE_SMALL = "results-small.json"
JSON_RESULTS_PREFIX = "ADD_RESULTS("
JSON_RESULTS_SUFFIX = ");"
-JSON_RESULTS_VERSION_KEY = "version"
-JSON_RESULTS_BUILD_NUMBERS = "buildNumbers"
-JSON_RESULTS_TESTS = "tests"
-JSON_RESULTS_RESULTS = "results"
-JSON_RESULTS_TIMES = "times"
-JSON_RESULTS_PASS = "P"
-JSON_RESULTS_SKIP = "X"
-JSON_RESULTS_NO_DATA = "N"
+
JSON_RESULTS_MIN_TIME = 3
JSON_RESULTS_HIERARCHICAL_VERSION = 4
JSON_RESULTS_MAX_BUILDS = 500
JSON_RESULTS_MAX_BUILDS_SMALL = 100
+BUG_KEY = "bugs"
+BUILD_NUMBERS_KEY = "buildNumbers"
+EXPECTED_KEY = "expected"
+FAILURE_MAP_KEY = "failure_map"
+FAILURES_BY_TYPE_KEY = "num_failures_by_type"
+FIXABLE_COUNTS_KEY = "fixableCounts"
+RESULTS_KEY = "results"
+TESTS_KEY = "tests"
+TIME_KEY = "time"
+TIMES_KEY = "times"
+VERSIONS_KEY = "version"
-def _add_path_to_trie(path, value, trie):
- if not "/" in path:
- trie[path] = value
- return
+AUDIO = "A"
+CRASH = "C"
+IMAGE = "I"
+IMAGE_PLUS_TEXT = "Z"
+# This is only output by gtests.
+FLAKY = "L"
+MISSING = "O"
+NO_DATA = "N"
+NOTRUN = "Y"
+PASS = "P"
+SKIP = "X"
+TEXT = "F"
+TIMEOUT = "T"
- directory, slash, rest = path.partition("/")
- if not directory in trie:
- trie[directory] = {}
- _add_path_to_trie(rest, value, trie[directory])
+AUDIO_STRING = "AUDIO"
+CRASH_STRING = "CRASH"
+IMAGE_PLUS_TEXT_STRING = "IMAGE+TEXT"
+IMAGE_STRING = "IMAGE"
+FLAKY_STRING = "FLAKY"
+MISSING_STRING = "MISSING"
+NO_DATA_STRING = "NO DATA"
+NOTRUN_STRING = "NOTRUN"
+PASS_STRING = "PASS"
+SKIP_STRING = "SKIP"
+TEXT_STRING = "TEXT"
+TIMEOUT_STRING = "TIMEOUT"
+FAILURE_TO_CHAR = {
+ AUDIO_STRING: AUDIO,
+ CRASH_STRING: CRASH,
+ IMAGE_PLUS_TEXT_STRING: IMAGE_PLUS_TEXT,
+ IMAGE_STRING: IMAGE,
+ FLAKY_STRING: FLAKY,
+ MISSING_STRING: MISSING,
+ NO_DATA_STRING: NO_DATA,
+ NOTRUN_STRING: NOTRUN,
+ PASS_STRING: PASS,
+ SKIP_STRING: SKIP,
+ TEXT_STRING: TEXT,
+ TIMEOUT_STRING: TIMEOUT,
+}
-def _trie_json_tests(tests):
- """Breaks a test name into chunks by directory and puts the test time as a value in the lowest part, e.g.
- foo/bar/baz.html: VALUE1
- foo/bar/baz1.html: VALUE2
-
- becomes
- foo: {
- bar: {
- baz.html: VALUE1,
- baz1.html: VALUE2
- }
- }
- """
- trie = {}
- for test, value in tests.iteritems():
- _add_path_to_trie(test, value, trie)
- return trie
-
+# FIXME: Use dict comprehensions once we update the server to python 2.7.
+CHAR_TO_FAILURE = dict((value, key) for key, value in FAILURE_TO_CHAR.items())
def _is_directory(subtree):
- # FIXME: Some data got corrupted and has results/times at the directory level.
- # Once the data is fixed, this should assert that the directory level does not have
- # results or times and just return "JSON_RESULTS_RESULTS not in subtree".
- if JSON_RESULTS_RESULTS not in subtree:
- return True
-
- for key in subtree:
- if key not in (JSON_RESULTS_RESULTS, JSON_RESULTS_TIMES):
- del subtree[JSON_RESULTS_RESULTS]
- del subtree[JSON_RESULTS_TIMES]
- return True
-
- return False
+ return RESULTS_KEY not in subtree
class JsonResults(object):
@classmethod
def _strip_prefix_suffix(cls, data):
- # FIXME: Stop stripping jsonp callback once we upload pure json everywhere.
if data.startswith(JSON_RESULTS_PREFIX) and data.endswith(JSON_RESULTS_SUFFIX):
return data[len(JSON_RESULTS_PREFIX):len(data) - len(JSON_RESULTS_SUFFIX)]
return data
@@ -126,19 +137,36 @@
@classmethod
def _merge_json(cls, aggregated_json, incremental_json, num_runs):
+ # We have to delete expected entries because the incremental json may not have any
+ # entry for every test in the aggregated json. But, the incremental json will have
+ # all the correct expected entries for that run.
+ cls._delete_expected_entries(aggregated_json[TESTS_KEY])
cls._merge_non_test_data(aggregated_json, incremental_json, num_runs)
- incremental_tests = incremental_json[JSON_RESULTS_TESTS]
+ incremental_tests = incremental_json[TESTS_KEY]
if incremental_tests:
- aggregated_tests = aggregated_json[JSON_RESULTS_TESTS]
+ aggregated_tests = aggregated_json[TESTS_KEY]
cls._merge_tests(aggregated_tests, incremental_tests, num_runs)
- cls._normalize_results(aggregated_tests, num_runs)
+
+ @classmethod
+ def _delete_expected_entries(cls, aggregated_json):
+ for key in aggregated_json:
+ item = aggregated_json[key]
+ if _is_directory(item):
+ cls._delete_expected_entries(item)
+ else:
+ if EXPECTED_KEY in item:
+ del item[EXPECTED_KEY]
+ if BUG_KEY in item:
+ del item[BUG_KEY]
@classmethod
def _merge_non_test_data(cls, aggregated_json, incremental_json, num_runs):
- incremental_builds = incremental_json[JSON_RESULTS_BUILD_NUMBERS]
- aggregated_builds = aggregated_json[JSON_RESULTS_BUILD_NUMBERS]
+ incremental_builds = incremental_json[BUILD_NUMBERS_KEY]
+ aggregated_builds = aggregated_json[BUILD_NUMBERS_KEY]
aggregated_build_number = int(aggregated_builds[0])
+ # FIXME: It's no longer possible to have multiple runs worth of data in the incremental_json,
+ # So we can get rid of this for-loop and the associated index.
for index in reversed(range(len(incremental_builds))):
build_number = int(incremental_builds[index])
logging.debug("Merging build %s, incremental json index: %d.", build_number, index)
@@ -151,12 +179,15 @@
for key in incremental_json.keys():
# Merge json results except "tests" properties (results, times etc).
# "tests" properties will be handled separately.
- if key == JSON_RESULTS_TESTS:
+ if key == TESTS_KEY or key == FAILURE_MAP_KEY:
continue
if key in aggregated_json:
- aggregated_json[key].insert(0, incremental_json[key][incremental_index])
- aggregated_json[key] = aggregated_json[key][:num_runs]
+ if key == FAILURES_BY_TYPE_KEY:
+ cls._merge_one_build(aggregated_json[key], incremental_json[key], incremental_index, num_runs=num_runs)
+ else:
+ aggregated_json[key].insert(0, incremental_json[key][incremental_index])
+ aggregated_json[key] = aggregated_json[key][:num_runs]
else:
aggregated_json[key] = incremental_json[key]
@@ -164,11 +195,11 @@
def _merge_tests(cls, aggregated_json, incremental_json, num_runs):
# FIXME: Some data got corrupted and has results/times at the directory level.
# Once the data is fixe, this should assert that the directory level does not have
- # results or times and just return "JSON_RESULTS_RESULTS not in subtree".
- if JSON_RESULTS_RESULTS in aggregated_json:
- del aggregated_json[JSON_RESULTS_RESULTS]
- if JSON_RESULTS_TIMES in aggregated_json:
- del aggregated_json[JSON_RESULTS_TIMES]
+ # results or times and just return "RESULTS_KEY not in subtree".
+ if RESULTS_KEY in aggregated_json:
+ del aggregated_json[RESULTS_KEY]
+ if TIMES_KEY in aggregated_json:
+ del aggregated_json[TIMES_KEY]
all_tests = set(aggregated_json.iterkeys())
if incremental_json:
@@ -184,16 +215,21 @@
cls._merge_tests(aggregated_json[test_name], incremental_sub_result, num_runs)
continue
+ aggregated_test = aggregated_json[test_name]
+
if incremental_sub_result:
- results = incremental_sub_result[JSON_RESULTS_RESULTS]
- times = incremental_sub_result[JSON_RESULTS_TIMES]
+ results = incremental_sub_result[RESULTS_KEY]
+ times = incremental_sub_result[TIMES_KEY]
+ if EXPECTED_KEY in incremental_sub_result and incremental_sub_result[EXPECTED_KEY] != PASS_STRING:
+ aggregated_test[EXPECTED_KEY] = incremental_sub_result[EXPECTED_KEY]
+ if BUG_KEY in incremental_sub_result:
+ aggregated_test[BUG_KEY] = incremental_sub_result[BUG_KEY]
else:
- results = [[1, JSON_RESULTS_NO_DATA]]
+ results = [[1, NO_DATA]]
times = [[1, 0]]
- aggregated_test = aggregated_json[test_name]
- cls._insert_item_run_length_encoded(results, aggregated_test[JSON_RESULTS_RESULTS], num_runs)
- cls._insert_item_run_length_encoded(times, aggregated_test[JSON_RESULTS_TIMES], num_runs)
+ cls._insert_item_run_length_encoded(results, aggregated_test[RESULTS_KEY], num_runs)
+ cls._insert_item_run_length_encoded(times, aggregated_test[TIMES_KEY], num_runs)
@classmethod
def _insert_item_run_length_encoded(cls, incremental_item, aggregated_item, num_runs):
@@ -209,10 +245,13 @@
for test_name in aggregated_json:
if _is_directory(aggregated_json[test_name]):
cls._normalize_results(aggregated_json[test_name], num_runs)
+ # If normalizing deletes all the children of this directory, also delete the directory.
+ if not aggregated_json[test_name]:
+ names_to_delete.append(test_name)
else:
leaf = aggregated_json[test_name]
- leaf[JSON_RESULTS_RESULTS] = cls._remove_items_over_max_number_of_builds(leaf[JSON_RESULTS_RESULTS], num_runs)
- leaf[JSON_RESULTS_TIMES] = cls._remove_items_over_max_number_of_builds(leaf[JSON_RESULTS_TIMES], num_runs)
+ leaf[RESULTS_KEY] = cls._remove_items_over_max_number_of_builds(leaf[RESULTS_KEY], num_runs)
+ leaf[TIMES_KEY] = cls._remove_items_over_max_number_of_builds(leaf[TIMES_KEY], num_runs)
if cls._should_delete_leaf(leaf):
names_to_delete.append(test_name)
@@ -221,12 +260,18 @@
@classmethod
def _should_delete_leaf(cls, leaf):
- deletable_types = set((JSON_RESULTS_PASS, JSON_RESULTS_NO_DATA, JSON_RESULTS_SKIP))
- for result in leaf[JSON_RESULTS_RESULTS]:
+ if leaf.get(EXPECTED_KEY, PASS_STRING) != PASS_STRING:
+ return False
+
+ if BUG_KEY in leaf:
+ return False
+
+ deletable_types = set((PASS, NO_DATA, NOTRUN))
+ for result in leaf[RESULTS_KEY]:
if result[1] not in deletable_types:
return False
- for time in leaf[JSON_RESULTS_TIMES]:
+ for time in leaf[TIMES_KEY]:
if time[1] >= JSON_RESULTS_MIN_TIME:
return False
@@ -245,11 +290,28 @@
return encoded_list
@classmethod
+ def _convert_gtest_json_to_aggregate_results_format(cls, json):
+ # FIXME: Change gtests over to uploading the full results format like layout-tests
+ # so we don't have to do this normalizing.
+
+ if FAILURES_BY_TYPE_KEY in json:
+ # This is already in the right format.
+ return
+
+ failures_by_type = {}
+ for fixableCount in json[FIXABLE_COUNTS_KEY]:
+ for failure_type, count in fixableCount.items():
+ failure_string = CHAR_TO_FAILURE[failure_type]
+ if failure_string not in failures_by_type:
+ failures_by_type[failure_string] = []
+ failures_by_type[failure_string].append(count)
+ json[FAILURES_BY_TYPE_KEY] = failures_by_type
+
+ @classmethod
def _check_json(cls, builder, json):
- version = json[JSON_RESULTS_VERSION_KEY]
+ version = json[VERSIONS_KEY]
if version > JSON_RESULTS_HIERARCHICAL_VERSION:
- logging.error("Results JSON version '%s' is not supported.",
- version)
+ logging.error("Results JSON version '%s' is not supported.", version)
return False
if not builder in json:
@@ -257,89 +319,184 @@
return False
results_for_builder = json[builder]
- if not JSON_RESULTS_BUILD_NUMBERS in results_for_builder:
+ if not BUILD_NUMBERS_KEY in results_for_builder:
logging.error("Missing build number in json results.")
return False
- # FIXME: Once all the bots have cycled, we can remove this code since all the results will be heirarchical.
- if version < JSON_RESULTS_HIERARCHICAL_VERSION:
- json[builder][JSON_RESULTS_TESTS] = _trie_json_tests(results_for_builder[JSON_RESULTS_TESTS])
- json[JSON_RESULTS_VERSION_KEY] = JSON_RESULTS_HIERARCHICAL_VERSION
-
+ cls._convert_gtest_json_to_aggregate_results_format(json[builder])
return True
@classmethod
- def merge(cls, builder, aggregated, incremental, num_runs, sort_keys=False):
- if not incremental:
+ def _populate_tests_from_full_results(cls, full_results, new_results):
+ if EXPECTED_KEY in full_results:
+ expected = full_results[EXPECTED_KEY]
+ if expected != PASS_STRING and expected != NOTRUN_STRING:
+ new_results[EXPECTED_KEY] = expected
+ time = int(round(full_results[TIME_KEY])) if TIME_KEY in full_results else 0
+ new_results[TIMES_KEY] = [[1, time]]
+
+ actual_failures = full_results['actual']
+ # Treat unexpected skips like NOTRUNs to avoid exploding the results JSON files
+ # when a bot exits early (e.g. due to too many crashes/timeouts).
+ if expected != SKIP_STRING and actual_failures == SKIP_STRING:
+ expected = first_actual_failure = NOTRUN_STRING
+ elif expected == NOTRUN_STRING:
+ first_actual_failure = expected
+ else:
+ # FIXME: Include the retry result as well and find a nice way to display it in the flakiness dashboard.
+ first_actual_failure = actual_failures.split(' ')[0]
+ new_results[RESULTS_KEY] = [[1, FAILURE_TO_CHAR[first_actual_failure]]]
+
+ if BUG_KEY in full_results:
+ new_results[BUG_KEY] = full_results[BUG_KEY]
+ return
+
+ for key in full_results:
+ new_results[key] = {}
+ cls._populate_tests_from_full_results(full_results[key], new_results[key])
+
+ @classmethod
+ def _convert_full_results_format_to_aggregate(cls, full_results_format):
+ num_total_tests = 0
+ num_failing_tests = 0
+ fixableCounts = {}
+ failures_by_type = full_results_format[FAILURES_BY_TYPE_KEY]
+
+ # FIXME: full_results format has "FAIL" entries, but that is no longer a possible result type.
+ if 'FAIL' in failures_by_type:
+ del failures_by_type['FAIL']
+
+ for failure_type in failures_by_type:
+ count = failures_by_type[failure_type]
+ num_total_tests += count
+ if failure_type != PASS_STRING:
+ num_failing_tests += count
+ fixableCounts[FAILURE_TO_CHAR[failure_type]] = count
+
+ tests = {}
+ cls._populate_tests_from_full_results(full_results_format[TESTS_KEY], tests)
+
+ aggregate_results_format = {
+ VERSIONS_KEY: JSON_RESULTS_HIERARCHICAL_VERSION,
+ full_results_format['builder_name']: {
+ # FIXME: Use dict comprehensions once we update the server to python 2.7.
+ FAILURES_BY_TYPE_KEY: dict((key, [value]) for key, value in failures_by_type.items()),
+ TESTS_KEY: tests,
+ # FIXME: Have the consumers of these use num_failures_by_type directly and stop including these counts.
+ 'allFixableCount': [num_total_tests],
+ 'fixableCount': [num_failing_tests],
+ FIXABLE_COUNTS_KEY: [fixableCounts],
+ # FIXME: Have all the consumers of this switch over to the full_results_format keys
+ # so we don't have to do this silly conversion.
+ BUILD_NUMBERS_KEY: [full_results_format['build_number']],
+ 'chromeRevision': [full_results_format['chromium_revision']],
+ 'blinkRevision': [full_results_format['blink_revision']],
+ 'secondsSinceEpoch': [full_results_format['seconds_since_epoch']],
+ }
+ }
+ return aggregate_results_format
+
+ @classmethod
+ def _get_incremental_json(cls, builder, incremental_string, is_full_results_format):
+ if not incremental_string:
logging.warning("Nothing to merge.")
return None
- logging.info("Loading incremental json...")
- incremental_json = cls._load_json(incremental)
+ logging.info("Loading incremental json.")
+ incremental_json = cls._load_json(incremental_string)
if not incremental_json:
return None
- logging.info("Checking incremental json...")
+ if is_full_results_format:
+ logging.info("Converting full results format to aggregate.")
+ incremental_json = cls._convert_full_results_format_to_aggregate(incremental_json)
+
+ logging.info("Checking incremental json.")
if not cls._check_json(builder, incremental_json):
return None
+ return incremental_json
- logging.info("Loading existing aggregated json...")
- aggregated_json = cls._load_json(aggregated)
+ @classmethod
+ def _get_aggregated_json(cls, builder, aggregated_string):
+ logging.info("Loading existing aggregated json.")
+ aggregated_json = cls._load_json(aggregated_string)
if not aggregated_json:
- return incremental
-
- logging.info("Checking existing aggregated json...")
- if not cls._check_json(builder, aggregated_json):
- return incremental
-
- if aggregated_json[builder][JSON_RESULTS_BUILD_NUMBERS][0] == incremental_json[builder][JSON_RESULTS_BUILD_NUMBERS][0]:
- logging.error("Incremental JSON's build number is the latest build number in the aggregated JSON: %d." % aggregated_json[builder][JSON_RESULTS_BUILD_NUMBERS][0])
- return aggregated
-
- logging.info("Merging json results...")
- try:
- cls._merge_json(aggregated_json[builder], incremental_json[builder], num_runs)
- except:
- logging.error("Failed to merge json results: %s", traceback.print_exception(*sys.exc_info()))
return None
- aggregated_json[JSON_RESULTS_VERSION_KEY] = JSON_RESULTS_HIERARCHICAL_VERSION
+ logging.info("Checking existing aggregated json.")
+ if not cls._check_json(builder, aggregated_json):
+ return None
+ return aggregated_json
+
+ @classmethod
+ def merge(cls, builder, aggregated_string, incremental_json, num_runs, sort_keys=False):
+ aggregated_json = cls._get_aggregated_json(builder, aggregated_string)
+ if not aggregated_json:
+ aggregated_json = incremental_json
+ else:
+ if aggregated_json[builder][BUILD_NUMBERS_KEY][0] == incremental_json[builder][BUILD_NUMBERS_KEY][0]:
+ logging.error("Incremental JSON's build number is the latest build number in the aggregated JSON: %d." % aggregated_json[builder][BUILD_NUMBERS_KEY][0])
+ return None
+
+ logging.info("Merging json results.")
+ try:
+ cls._merge_json(aggregated_json[builder], incremental_json[builder], num_runs)
+ except:
+ logging.error("Failed to merge json results: %s", traceback.format_exc())
+ return None
+
+ aggregated_json[VERSIONS_KEY] = JSON_RESULTS_HIERARCHICAL_VERSION
+ aggregated_json[builder][FAILURE_MAP_KEY] = CHAR_TO_FAILURE
+ cls._normalize_results(aggregated_json[builder][TESTS_KEY], num_runs)
return cls._generate_file_data(aggregated_json, sort_keys)
@classmethod
- def update(cls, master, builder, test_type, incremental):
- small_file_updated = cls.update_file(master, builder, test_type, incremental, JSON_RESULTS_FILE_SMALL, JSON_RESULTS_MAX_BUILDS_SMALL)
- large_file_updated = cls.update_file(master, builder, test_type, incremental, JSON_RESULTS_FILE, JSON_RESULTS_MAX_BUILDS)
+ def _get_file(cls, master, builder, test_type, filename):
+ files = TestFile.get_files(master, builder, test_type, filename)
+ if files:
+ return files[0]
+
+ file = TestFile()
+ file.master = master
+ file.builder = builder
+ file.test_type = test_type
+ file.name = filename
+ file.data = ""
+ return file
+
+ @classmethod
+ def update(cls, master, builder, test_type, incremental_string, is_full_results_format):
+ logging.info("Updating %s and %s." % (JSON_RESULTS_FILE_SMALL, JSON_RESULTS_FILE))
+ small_file = cls._get_file(master, builder, test_type, JSON_RESULTS_FILE_SMALL)
+ large_file = cls._get_file(master, builder, test_type, JSON_RESULTS_FILE)
+ return cls.update_files(builder, incremental_string, small_file, large_file, is_full_results_format)
+
+ @classmethod
+ def update_files(cls, builder, incremental_string, small_file, large_file, is_full_results_format):
+ incremental_json = cls._get_incremental_json(builder, incremental_string, is_full_results_format)
+ if not incremental_json:
+ return False
+
+ small_file_updated = cls.update_file(builder, small_file, incremental_json, JSON_RESULTS_MAX_BUILDS_SMALL)
+ if not small_file_updated:
+ logging.info("Update for %s failed." % JSON_RESULTS_FILE_SMALL)
+
+ large_file_updated = cls.update_file(builder, large_file, incremental_json, JSON_RESULTS_MAX_BUILDS)
+ if not large_file_updated:
+ logging.info("Update for %s failed." % JSON_RESULTS_FILE)
return small_file_updated and large_file_updated
@classmethod
- def update_file(cls, master, builder, test_type, incremental, filename, num_runs):
- files = TestFile.get_files(master, builder, test_type, filename)
- if files:
- file = files[0]
- new_results = cls.merge(builder, file.data, incremental, num_runs)
- else:
- # Use the incremental data if there is no aggregated file to merge.
- file = TestFile()
- file.master = master
- file.builder = builder
- file.test_type = test_type
- file.name = filename
- new_results = incremental
- logging.info("No existing json results, incremental json is saved.")
-
- if not new_results or not file.save(new_results):
- logging.info("Update failed, master: %s, builder: %s, test_type: %s, name: %s." % (master, builder, test_type, filename))
- return False
-
- return True
+ def update_file(cls, builder, file, incremental_json, num_runs):
+ new_results = cls.merge(builder, file.data, incremental_json, num_runs)
+ return bool(new_results and file.save(new_results))
@classmethod
def _delete_results_and_times(cls, tests):
for key in tests.keys():
- if key in (JSON_RESULTS_RESULTS, JSON_RESULTS_TIMES):
+ if key in (RESULTS_KEY, TIMES_KEY):
del tests[key]
else:
cls._delete_results_and_times(tests[key])
@@ -356,7 +513,7 @@
return None
test_list_json = {}
- tests = json[builder][JSON_RESULTS_TESTS]
+ tests = json[builder][TESTS_KEY]
cls._delete_results_and_times(tests)
- test_list_json[builder] = {"tests": tests}
+ test_list_json[builder] = {TESTS_KEY: tests}
return cls._generate_file_data(test_list_json)
diff --git a/Tools/TestResultServer/model/jsonresults_unittest.py b/Tools/TestResultServer/model/jsonresults_unittest.py
index 3ddbf4c..d0e35c9 100755
--- a/Tools/TestResultServer/model/jsonresults_unittest.py
+++ b/Tools/TestResultServer/model/jsonresults_unittest.py
@@ -28,64 +28,176 @@
try:
import jsonresults
- from jsonresults import JsonResults
+ from jsonresults import *
except ImportError:
print "ERROR: Add the TestResultServer, google_appengine and yaml/lib directories to your PYTHONPATH"
raise
-from django.utils import simplejson
+# FIXME: Once we're on python 2.7, just use json directly.
+try:
+ from django.utils import simplejson
+except ImportError:
+ import json as simplejson
import unittest
+FULL_RESULT_EXAMPLE = """ADD_RESULTS({
+ "seconds_since_epoch": 1368146629,
+ "tests": {
+ "media": {
+ "encrypted-media": {
+ "encrypted-media-v2-events.html": {
+ "bugs": ["crbug.com/1234"],
+ "expected": "TIMEOUT",
+ "actual": "TIMEOUT",
+ "time": 6.0
+ },
+ "encrypted-media-v2-syntax.html": {
+ "expected": "TIMEOUT",
+ "actual": "TIMEOUT"
+ }
+ },
+ "progress-events-generated-correctly.html": {
+ "expected": "PASS FAIL IMAGE TIMEOUT CRASH MISSING",
+ "actual": "TIMEOUT",
+ "time": 6.0
+ },
+ "W3C": {
+ "audio": {
+ "src": {
+ "src_removal_does_not_trigger_loadstart.html": {
+ "expected": "PASS",
+ "actual": "PASS",
+ "time": 3.5
+ }
+ }
+ },
+ "video": {
+ "src": {
+ "src_removal_does_not_trigger_loadstart.html": {
+ "expected": "PASS",
+ "actual": "PASS",
+ "time": 1.1
+ },
+ "notrun.html": {
+ "expected": "NOTRUN",
+ "actual": "SKIP",
+ "time": 1.1
+ }
+ }
+ }
+ },
+ "unexpected-skip.html": {
+ "expected": "PASS",
+ "actual": "SKIP"
+ },
+ "media-document-audio-repaint.html": {
+ "expected": "IMAGE",
+ "image_diff_percent": 0,
+ "actual": "IMAGE",
+ "time": 0.1
+ }
+ }
+ },
+ "skipped": 2,
+ "num_regressions": 0,
+ "build_number": "3",
+ "interrupted": false,
+ "num_missing": 0,
+ "uses_expectations_file": true,
+ "layout_tests_dir": "\/tmp\/cr\/src\/third_party\/WebKit\/LayoutTests",
+ "version": 3,
+ "builder_name": "Webkit",
+ "num_passes": 10,
+ "pixel_tests_enabled": true,
+ "blink_revision": "1234",
+ "has_pretty_patch": true,
+ "fixable": 25,
+ "num_flaky": 0,
+ "num_failures_by_type": {
+ "CRASH": 3,
+ "MISSING": 0,
+ "TEXT": 3,
+ "IMAGE": 1,
+ "PASS": 10,
+ "SKIP": 2,
+ "TIMEOUT": 16,
+ "IMAGE+TEXT": 0,
+ "FAIL": 0,
+ "AUDIO": 0
+ },
+ "has_wdiff": true,
+ "chromium_revision": "5678"
+});"""
-JSON_RESULTS_TEMPLATE = (
- '{"Webkit":{'
+JSON_RESULTS_OLD_TEMPLATE = (
+ '{"[BUILDER_NAME]":{'
'"allFixableCount":[[TESTDATA_COUNT]],'
'"blinkRevision":[[TESTDATA_WEBKITREVISION]],'
'"buildNumbers":[[TESTDATA_BUILDNUMBERS]],'
'"chromeRevision":[[TESTDATA_CHROMEREVISION]],'
- '"deferredCounts":[[TESTDATA_COUNTS]],'
+ '"failure_map": %s,'
'"fixableCount":[[TESTDATA_COUNT]],'
'"fixableCounts":[[TESTDATA_COUNTS]],'
'"secondsSinceEpoch":[[TESTDATA_TIMES]],'
- '"tests":{[TESTDATA_TESTS]},'
- '"wontfixCounts":[[TESTDATA_COUNTS]]'
+ '"tests":{[TESTDATA_TESTS]}'
'},'
'"version":[VERSION]'
- '}')
+ '}') % simplejson.dumps(CHAR_TO_FAILURE)
-JSON_RESULTS_COUNTS_TEMPLATE = (
- '{'
- '"C":[TESTDATA],'
- '"F":[TESTDATA],'
- '"I":[TESTDATA],'
- '"O":[TESTDATA],'
- '"P":[TESTDATA],'
- '"T":[TESTDATA],'
- '"X":[TESTDATA],'
- '"Z":[TESTDATA]}')
+JSON_RESULTS_COUNTS = '{"' + '":[[TESTDATA_COUNT]],"'.join([char for char in CHAR_TO_FAILURE.values()]) + '":[[TESTDATA_COUNT]]}'
-JSON_RESULTS_DIRECTORY_TEMPLATE = '[[TESTDATA_DIRECTORY]]:{[TESTDATA_DATA]}'
+JSON_RESULTS_TEMPLATE = (
+ '{"[BUILDER_NAME]":{'
+ '"allFixableCount":[[TESTDATA_COUNT]],'
+ '"blinkRevision":[[TESTDATA_WEBKITREVISION]],'
+ '"buildNumbers":[[TESTDATA_BUILDNUMBERS]],'
+ '"chromeRevision":[[TESTDATA_CHROMEREVISION]],'
+ '"failure_map": %s,'
+ '"fixableCount":[[TESTDATA_COUNT]],'
+ '"fixableCounts":[[TESTDATA_COUNTS]],'
+ '"num_failures_by_type":%s,'
+ '"secondsSinceEpoch":[[TESTDATA_TIMES]],'
+ '"tests":{[TESTDATA_TESTS]}'
+ '},'
+ '"version":[VERSION]'
+ '}') % (simplejson.dumps(CHAR_TO_FAILURE), JSON_RESULTS_COUNTS)
-JSON_RESULTS_TESTS_TEMPLATE = (
- '[[TESTDATA_TEST_NAME]]:{'
- '"results":[[TESTDATA_TEST_RESULTS]],'
- '"times":[[TESTDATA_TEST_TIMES]]}')
+JSON_RESULTS_COUNTS_TEMPLATE = '{"' + '":[TESTDATA],"'.join([char for char in CHAR_TO_FAILURE]) + '":[TESTDATA]}'
-JSON_RESULTS_TEST_LIST_TEMPLATE = (
- '{"Webkit":{"tests":{[TESTDATA_TESTS]}}}')
+JSON_RESULTS_TEST_LIST_TEMPLATE = '{"Webkit":{"tests":{[TESTDATA_TESTS]}}}'
+
+
+class MockFile(object):
+ def __init__(self, name='results.json', data=''):
+ self.master = 'MockMasterName'
+ self.builder = 'MockBuilderName'
+ self.test_type = 'MockTestType'
+ self.name = name
+ self.data = data
+
+ def save(self, data):
+ self.data = data
+ return True
class JsonResultsTest(unittest.TestCase):
def setUp(self):
self._builder = "Webkit"
+ # Use this to get better error messages than just string compare gives.
+ def assert_json_equal(self, a, b):
+ self.maxDiff = None
+ a = simplejson.loads(a) if isinstance(a, str) else a
+ b = simplejson.loads(b) if isinstance(b, str) else b
+ self.assertEqual(a, b)
+
def test_strip_prefix_suffix(self):
json = "['contents']"
self.assertEqual(JsonResults._strip_prefix_suffix("ADD_RESULTS(" + json + ");"), json)
self.assertEqual(JsonResults._strip_prefix_suffix(json), json)
- def _make_test_json(self, test_data):
+ def _make_test_json(self, test_data, json_string=JSON_RESULTS_TEMPLATE, builder_name="Webkit"):
if not test_data:
return ""
@@ -94,8 +206,6 @@
if not builds or not tests:
return ""
- json = JSON_RESULTS_TEMPLATE
-
counts = []
build_numbers = []
webkit_revision = []
@@ -108,26 +218,27 @@
chrome_revision.append("3000%s" % build)
times.append("100000%s000" % build)
- json = json.replace("[TESTDATA_COUNTS]", ",".join(counts))
- json = json.replace("[TESTDATA_COUNT]", ",".join(builds))
- json = json.replace("[TESTDATA_BUILDNUMBERS]", ",".join(build_numbers))
- json = json.replace("[TESTDATA_WEBKITREVISION]", ",".join(webkit_revision))
- json = json.replace("[TESTDATA_CHROMEREVISION]", ",".join(chrome_revision))
- json = json.replace("[TESTDATA_TIMES]", ",".join(times))
+ json_string = json_string.replace("[BUILDER_NAME]", builder_name)
+ json_string = json_string.replace("[TESTDATA_COUNTS]", ",".join(counts))
+ json_string = json_string.replace("[TESTDATA_COUNT]", ",".join(builds))
+ json_string = json_string.replace("[TESTDATA_BUILDNUMBERS]", ",".join(build_numbers))
+ json_string = json_string.replace("[TESTDATA_WEBKITREVISION]", ",".join(webkit_revision))
+ json_string = json_string.replace("[TESTDATA_CHROMEREVISION]", ",".join(chrome_revision))
+ json_string = json_string.replace("[TESTDATA_TIMES]", ",".join(times))
version = str(test_data["version"]) if "version" in test_data else "4"
- json = json.replace("[VERSION]", version)
- json = json.replace("{[TESTDATA_TESTS]}", simplejson.dumps(tests, separators=(',', ':'), sort_keys=True))
- return json
+ json_string = json_string.replace("[VERSION]", version)
+ json_string = json_string.replace("{[TESTDATA_TESTS]}", simplejson.dumps(tests, separators=(',', ':'), sort_keys=True))
+ return json_string
def _test_merge(self, aggregated_data, incremental_data, expected_data, max_builds=jsonresults.JSON_RESULTS_MAX_BUILDS):
aggregated_results = self._make_test_json(aggregated_data)
- incremental_results = self._make_test_json(incremental_data)
- merged_results = JsonResults.merge(self._builder, aggregated_results, incremental_results, max_builds, sort_keys=True)
+ incremental_json = JsonResults._get_incremental_json(self._builder, self._make_test_json(incremental_data), is_full_results_format=False)
+ merged_results = JsonResults.merge(self._builder, aggregated_results, incremental_json, num_runs=max_builds, sort_keys=True)
if expected_data:
expected_results = self._make_test_json(expected_data)
- self.assertEqual(merged_results, expected_results)
+ self.assert_json_equal(merged_results, expected_results)
else:
self.assertFalse(merged_results)
@@ -135,36 +246,180 @@
input_results = self._make_test_json(input_data)
expected_results = JSON_RESULTS_TEST_LIST_TEMPLATE.replace("{[TESTDATA_TESTS]}", simplejson.dumps(expected_data, separators=(',', ':')))
actual_results = JsonResults.get_test_list(self._builder, input_results)
- self.assertEqual(actual_results, expected_results)
+ self.assert_json_equal(actual_results, expected_results)
- def test_merge_null_incremental_results(self):
- # Empty incremental results json.
- # Nothing to merge.
- self._test_merge(
- # Aggregated results
- {"builds": ["2", "1"],
- "tests": {"001.html": {
- "results": [[200,"F"]],
- "times": [[200,0]]}}},
- # Incremental results
- None,
- # Expect no merge happens.
- None)
+ def test_update_files_empty_aggregate_data(self):
+ small_file = MockFile(name='results-small.json')
+ large_file = MockFile(name='results.json')
- def test_merge_empty_incremental_results(self):
- # No actual incremental test results (only prefix and suffix) to merge.
- # Nothing to merge.
- self._test_merge(
- # Aggregated results
- {"builds": ["2", "1"],
- "tests": {"001.html": {
- "results": [[200,"F"]],
- "times": [[200,0]]}}},
- # Incremental results
- {"builds": [],
- "tests": {}},
- # Expected no merge happens.
- None)
+ incremental_data = {
+ "builds": ["2", "1"],
+ "tests": {
+ "001.html": {
+ "results": [[200, TEXT]],
+ "times": [[200, 0]],
+ }
+ }
+ }
+ incremental_string = self._make_test_json(incremental_data, builder_name=small_file.builder)
+
+ self.assertTrue(JsonResults.update_files(small_file.builder, incremental_string, small_file, large_file, is_full_results_format=False))
+ self.assert_json_equal(small_file.data, incremental_string)
+ self.assert_json_equal(large_file.data, incremental_string)
+
+ def test_update_files_null_incremental_data(self):
+ small_file = MockFile(name='results-small.json')
+ large_file = MockFile(name='results.json')
+
+ aggregated_data = {
+ "builds": ["2", "1"],
+ "tests": {
+ "001.html": {
+ "results": [[200, TEXT]],
+ "times": [[200, 0]],
+ }
+ }
+ }
+ aggregated_string = self._make_test_json(aggregated_data, builder_name=small_file.builder)
+
+ small_file.data = large_file.data = aggregated_string
+
+ incremental_string = ""
+
+ self.assertFalse(JsonResults.update_files(small_file.builder, incremental_string, small_file, large_file, is_full_results_format=False))
+ self.assert_json_equal(small_file.data, aggregated_string)
+ self.assert_json_equal(large_file.data, aggregated_string)
+
+ def test_update_files_empty_incremental_data(self):
+ small_file = MockFile(name='results-small.json')
+ large_file = MockFile(name='results.json')
+
+ aggregated_data = {
+ "builds": ["2", "1"],
+ "tests": {
+ "001.html": {
+ "results": [[200, TEXT]],
+ "times": [[200, 0]],
+ }
+ }
+ }
+ aggregated_string = self._make_test_json(aggregated_data, builder_name=small_file.builder)
+
+ small_file.data = large_file.data = aggregated_string
+
+ incremental_data = {
+ "builds": [],
+ "tests": {}
+ }
+ incremental_string = self._make_test_json(incremental_data, builder_name=small_file.builder)
+
+ self.assertFalse(JsonResults.update_files(small_file.builder, incremental_string, small_file, large_file, is_full_results_format=False))
+ self.assert_json_equal(small_file.data, aggregated_string)
+ self.assert_json_equal(large_file.data, aggregated_string)
+
+ def test_merge_with_empty_aggregated_results(self):
+ incremental_data = {
+ "builds": ["2", "1"],
+ "tests": {
+ "001.html": {
+ "results": [[200, TEXT]],
+ "times": [[200, 0]],
+ }
+ }
+ }
+ incremental_results = JsonResults._get_incremental_json(self._builder, self._make_test_json(incremental_data), is_full_results_format=False)
+ aggregated_results = ""
+ merged_results = JsonResults.merge(self._builder, aggregated_results, incremental_results, num_runs=jsonresults.JSON_RESULTS_MAX_BUILDS, sort_keys=True)
+ self.assert_json_equal(merged_results, incremental_results)
+
+ def test_failures_by_type_added(self):
+ aggregated_results = self._make_test_json({
+ "builds": ["2", "1"],
+ "tests": {
+ "001.html": {
+ "results": [[100, TEXT]],
+ "times": [[100, 0]],
+ }
+ }
+ }, json_string=JSON_RESULTS_OLD_TEMPLATE)
+ incremental_results = self._make_test_json({
+ "builds": ["3"],
+ "tests": {
+ "001.html": {
+ "results": [[1, TEXT]],
+ "times": [[1, 0]],
+ }
+ }
+ }, json_string=JSON_RESULTS_OLD_TEMPLATE)
+ incremental_json = JsonResults._get_incremental_json(self._builder, incremental_results, is_full_results_format=False)
+ merged_results = JsonResults.merge(self._builder, aggregated_results, incremental_json, num_runs=200, sort_keys=True)
+ self.assert_json_equal(merged_results, self._make_test_json({
+ "builds": ["3", "2", "1"],
+ "tests": {
+ "001.html": {
+ "results": [[101, TEXT]],
+ "times": [[101, 0]],
+ }
+ }
+ }))
+
+ def test_merge_full_results_format(self):
+ expected_incremental_results = {
+ "Webkit": {
+ "allFixableCount": [35],
+ "blinkRevision": ["1234"],
+ "buildNumbers": ["3"],
+ "chromeRevision": ["5678"],
+ "failure_map": CHAR_TO_FAILURE,
+ "fixableCount": [25],
+ "fixableCounts": [{AUDIO: 0, CRASH: 3, TEXT: 3, IMAGE: 1, MISSING: 0, PASS: 10, TIMEOUT: 16, SKIP: 2, IMAGE_PLUS_TEXT: 0}],
+ "num_failures_by_type": {"AUDIO": [0], "CRASH": [3], "IMAGE": [1], "IMAGE+TEXT": [0], "MISSING": [0], "PASS": [10], "SKIP": [2], "TEXT": [3], "TIMEOUT": [16]},
+ "secondsSinceEpoch": [1368146629],
+ "tests": {
+ "media": {
+ "W3C": {
+ "audio": {
+ "src": {
+ "src_removal_does_not_trigger_loadstart.html": {
+ "results": [[1, PASS]],
+ "times": [[1, 4]],
+ }
+ }
+ }
+ },
+ "encrypted-media": {
+ "encrypted-media-v2-events.html": {
+ "bugs": ["crbug.com/1234"],
+ "expected": "TIMEOUT",
+ "results": [[1, TIMEOUT]],
+ "times": [[1, 6]],
+ },
+ "encrypted-media-v2-syntax.html": {
+ "expected": "TIMEOUT",
+ "results": [[1, TIMEOUT]],
+ "times": [[1, 0]],
+ }
+ },
+ "media-document-audio-repaint.html": {
+ "expected": "IMAGE",
+ "results": [[1, IMAGE]],
+ "times": [[1, 0]],
+ },
+ "progress-events-generated-correctly.html": {
+ "expected": "PASS FAIL IMAGE TIMEOUT CRASH MISSING",
+ "results": [[1, TIMEOUT]],
+ "times": [[1, 6]],
+ }
+ }
+ }
+ },
+ "version": 4
+ }
+
+ aggregated_results = ""
+ incremental_json = JsonResults._get_incremental_json(self._builder, FULL_RESULT_EXAMPLE, is_full_results_format=True)
+ merged_results = JsonResults.merge("Webkit", aggregated_results, incremental_json, num_runs=jsonresults.JSON_RESULTS_MAX_BUILDS, sort_keys=True)
+ self.assert_json_equal(merged_results, expected_incremental_results)
def test_merge_empty_aggregated_results(self):
# No existing aggregated results.
@@ -173,56 +428,52 @@
# Aggregated results
None,
# Incremental results
-
{"builds": ["2", "1"],
"tests": {"001.html": {
- "results": [[200,"F"]],
- "times": [[200,0]]}}},
+ "results": [[200, TEXT]],
+ "times": [[200, 0]]}}},
# Expected result
{"builds": ["2", "1"],
"tests": {"001.html": {
- "results": [[200,"F"]],
- "times": [[200,0]]}}})
+ "results": [[200, TEXT]],
+ "times": [[200, 0]]}}})
def test_merge_duplicate_build_number(self):
self._test_merge(
# Aggregated results
{"builds": ["2", "1"],
"tests": {"001.html": {
- "results": [[100, "F"]],
+ "results": [[100, TEXT]],
"times": [[100, 0]]}}},
# Incremental results
{"builds": ["2"],
"tests": {"001.html": {
- "results": [[1, "F"]],
+ "results": [[1, TEXT]],
"times": [[1, 0]]}}},
# Expected results
- {"builds": ["2", "1"],
- "tests": {"001.html": {
- "results": [[100, "F"]],
- "times": [[100, 0]]}}})
+ None)
def test_merge_incremental_single_test_single_run_same_result(self):
# Incremental results has the latest build and same test results for
# that run.
# Insert the incremental results at the first place and sum number
- # of runs for "F" (200 + 1) to get merged results.
+ # of runs for TEXT (200 + 1) to get merged results.
self._test_merge(
# Aggregated results
{"builds": ["2", "1"],
"tests": {"001.html": {
- "results": [[200,"F"]],
- "times": [[200,0]]}}},
+ "results": [[200, TEXT]],
+ "times": [[200, 0]]}}},
# Incremental results
{"builds": ["3"],
"tests": {"001.html": {
- "results": [[1,"F"]],
- "times": [[1,0]]}}},
+ "results": [[1, TEXT]],
+ "times": [[1, 0]]}}},
# Expected results
{"builds": ["3", "2", "1"],
"tests": {"001.html": {
- "results": [[201,"F"]],
- "times": [[201,0]]}}})
+ "results": [[201, TEXT]],
+ "times": [[201, 0]]}}})
def test_merge_single_test_single_run_different_result(self):
# Incremental results has the latest build but different test results
@@ -232,18 +483,18 @@
# Aggregated results
{"builds": ["2", "1"],
"tests": {"001.html": {
- "results": [[200,"F"]],
- "times": [[200,0]]}}},
+ "results": [[200, TEXT]],
+ "times": [[200, 0]]}}},
# Incremental results
{"builds": ["3"],
"tests": {"001.html": {
- "results": [[1, "I"]],
- "times": [[1,1]]}}},
+ "results": [[1, IMAGE]],
+ "times": [[1, 1]]}}},
# Expected results
{"builds": ["3", "2", "1"],
"tests": {"001.html": {
- "results": [[1,"I"],[200,"F"]],
- "times": [[1,1],[200,0]]}}})
+ "results": [[1, IMAGE], [200, TEXT]],
+ "times": [[1, 1], [200, 0]]}}})
def test_merge_single_test_single_run_result_changed(self):
# Incremental results has the latest build but results which differ from
@@ -252,18 +503,18 @@
# Aggregated results
{"builds": ["2", "1"],
"tests": {"001.html": {
- "results": [[200,"F"],[10,"I"]],
- "times": [[200,0],[10,1]]}}},
+ "results": [[200, TEXT], [10, IMAGE]],
+ "times": [[200, 0], [10, 1]]}}},
# Incremental results
{"builds": ["3"],
"tests": {"001.html": {
- "results": [[1,"I"]],
- "times": [[1,1]]}}},
+ "results": [[1, IMAGE]],
+ "times": [[1, 1]]}}},
# Expected results
{"builds": ["3", "2", "1"],
"tests": {"001.html": {
- "results": [[1,"I"],[200,"F"],[10,"I"]],
- "times": [[1,1],[200,0],[10,1]]}}})
+ "results": [[1, IMAGE], [200, TEXT], [10, IMAGE]],
+ "times": [[1, 1], [200, 0], [10, 1]]}}})
def test_merge_multiple_tests_single_run(self):
# All tests have incremental updates.
@@ -271,96 +522,96 @@
# Aggregated results
{"builds": ["2", "1"],
"tests": {"001.html": {
- "results": [[200,"F"]],
- "times": [[200,0]]},
+ "results": [[200, TEXT]],
+ "times": [[200, 0]]},
"002.html": {
- "results": [[100,"I"]],
- "times": [[100,1]]}}},
+ "results": [[100, IMAGE]],
+ "times": [[100, 1]]}}},
# Incremental results
{"builds": ["3"],
"tests": {"001.html": {
- "results": [[1,"F"]],
- "times": [[1,0]]},
+ "results": [[1, TEXT]],
+ "times": [[1, 0]]},
"002.html": {
- "results": [[1,"I"]],
- "times": [[1,1]]}}},
+ "results": [[1, IMAGE]],
+ "times": [[1, 1]]}}},
# Expected results
{"builds": ["3", "2", "1"],
"tests": {"001.html": {
- "results": [[201,"F"]],
- "times": [[201,0]]},
+ "results": [[201, TEXT]],
+ "times": [[201, 0]]},
"002.html": {
- "results": [[101,"I"]],
- "times": [[101,1]]}}})
+ "results": [[101, IMAGE]],
+ "times": [[101, 1]]}}})
def test_merge_multiple_tests_single_run_one_no_result(self):
self._test_merge(
# Aggregated results
{"builds": ["2", "1"],
"tests": {"001.html": {
- "results": [[200,"F"]],
- "times": [[200,0]]},
+ "results": [[200, TEXT]],
+ "times": [[200, 0]]},
"002.html": {
- "results": [[100,"I"]],
- "times": [[100,1]]}}},
+ "results": [[100, IMAGE]],
+ "times": [[100, 1]]}}},
# Incremental results
{"builds": ["3"],
"tests": {"002.html": {
- "results": [[1,"I"]],
- "times": [[1,1]]}}},
+ "results": [[1, IMAGE]],
+ "times": [[1, 1]]}}},
# Expected results
{"builds": ["3", "2", "1"],
"tests": {"001.html": {
- "results": [[1,"N"],[200,"F"]],
- "times": [[201,0]]},
+ "results": [[1, NO_DATA], [200, TEXT]],
+ "times": [[201, 0]]},
"002.html": {
- "results": [[101,"I"]],
- "times": [[101,1]]}}})
+ "results": [[101, IMAGE]],
+ "times": [[101, 1]]}}})
def test_merge_single_test_multiple_runs(self):
self._test_merge(
# Aggregated results
{"builds": ["2", "1"],
"tests": {"001.html": {
- "results": [[200,"F"]],
- "times": [[200,0]]}}},
+ "results": [[200, TEXT]],
+ "times": [[200, 0]]}}},
# Incremental results
{"builds": ["4", "3"],
"tests": {"001.html": {
- "results": [[2, "I"]],
- "times": [[2,2]]}}},
+ "results": [[2, IMAGE]],
+ "times": [[2, 2]]}}},
# Expected results
{"builds": ["4", "3", "2", "1"],
"tests": {"001.html": {
- "results": [[2,"I"],[200,"F"]],
- "times": [[2,2],[200,0]]}}})
+ "results": [[2, IMAGE], [200, TEXT]],
+ "times": [[2, 2], [200, 0]]}}})
def test_merge_multiple_tests_multiple_runs(self):
self._test_merge(
# Aggregated results
{"builds": ["2", "1"],
"tests": {"001.html": {
- "results": [[200,"F"]],
- "times": [[200,0]]},
+ "results": [[200, TEXT]],
+ "times": [[200, 0]]},
"002.html": {
- "results": [[10,"Z"]],
- "times": [[10,0]]}}},
+ "results": [[10, IMAGE_PLUS_TEXT]],
+ "times": [[10, 0]]}}},
# Incremental results
{"builds": ["4", "3"],
"tests": {"001.html": {
- "results": [[2, "I"]],
- "times": [[2,2]]},
+ "results": [[2, IMAGE]],
+ "times": [[2, 2]]},
"002.html": {
- "results": [[1,"C"]],
- "times": [[1,1]]}}},
+ "results": [[1, CRASH]],
+ "times": [[1, 1]]}}},
# Expected results
{"builds": ["4", "3", "2", "1"],
"tests": {"001.html": {
- "results": [[2,"I"],[200,"F"]],
- "times": [[2,2],[200,0]]},
+ "results": [[2, IMAGE], [200, TEXT]],
+ "times": [[2, 2], [200, 0]]},
"002.html": {
- "results": [[1,"C"],[10,"Z"]],
- "times": [[1,1],[10,0]]}}})
+ "results": [[1, CRASH], [10, IMAGE_PLUS_TEXT]],
+ "times": [[1, 1], [10, 0]]}}})
def test_merge_incremental_result_older_build(self):
# Test the build in incremental results is older than the most recent
@@ -369,18 +620,18 @@
# Aggregated results
{"builds": ["3", "1"],
"tests": {"001.html": {
- "results": [[5,"F"]],
- "times": [[5,0]]}}},
+ "results": [[5, TEXT]],
+ "times": [[5, 0]]}}},
# Incremental results
{"builds": ["2"],
"tests": {"001.html": {
- "results": [[1, "F"]],
- "times": [[1,0]]}}},
+ "results": [[1, TEXT]],
+ "times": [[1, 0]]}}},
# Expected no merge happens.
{"builds": ["2", "3", "1"],
"tests": {"001.html": {
- "results": [[6,"F"]],
- "times": [[6,0]]}}})
+ "results": [[6, TEXT]],
+ "times": [[6, 0]]}}})
def test_merge_incremental_result_same_build(self):
# Test the build in incremental results is same as the build in
@@ -389,111 +640,211 @@
# Aggregated results
{"builds": ["2", "1"],
"tests": {"001.html": {
- "results": [[5,"F"]],
- "times": [[5,0]]}}},
+ "results": [[5, TEXT]],
+ "times": [[5, 0]]}}},
# Incremental results
{"builds": ["3", "2"],
"tests": {"001.html": {
- "results": [[2, "F"]],
- "times": [[2,0]]}}},
+ "results": [[2, TEXT]],
+ "times": [[2, 0]]}}},
# Expected no merge happens.
{"builds": ["3", "2", "2", "1"],
"tests": {"001.html": {
- "results": [[7,"F"]],
- "times": [[7,0]]}}})
+ "results": [[7, TEXT]],
+ "times": [[7, 0]]}}})
def test_merge_remove_new_test(self):
self._test_merge(
# Aggregated results
{"builds": ["2", "1"],
"tests": {"001.html": {
- "results": [[199, "F"]],
+ "results": [[199, TEXT]],
"times": [[199, 0]]},
}},
# Incremental results
{"builds": ["3"],
"tests": {"001.html": {
- "results": [[1, "F"]],
+ "results": [[1, TEXT]],
"times": [[1, 0]]},
"002.html": {
- "results": [[1, "P"]],
+ "results": [[1, PASS]],
+ "times": [[1, 0]]},
+ "notrun.html": {
+ "results": [[1, NOTRUN]],
"times": [[1, 0]]},
"003.html": {
- "results": [[1, "N"]],
+ "results": [[1, NO_DATA]],
"times": [[1, 0]]},
- "004.html": {
- "results": [[1, "X"]],
- "times": [[1, 0]]},
- }},
+ }},
# Expected results
{"builds": ["3", "2", "1"],
"tests": {"001.html": {
- "results": [[200, "F"]],
+ "results": [[200, TEXT]],
"times": [[200, 0]]},
}},
max_builds=200)
-
def test_merge_remove_test(self):
self._test_merge(
# Aggregated results
- {"builds": ["2", "1"],
- "tests": {"001.html": {
- "results": [[200,"P"]],
- "times": [[200,0]]},
- "002.html": {
- "results": [[10,"F"]],
- "times": [[10,0]]},
- "003.html": {
- "results": [[190, 'X'], [9, 'N'], [1,"F"]],
- "times": [[200,0]]},
- }},
+ {
+ "builds": ["2", "1"],
+ "tests": {
+ "directory": {
+ "directory": {
+ "001.html": {
+ "results": [[200, PASS]],
+ "times": [[200, 0]]
+ }
+ }
+ },
+ "002.html": {
+ "results": [[10, TEXT]],
+ "times": [[10, 0]]
+ },
+ "003.html": {
+ "results": [[190, PASS], [9, NO_DATA], [1, TEXT]],
+ "times": [[200, 0]]
+ },
+ }
+ },
# Incremental results
- {"builds": ["3"],
- "tests": {"001.html": {
- "results": [[1,"P"]],
- "times": [[1,0]]},
- "002.html": {
- "results": [[1,"P"]],
- "times": [[1,0]]},
- "003.html": {
- "results": [[1,"P"]],
- "times": [[1,0]]},
- }},
+ {
+ "builds": ["3"],
+ "tests": {
+ "directory": {
+ "directory": {
+ "001.html": {
+ "results": [[1, PASS]],
+ "times": [[1, 0]]
+ }
+ }
+ },
+ "002.html": {
+ "results": [[1, PASS]],
+ "times": [[1, 0]]
+ },
+ "003.html": {
+ "results": [[1, PASS]],
+ "times": [[1, 0]]
+ },
+ }
+ },
# Expected results
- {"builds": ["3", "2", "1"],
- "tests": {"002.html": {
- "results": [[1,"P"],[10,"F"]],
- "times": [[11,0]]}}},
+ {
+ "builds": ["3", "2", "1"],
+ "tests": {
+ "002.html": {
+ "results": [[1, PASS], [10, TEXT]],
+ "times": [[11, 0]]
+ }
+ }
+ },
max_builds=200)
+ def test_merge_updates_expected(self):
+ self._test_merge(
+ # Aggregated results
+ {
+ "builds": ["2", "1"],
+ "tests": {
+ "directory": {
+ "directory": {
+ "001.html": {
+ "expected": "FAIL",
+ "results": [[200, PASS]],
+ "times": [[200, 0]]
+ }
+ }
+ },
+ "002.html": {
+ "bugs": ["crbug.com/1234"],
+ "expected": "FAIL",
+ "results": [[10, TEXT]],
+ "times": [[10, 0]]
+ },
+ "003.html": {
+ "expected": "FAIL",
+ "results": [[190, PASS], [9, NO_DATA], [1, TEXT]],
+ "times": [[200, 0]]
+ },
+ "004.html": {
+ "results": [[199, PASS], [1, TEXT]],
+ "times": [[200, 0]]
+ },
+ }
+ },
+ # Incremental results
+ {
+ "builds": ["3"],
+ "tests": {
+ "002.html": {
+ "expected": "PASS",
+ "results": [[1, PASS]],
+ "times": [[1, 0]]
+ },
+ "003.html": {
+ "expected": "TIMEOUT",
+ "results": [[1, PASS]],
+ "times": [[1, 0]]
+ },
+ "004.html": {
+ "bugs": ["crbug.com/1234"],
+ "results": [[1, PASS]],
+ "times": [[1, 0]]
+ },
+ }
+ },
+ # Expected results
+ {
+ "builds": ["3", "2", "1"],
+ "tests": {
+ "002.html": {
+ "results": [[1, PASS], [10, TEXT]],
+ "times": [[11, 0]]
+ },
+ "003.html": {
+ "expected": "TIMEOUT",
+ "results": [[191, PASS], [9, NO_DATA]],
+ "times": [[200, 0]]
+ },
+ "004.html": {
+ "bugs": ["crbug.com/1234"],
+ "results": [[200, PASS]],
+ "times": [[200, 0]]
+ },
+ }
+ },
+ max_builds=200)
+
+
def test_merge_keep_test_with_all_pass_but_slow_time(self):
# Do not remove test where all run pass but max running time >= 5 seconds
self._test_merge(
# Aggregated results
{"builds": ["2", "1"],
"tests": {"001.html": {
- "results": [[200,"P"]],
- "times": [[200,jsonresults.JSON_RESULTS_MIN_TIME]]},
+ "results": [[200, PASS]],
+ "times": [[200, jsonresults.JSON_RESULTS_MIN_TIME]]},
"002.html": {
- "results": [[10,"F"]],
- "times": [[10,0]]}}},
+ "results": [[10, TEXT]],
+ "times": [[10, 0]]}}},
# Incremental results
{"builds": ["3"],
"tests": {"001.html": {
- "results": [[1,"P"]],
- "times": [[1,1]]},
+ "results": [[1, PASS]],
+ "times": [[1, 1]]},
"002.html": {
- "results": [[1,"P"]],
- "times": [[1,0]]}}},
+ "results": [[1, PASS]],
+ "times": [[1, 0]]}}},
# Expected results
{"builds": ["3", "2", "1"],
"tests": {"001.html": {
- "results": [[201,"P"]],
- "times": [[1,1],[200,jsonresults.JSON_RESULTS_MIN_TIME]]},
+ "results": [[201, PASS]],
+ "times": [[1, 1], [200, jsonresults.JSON_RESULTS_MIN_TIME]]},
"002.html": {
- "results": [[1,"P"],[10,"F"]],
- "times": [[11,0]]}}})
+ "results": [[1, PASS], [10, TEXT]],
+ "times": [[11, 0]]}}})
def test_merge_prune_extra_results(self):
# Remove items from test results and times that exceed the max number
@@ -503,18 +854,18 @@
# Aggregated results
{"builds": ["2", "1"],
"tests": {"001.html": {
- "results": [[max_builds,"F"],[1,"I"]],
- "times": [[max_builds,0],[1,1]]}}},
+ "results": [[max_builds, TEXT], [1, IMAGE]],
+ "times": [[max_builds, 0], [1, 1]]}}},
# Incremental results
{"builds": ["3"],
"tests": {"001.html": {
- "results": [[1,"T"]],
- "times": [[1,1]]}}},
+ "results": [[1, TIMEOUT]],
+ "times": [[1, 1]]}}},
# Expected results
{"builds": ["3", "2", "1"],
"tests": {"001.html": {
- "results": [[1,"T"],[max_builds,"F"]],
- "times": [[1,1],[max_builds,0]]}}})
+ "results": [[1, TIMEOUT], [max_builds, TEXT]],
+ "times": [[1, 1], [max_builds, 0]]}}})
def test_merge_prune_extra_results_small(self):
# Remove items from test results and times that exceed the max number
@@ -524,18 +875,18 @@
# Aggregated results
{"builds": ["2", "1"],
"tests": {"001.html": {
- "results": [[max_builds,"F"],[1,"I"]],
- "times": [[max_builds,0],[1,1]]}}},
+ "results": [[max_builds, TEXT], [1, IMAGE]],
+ "times": [[max_builds, 0], [1, 1]]}}},
# Incremental results
{"builds": ["3"],
"tests": {"001.html": {
- "results": [[1,"T"]],
- "times": [[1,1]]}}},
+ "results": [[1, TIMEOUT]],
+ "times": [[1, 1]]}}},
# Expected results
{"builds": ["3", "2", "1"],
"tests": {"001.html": {
- "results": [[1,"T"],[max_builds,"F"]],
- "times": [[1,1],[max_builds,0]]}}},
+ "results": [[1, TIMEOUT], [max_builds, TEXT]],
+ "times": [[1, 1], [max_builds, 0]]}}},
int(max_builds))
def test_merge_prune_extra_results_with_new_result_of_same_type(self):
@@ -546,93 +897,67 @@
# Aggregated results
{"builds": ["2", "1"],
"tests": {"001.html": {
- "results": [[max_builds,"F"],[1,"N"]],
- "times": [[max_builds,0],[1,1]]}}},
+ "results": [[max_builds, TEXT], [1, NO_DATA]],
+ "times": [[max_builds, 0], [1, 1]]}}},
# Incremental results
{"builds": ["3"],
"tests": {"001.html": {
- "results": [[1,"F"]],
- "times": [[1,0]]}}},
+ "results": [[1, TEXT]],
+ "times": [[1, 0]]}}},
# Expected results
{"builds": ["3", "2", "1"],
"tests": {"001.html": {
- "results": [[max_builds,"F"]],
- "times": [[max_builds,0]]}}},
+ "results": [[max_builds, TEXT]],
+ "times": [[max_builds, 0]]}}},
int(max_builds))
- # FIXME: Some data got corrupted and has results and times at the directory level.
- # Once we've purged this from all the data, we should throw an error on this case.
- def test_merge_directory_hierarchy_extra_results_and_times(self):
- self._test_merge(
- # Aggregated results
- {"builds": ["2", "1"],
- "tests": {"baz": {
- "003.html": {
- "results": [[25,"F"]],
- "times": [[25,0]]}},
- "results": [[25,"F"]],
- "times": [[25,0]]}},
- # Incremental results
- {"builds": ["3"],
- "tests": {"baz": {
- "003.html": {
- "results": [[1,"F"]],
- "times": [[1,0]]}}}},
- # Expected results
- {"builds": ["3", "2", "1"],
- "tests": {"baz": {
- "003.html": {
- "results": [[26,"F"]],
- "times": [[26,0]]}}},
- "version": 4})
-
def test_merge_build_directory_hierarchy(self):
self._test_merge(
# Aggregated results
{"builds": ["2", "1"],
"tests": {"bar": {"baz": {
"003.html": {
- "results": [[25,"F"]],
- "times": [[25,0]]}}},
+ "results": [[25, TEXT]],
+ "times": [[25, 0]]}}},
"foo": {
"001.html": {
- "results": [[50,"F"]],
- "times": [[50,0]]},
+ "results": [[50, TEXT]],
+ "times": [[50, 0]]},
"002.html": {
- "results": [[100,"I"]],
- "times": [[100,0]]}}},
+ "results": [[100, IMAGE]],
+ "times": [[100, 0]]}}},
"version": 4},
# Incremental results
{"builds": ["3"],
"tests": {"baz": {
"004.html": {
- "results": [[1,"I"]],
- "times": [[1,0]]}},
+ "results": [[1, IMAGE]],
+ "times": [[1, 0]]}},
"foo": {
"001.html": {
- "results": [[1,"F"]],
- "times": [[1,0]]},
+ "results": [[1, TEXT]],
+ "times": [[1, 0]]},
"002.html": {
- "results": [[1,"I"]],
- "times": [[1,0]]}}},
+ "results": [[1, IMAGE]],
+ "times": [[1, 0]]}}},
"version": 4},
# Expected results
{"builds": ["3", "2", "1"],
"tests": {"bar": {"baz": {
"003.html": {
- "results": [[1,"N"],[25,"F"]],
- "times": [[26,0]]}}},
+ "results": [[1, NO_DATA], [25, TEXT]],
+ "times": [[26, 0]]}}},
"baz": {
"004.html": {
- "results": [[1,"I"]],
- "times": [[1,0]]}},
+ "results": [[1, IMAGE]],
+ "times": [[1, 0]]}},
"foo": {
"001.html": {
- "results": [[51,"F"]],
- "times": [[51,0]]},
+ "results": [[51, TEXT]],
+ "times": [[51, 0]]},
"002.html": {
- "results": [[101,"I"]],
- "times": [[101,0]]}}},
+ "results": [[101, IMAGE]],
+ "times": [[101, 0]]}}},
"version": 4})
# FIXME(aboxhall): Add some tests for xhtml/svg test results.
@@ -648,49 +973,49 @@
{"builds": ["3", "2", "1"],
"tests": {"foo": {
"001.html": {
- "results": [[200,"P"]],
- "times": [[200,0]]},
- "results": [[1,"N"]],
- "times": [[1,0]]},
+ "results": [[200, PASS]],
+ "times": [[200, 0]]},
+ "results": [[1, NO_DATA]],
+ "times": [[1, 0]]},
"002.html": {
- "results": [[10,"F"]],
- "times": [[10,0]]}}},
+ "results": [[10, TEXT]],
+ "times": [[10, 0]]}}},
# Expected results
- {"foo": {"001.html":{}}, "002.html":{}})
+ {"foo": {"001.html": {}}, "002.html": {}})
def test_gtest(self):
self._test_merge(
# Aggregated results
{"builds": ["2", "1"],
"tests": {"foo.bar": {
- "results": [[50,"F"]],
- "times": [[50,0]]},
+ "results": [[50, TEXT]],
+ "times": [[50, 0]]},
"foo.bar2": {
- "results": [[100,"I"]],
- "times": [[100,0]]},
+ "results": [[100, IMAGE]],
+ "times": [[100, 0]]},
},
"version": 3},
# Incremental results
{"builds": ["3"],
"tests": {"foo.bar2": {
- "results": [[1,"I"]],
- "times": [[1,0]]},
+ "results": [[1, IMAGE]],
+ "times": [[1, 0]]},
"foo.bar3": {
- "results": [[1,"F"]],
- "times": [[1,0]]},
+ "results": [[1, TEXT]],
+ "times": [[1, 0]]},
},
"version": 4},
# Expected results
{"builds": ["3", "2", "1"],
"tests": {"foo.bar": {
- "results": [[1, "N"], [50,"F"]],
- "times": [[51,0]]},
+ "results": [[1, NO_DATA], [50, TEXT]],
+ "times": [[51, 0]]},
"foo.bar2": {
- "results": [[101,"I"]],
- "times": [[101,0]]},
+ "results": [[101, IMAGE]],
+ "times": [[101, 0]]},
"foo.bar3": {
- "results": [[1,"F"]],
- "times": [[1,0]]},
+ "results": [[1, TEXT]],
+ "times": [[1, 0]]},
},
"version": 4})
diff --git a/Tools/TestResultServer/static-dashboards/aggregate_results.html b/Tools/TestResultServer/static-dashboards/aggregate_results.html
index 2790ddc..a949633 100644
--- a/Tools/TestResultServer/static-dashboards/aggregate_results.html
+++ b/Tools/TestResultServer/static-dashboards/aggregate_results.html
@@ -49,6 +49,22 @@
margin-right: 5px;
padding: 2px;
}
+table {
+ margin-bottom: 1em;
+}
+td {
+ white-space: nowrap;
+}
+td:first-child {
+ font-weight: bold;
+ font-size: 90%;
+}
+tr:nth-child(odd) {
+ background-color: #eeeeee;
+}
+tr:nth-child(even) {
+ background-color: #e0eaf1;
+}
</style>
<script src="builders.js"></script>
<script src="loader.js"></script>
diff --git a/Tools/TestResultServer/static-dashboards/aggregate_results.js b/Tools/TestResultServer/static-dashboards/aggregate_results.js
index 4400302..dd2535c 100644
--- a/Tools/TestResultServer/static-dashboards/aggregate_results.js
+++ b/Tools/TestResultServer/static-dashboards/aggregate_results.js
@@ -36,7 +36,11 @@
function generatePage(historyInstance)
{
- var html = ui.html.testTypeSwitcher(true) + '<br>';
+ var html = ui.html.testTypeSwitcher(true);
+ html += '<div>' +
+ ui.html.checkbox('rawValues', 'Show raw values', g_history.dashboardSpecificState.rawValues) +
+ ui.html.checkbox('showOutliers', 'Show outliers', g_history.dashboardSpecificState.showOutliers) +
+ '</div>';
for (var builder in currentBuilders())
html += htmlForBuilder(builder);
document.body.innerHTML = html;
@@ -46,6 +50,7 @@
{
switch(key) {
case 'rawValues':
+ case 'showOutliers':
historyInstance.dashboardSpecificState[key] = value == 'true';
return true;
@@ -55,10 +60,10 @@
}
var defaultDashboardSpecificStateValues = {
- rawValues: false
+ rawValues: false,
+ showOutliers: true
};
-
var aggregateResultsConfig = {
defaultStateValues: defaultDashboardSpecificStateValues,
generatePage: generatePage,
@@ -69,53 +74,44 @@
var g_history = new history.History(aggregateResultsConfig);
g_history.parseCrossDashboardParameters();
+g_totalFailureCounts = {};
+
+function totalFailureCountFor(builder)
+{
+ if (!g_totalFailureCounts[builder])
+ g_totalFailureCounts[builder] = getTotalTestCounts(g_resultsByBuilder[builder][FAILURES_BY_TYPE_KEY]);
+ return g_totalFailureCounts[builder];
+}
+
function htmlForBuilder(builder)
{
- var results = g_resultsByBuilder[builder];
- // Some keys were added later than others, so they don't have as many
- // builds. Use the shortest.
- // FIXME: Once 500 runs have finished, we can get rid of passing this
- // around and just assume all keys have the same number of builders for a
- // given builder.
- var numColumns = results[ALL_FIXABLE_COUNT_KEY].length;
var html = '<div class=container><h2>' + builder + '</h2>';
- if (g_history.dashboardSpecificState.rawValues)
- html += rawValuesHTML(results, numColumns);
- else {
+ if (g_history.dashboardSpecificState.rawValues) {
+ html += htmlForTestType(builder);
+ } else {
html += '<a href="timeline_explorer.html' + (location.hash ? location.hash + '&' : '#') + 'builder=' + builder + '">' +
- chartHTML(results, numColumns) + '</a>';
+ chartHTML(builder) + '</a>';
}
- html += '</div>';
- return html;
+ return html + '</div>';
}
-function rawValuesHTML(results, numColumns)
+function chartHTML(builder)
{
- var html = htmlForSummaryTable(results, numColumns) +
- htmlForTestType(results, FIXABLE_COUNTS_KEY, FIXABLE_DESCRIPTION, numColumns);
- if (g_history.isLayoutTestResults()) {
- html += htmlForTestType(results, DEFERRED_COUNTS_KEY, DEFERRED_DESCRIPTION, numColumns) +
- htmlForTestType(results, WONTFIX_COUNTS_KEY, WONTFIX_DESCRIPTION, numColumns);
- }
- return html;
-}
-
-function chartHTML(results, numColumns)
-{
+ var results = g_resultsByBuilder[builder];
+ var totalFailingTests = totalFailureCountFor(builder).totalFailingTests;
var shouldShowBlinkRevisions = isTipOfTreeWebKitBuilder();
var revisionKey = shouldShowBlinkRevisions ? BLINK_REVISIONS_KEY : CHROME_REVISIONS_KEY;
- var startRevision = results[revisionKey][numColumns - 1];
+ var startRevision = results[revisionKey][totalFailingTests.length - 1];
var endRevision = results[revisionKey][0];
- var revisionLabel = shouldShowBlinkRevisions ? "WebKit Revision" : "Chromium Revision";
+ var revisionLabel = shouldShowBlinkRevisions ? "Blink Revision" : "Chromium Revision";
- var fixable = results[FIXABLE_COUNT_KEY].slice(0, numColumns);
- var html = chart("Total failing", {"": fixable}, revisionLabel, startRevision, endRevision);
+ var html = chart("Total failing", {"": totalFailingTests}, revisionLabel, startRevision, endRevision);
- var values = valuesPerExpectation(results[FIXABLE_COUNTS_KEY], numColumns);
+ var values = results[FAILURES_BY_TYPE_KEY];
// Don't care about number of passes for the charts.
- delete(values['P']);
+ delete(values[PASS]);
return html + chart("Detailed breakdown", values, revisionLabel, startRevision, endRevision);
}
@@ -124,23 +120,25 @@
// FIXME: Find a better way to exclude outliers. This is just so we exclude
// runs where every test failed.
-var MAX_VALUE = 10000;
+var MAX_VALUE = 2000;
function filteredValues(values, desiredNumberOfPoints)
{
// Filter out values to make the graph a bit more readable and to keep URLs
// from exceeding the browsers max length restriction.
var filterAmount = Math.floor(values.length / desiredNumberOfPoints);
- if (filterAmount < 1)
- return values;
-
return values.filter(function(element, index, array) {
- // Include the most recent and oldest values and exclude outliers.
- return (index % filterAmount == 0 || index == array.length - 1) && (array[index] < MAX_VALUE && array[index] != 0);
+ if (!g_history.dashboardSpecificState.showOutliers && element > MAX_VALUE)
+ return false;
+ if (filterAmount <= 1)
+ return true;
+ // Include the most recent and oldest values.
+ return index % filterAmount == 0 || index == array.length - 1;
});
}
-function chartUrl(title, values, revisionLabel, startRevision, endRevision, desiredNumberOfPoints) {
+function chartUrl(title, values, revisionLabel, startRevision, endRevision, desiredNumberOfPoints)
+{
var maxValue = 0;
for (var expectation in values)
maxValue = Math.max(maxValue, Math.max.apply(null, filteredValues(values[expectation], desiredNumberOfPoints)));
@@ -149,22 +147,19 @@
var labels = '';
var numLabels = 0;
- var first = true;
for (var expectation in values) {
- chartData += (first ? 'e:' : ',') + extendedEncode(filteredValues(values[expectation], desiredNumberOfPoints).reverse(), maxValue);
+ chartData += (chartData ? ',' : 'e:') + extendedEncode(filteredValues(values[expectation], desiredNumberOfPoints).reverse(), maxValue);
if (expectation) {
numLabels++;
- labels += (first ? '' : '|') + expectationsMap()[expectation];
+ labels += (labels ? '|' : '') + expectation;
}
- first = false;
}
var url = "http://chart.apis.google.com/chart?cht=lc&chs=600x400&chd=" +
chartData + "&chg=15,15,1,3&chxt=x,x,y&chxl=1:||" + revisionLabel +
"|&chxr=0," + startRevision + "," + endRevision + "|2,0," + maxValue + "&chtt=" + title;
-
if (labels)
url += "&chdl=" + labels + "&chco=" + LABEL_COLORS.slice(0, numLabels).join(',');
return url;
@@ -187,56 +182,35 @@
function htmlForRevisionRows(results, numColumns)
{
- return htmlForTableRow('WebKit Revision', results[BLINK_REVISIONS_KEY].slice(0, numColumns)) +
+ return htmlForTableRow('Blink Revision', results[BLINK_REVISIONS_KEY].slice(0, numColumns)) +
htmlForTableRow('Chrome Revision', results[CHROME_REVISIONS_KEY].slice(0, numColumns));
}
-function wrapHTMLInTable(description, html)
+function htmlForTestType(builder)
{
- return '<h3>' + description + '</h3><table><tbody>' + html + '</tbody></table>';
-}
+ var counts = totalFailureCountFor(builder);
+ var totalFailing = counts.totalFailingTests;
+ var totalTests = counts.totalTests;
-function htmlForSummaryTable(results, numColumns)
-{
var percent = [];
- var fixable = results[FIXABLE_COUNT_KEY].slice(0, numColumns);
- var allFixable = results[ALL_FIXABLE_COUNT_KEY].slice(0, numColumns);
- for (var i = 0; i < numColumns; i++) {
- var percentage = 100 * (allFixable[i] - fixable[i]) / allFixable[i];
+ for (var i = 0; i < totalTests.length; i++) {
+ var percentage = 100 * (totalTests[i] - totalFailing[i]) / totalTests[i];
// Round to the nearest tenth of a percent.
percent.push(Math.round(percentage * 10) / 10 + '%');
}
- var html = htmlForRevisionRows(results, numColumns) +
+
+ var results = g_resultsByBuilder[builder];
+ html = '<table><tbody>' +
+ htmlForRevisionRows(results, totalTests.length) +
htmlForTableRow('Percent passed', percent) +
- htmlForTableRow('Failures (deduped)', fixable) +
- htmlForTableRow('Fixable Tests', allFixable);
- return wrapHTMLInTable('Summary', html);
-}
+ htmlForTableRow('Failures', totalFailing) +
+ htmlForTableRow('Total Tests', totalTests);
-function valuesPerExpectation(counts, numColumns)
-{
- var values = {};
- for (var i = 0; i < numColumns; i++) {
- for (var expectation in expectationsMap()) {
- if (expectation in counts[i]) {
- var count = counts[i][expectation];
- if (!values[expectation])
- values[expectation] = [];
- values[expectation].push(count);
- }
- }
- }
- return values;
-}
-
-function htmlForTestType(results, key, description, numColumns)
-{
- var counts = results[key];
- var html = htmlForRevisionRows(results, numColumns);
- var values = valuesPerExpectation(counts, numColumns);
+ var values = results[FAILURES_BY_TYPE_KEY];
for (var expectation in values)
- html += htmlForTableRow(expectationsMap()[expectation], values[expectation]);
- return wrapHTMLInTable(description, html);
+ html += htmlForTableRow(expectation, values[expectation]);
+
+ return html + '</tbody></table>';
}
function htmlForTableRow(columnName, values)
diff --git a/Tools/TestResultServer/static-dashboards/aggregate_results_unittest.js b/Tools/TestResultServer/static-dashboards/aggregate_results_unittest.js
new file mode 100644
index 0000000..39d8bfe
--- /dev/null
+++ b/Tools/TestResultServer/static-dashboards/aggregate_results_unittest.js
@@ -0,0 +1,108 @@
+// Copyright (C) 2013 Google Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+module('aggregate_results');
+
+function setupAggregateResultsData()
+{
+ var historyInstance = new history.History(flakinessConfig);
+ // FIXME(jparent): Remove this once global isn't used.
+ g_history = historyInstance;
+ for (var key in history.DEFAULT_CROSS_DASHBOARD_STATE_VALUES)
+ historyInstance.crossDashboardState[key] = history.DEFAULT_CROSS_DASHBOARD_STATE_VALUES[key];
+
+ var builderName = 'Blink Linux';
+ LOAD_BUILDBOT_DATA([{
+ name: 'ChromiumWebkit',
+ url: 'dummyurl',
+ tests: {'layout-tests': {'builders': [builderName]}}
+ }]);
+ for (var group in LAYOUT_TESTS_BUILDER_GROUPS)
+ LAYOUT_TESTS_BUILDER_GROUPS[group] = null;
+
+ loadBuildersList('@ToT - chromium.org', 'layout-tests');
+
+ g_resultsByBuilder[builderName] = {
+ "num_failures_by_type": {
+ "CRASH": [ 13, 10 ],
+ "MISSING": [ 6, 8 ],
+ "IMAGE+TEXT": [ 17, 17 ],
+ "IMAGE": [ 81, 68 ],
+ "SKIP": [ 1167, 748 ],
+ "TEXT": [ 89, 60 ],
+ "TIMEOUT": [ 72, 48 ],
+ "PASS": [ 28104, 28586 ],
+ "AUDIO": [ 0, 0 ]
+ },
+ blinkRevision: [1234, 1233],
+ chromeRevision: [4567, 4566]
+ }
+ g_totalFailureCounts = {};
+}
+
+test('htmlForBuilder', 1, function() {
+ setupAggregateResultsData();
+ g_history.dashboardSpecificState.rawValues = false;
+
+ var expectedHtml = '<div class=container>' +
+ '<h2>Blink Linux</h2>' +
+ '<a href="timeline_explorer.html#useTestData=true&builder=Blink Linux">' +
+ '<img src="http://chart.apis.google.com/chart?cht=lc&chs=600x400&chd=e:qe..&chg=15,15,1,3&chxt=x,x,y&chxl=1:||Blink Revision|&chxr=0,1233,1234|2,0,1445&chtt=Total failing">' +
+ '<img src="http://chart.apis.google.com/chart?cht=lc&chs=600x400&chd=e:AjAt,AcAV,A7A7,DuEc,pB..,DSE4,CoD8,AAAA&chg=15,15,1,3&chxt=x,x,y&chxl=1:||Blink Revision|&chxr=0,1233,1234|2,0,1167&chtt=Detailed breakdown&chdl=CRASH|MISSING|IMAGE+TEXT|IMAGE|SKIP|TEXT|TIMEOUT|AUDIO&chco=FF0000,00FF00,0000FF,000000,FF6EB4,FFA812,9B30FF,00FFCC">' +
+ '</a>' +
+ '</div>';
+ equal(expectedHtml, htmlForBuilder('Blink Linux'));
+});
+
+test('htmlForBuilderRawResults', 1, function() {
+ setupAggregateResultsData();
+ g_history.dashboardSpecificState.rawValues = true;
+
+ var expectedHtml = '<div class=container>' +
+ '<h2>Blink Linux</h2>' +
+ '<table>' +
+ '<tbody>' +
+ '<tr><td>Blink Revision</td><td>1234</td><td>1233</td></tr>' +
+ '<tr><td>Chrome Revision</td><td>4567</td><td>4566</td></tr>' +
+ '<tr><td>Percent passed</td><td>95.1%</td><td>96.8%</td></tr>' +
+ '<tr><td>Failures</td><td>1445</td><td>959</td></tr>' +
+ '<tr><td>Total Tests</td><td>29549</td><td>29545</td></tr>' +
+ '<tr><td>CRASH</td><td>13</td><td>10</td></tr>' +
+ '<tr><td>MISSING</td><td>6</td><td>8</td></tr>' +
+ '<tr><td>IMAGE+TEXT</td><td>17</td><td>17</td></tr>' +
+ '<tr><td>IMAGE</td><td>81</td><td>68</td></tr>' +
+ '<tr><td>SKIP</td><td>1167</td><td>748</td></tr>' +
+ '<tr><td>TEXT</td><td>89</td><td>60</td></tr>' +
+ '<tr><td>TIMEOUT</td><td>72</td><td>48</td></tr>' +
+ '<tr><td>PASS</td><td>28104</td><td>28586</td></tr>' +
+ '<tr><td>AUDIO</td><td>0</td><td>0</td></tr>' +
+ '</tbody>' +
+ '</table>' +
+ '</div>';
+ equal(expectedHtml, htmlForBuilder('Blink Linux'));
+});
diff --git a/Tools/TestResultServer/static-dashboards/builders_unittests.js b/Tools/TestResultServer/static-dashboards/builders_unittests.js
index 29c02f1..0b2518c 100644
--- a/Tools/TestResultServer/static-dashboards/builders_unittests.js
+++ b/Tools/TestResultServer/static-dashboards/builders_unittests.js
@@ -26,6 +26,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+module('builders');
+
test('loading steps', 4, function() {
var tests = {}
var baseUrl = 'http://dummyurl';
diff --git a/Tools/TestResultServer/static-dashboards/dashboard_base.js b/Tools/TestResultServer/static-dashboards/dashboard_base.js
index 6f394a9..cc2fa1b 100644
--- a/Tools/TestResultServer/static-dashboards/dashboard_base.js
+++ b/Tools/TestResultServer/static-dashboards/dashboard_base.js
@@ -26,50 +26,21 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// @fileoverview Base JS file for pages that want to parse the results JSON
-// from the testing bots. This deals with generic utility functions, visible
-// history, popups and appending the script elements for the JSON files.
-
-
-//////////////////////////////////////////////////////////////////////////////
-// CONSTANTS
-//////////////////////////////////////////////////////////////////////////////
-var GTEST_EXPECTATIONS_MAP_ = {
- 'P': 'PASS',
- 'F': 'FAIL',
- 'N': 'NO DATA',
- 'X': 'SKIPPED'
-};
-
-var LAYOUT_TEST_EXPECTATIONS_MAP_ = {
- 'P': 'PASS',
- 'N': 'NO DATA',
- 'X': 'SKIP',
- 'T': 'TIMEOUT',
- 'F': 'TEXT',
- 'C': 'CRASH',
- 'I': 'IMAGE',
- 'Z': 'IMAGE+TEXT',
- // We used to glob a bunch of expectations into "O" as OTHER. Expectations
- // are more precise now though and it just means MISSING.
- 'O': 'MISSING'
-};
-
-
// Keys in the JSON files.
-var WONTFIX_COUNTS_KEY = 'wontfixCounts';
-var FIXABLE_COUNTS_KEY = 'fixableCounts';
-var DEFERRED_COUNTS_KEY = 'deferredCounts';
-var WONTFIX_DESCRIPTION = 'Tests never to be fixed (WONTFIX)';
-var FIXABLE_DESCRIPTION = 'All tests for this release';
-var DEFERRED_DESCRIPTION = 'All deferred tests (DEFER)';
-var FIXABLE_COUNT_KEY = 'fixableCount';
-var ALL_FIXABLE_COUNT_KEY = 'allFixableCount';
+var FAILURES_BY_TYPE_KEY = 'num_failures_by_type';
+var FAILURE_MAP_KEY = 'failure_map';
var CHROME_REVISIONS_KEY = 'chromeRevision';
var BLINK_REVISIONS_KEY = 'blinkRevision';
var TIMESTAMPS_KEY = 'secondsSinceEpoch';
var BUILD_NUMBERS_KEY = 'buildNumbers';
var TESTS_KEY = 'tests';
+
+// Failure types.
+var PASS = 'PASS';
+var NO_DATA = 'NO DATA';
+var SKIP = 'SKIP';
+var NOTRUN = 'NOTRUN';
+
var ONE_DAY_SECONDS = 60 * 60 * 24;
var ONE_WEEK_SECONDS = ONE_DAY_SECONDS * 7;
@@ -122,9 +93,11 @@
VALUE: 1
}
-function isFailingResult(value)
+var _NON_FAILURE_TYPES = [PASS, NO_DATA, SKIP, NOTRUN];
+
+function isFailingResult(failureMap, failureType)
{
- return 'FSTOCIZ'.indexOf(value) != -1;
+ return _NON_FAILURE_TYPES.indexOf(failureMap[failureType]) == -1;
}
// Generic utility functions.
@@ -176,12 +149,6 @@
}
var g_resultsByBuilder = {};
-var g_expectationsByPlatform = {};
-
-function isFlakinessDashboard()
-{
- return string.endsWith(window.location.pathname, 'flakiness_dashboard.html');
-}
// Create a new function with some of its arguements
// pre-filled.
@@ -202,8 +169,23 @@
};
};
-// Returns the appropriate expectations map for the current testType.
-function expectationsMap()
+function getTotalTestCounts(failuresByType)
{
- return g_history.isLayoutTestResults() ? LAYOUT_TEST_EXPECTATIONS_MAP_ : GTEST_EXPECTATIONS_MAP_;
-}
\ No newline at end of file
+ var countData;
+ for (var failureType in failuresByType) {
+ var failures = failuresByType[failureType];
+ if (countData) {
+ failures.forEach(function(count, index) {
+ countData.totalTests[index] += count;
+ if (failureType != PASS)
+ countData.totalFailingTests[index] += count;
+ });
+ } else {
+ countData = {
+ totalTests: failures.slice(),
+ totalFailingTests: failures.slice(),
+ };
+ }
+ }
+ return countData;
+}
diff --git a/Tools/TestResultServer/static-dashboards/flakiness_dashboard.js b/Tools/TestResultServer/static-dashboards/flakiness_dashboard.js
index 750cd3a..1cc98e7 100644
--- a/Tools/TestResultServer/static-dashboards/flakiness_dashboard.js
+++ b/Tools/TestResultServer/static-dashboards/flakiness_dashboard.js
@@ -29,109 +29,23 @@
//////////////////////////////////////////////////////////////////////////////
// CONSTANTS
//////////////////////////////////////////////////////////////////////////////
-var ALL = 'ALL';
var FORWARD = 'forward';
var BACKWARD = 'backward';
var GTEST_MODIFIERS = ['FLAKY', 'FAILS', 'MAYBE', 'DISABLED'];
-var TEST_URL_BASE_PATH_IN_VERSION_CONTROL = 'http://src.chromium.org/viewvc/blink/trunk/LayoutTests/';
-var TEST_URL_BASE_PATH = "http://svn.webkit.org/repository/webkit/trunk/LayoutTests/";
-var EXPECTATIONS_URL_BASE_PATH = TEST_URL_BASE_PATH + "platform/";
+var TEST_URL_BASE_PATH_FOR_BROWSING = 'http://src.chromium.org/viewvc/blink/trunk/LayoutTests/';
+var TEST_URL_BASE_PATH_FOR_XHR = 'http://src.chromium.org/blink/trunk/LayoutTests/';
var TEST_RESULTS_BASE_PATH = 'http://build.chromium.org/f/chromium/layout_test_results/';
var GPU_RESULTS_BASE_PATH = 'http://chromium-browser-gpu-tests.commondatastorage.googleapis.com/runs/'
-var PLATFORMS = {
- 'CHROMIUM': {
- expectationsDirectory: null, /* FIXME: cleanup post blink split 'chromium', */
- subPlatforms: {
- 'LION': { fallbackPlatforms: ['CHROMIUM'] },
- 'SNOWLEOPARD': { fallbackPlatforms: ['CHROMIUM'] },
- 'XP': { fallbackPlatforms: ['CHROMIUM'] },
- 'VISTA': { fallbackPlatforms: ['CHROMIUM'] },
- 'WIN7': { fallbackPlatforms: ['CHROMIUM'] },
- 'LUCID': { fallbackPlatforms: ['CHROMIUM'] },
- 'ANDROID': { fallbackPlatforms: ['CHROMIUM'], expectationsDirectory: null /* 'chromium-android' */ }
- },
- platformModifierUnions: {
- 'MAC': ['CHROMIUM_LION', 'CHROMIUM_SNOWLEOPARD'],
- 'WIN': ['CHROMIUM_XP', 'CHROMIUM_VISTA', 'CHROMIUM_WIN7'],
- 'LINUX': ['CHROMIUM_LUCID']
- }
- },
- 'APPLE': {
- subPlatforms: {
- 'MAC': {
- expectationsDirectory: 'mac',
- subPlatforms: {
- 'LION': {
- expectationsDirectory: 'mac-lion',
- subPlatforms: {
- 'WK1': { fallbackPlatforms: ['APPLE_MAC_LION', 'APPLE_MAC'] },
- 'WK2': { fallbackPlatforms: ['APPLE_MAC_LION', 'APPLE_MAC', 'WK2'] }
- }
- },
- 'SNOWLEOPARD': {
- expectationsDirectory: null,
- subPlatforms: {
- 'WK1': { fallbackPlatforms: ['APPLE_MAC_SNOWLEOPARD', 'APPLE_MAC'] },
- 'WK2': { fallbackPlatforms: ['APPLE_MAC_SNOWLEOPARD', 'APPLE_MAC', 'WK2'] }
- }
- }
- }
- },
- 'WIN': {
- expectationsDirectory: 'win',
- subPlatforms: {
- 'XP': { fallbackPlatforms: ['APPLE_WIN'] },
- 'WIN7': { fallbackPlatforms: ['APPLE_WIN'] }
- }
- }
- }
- },
- 'GTK': {
- expectationsDirectory: 'gtk',
- subPlatforms: {
- 'LINUX': {
- subPlatforms: {
- 'WK1': { fallbackPlatforms: ['GTK'] },
- 'WK2': { fallbackPlatforms: ['GTK', 'WK2'], expectationsDirectory: 'gtk-wk2' }
- }
- }
- }
- },
- 'QT': {
- expectationsDirectory: 'qt',
- subPlatforms: {
- 'LINUX': { fallbackPlatforms: ['QT'] }
- }
- },
- 'EFL': {
- expectationsDirectory: 'efl',
- subPlatforms: {
- 'LINUX': {
- subPlatforms: {
- 'WK1': { fallbackPlatforms: ['EFL'], expectationsDirectory: 'efl-wk1' },
- 'WK2': { fallbackPlatforms: ['EFL', 'WK2'], expectationsDirectory: 'efl-wk2' }
- }
- }
- }
- },
- 'WK2': {
- basePlatform: true,
- expectationsDirectory: 'wk2'
- }
-};
-
-var BUILD_TYPES = {'DEBUG': 'DBG', 'RELEASE': 'RELEASE'};
-var MIN_SECONDS_FOR_SLOW_TEST = 4;
-var MIN_SECONDS_FOR_SLOW_TEST_DEBUG = 2 * MIN_SECONDS_FOR_SLOW_TEST;
-var FAIL_RESULTS = ['IMAGE', 'IMAGE+TEXT', 'TEXT', 'MISSING'];
+var RELEASE_TIMEOUT = 6;
+var DEBUG_TIMEOUT = 12;
+var SLOW_MULTIPLIER = 5;
var CHUNK_SIZE = 25;
-var MAX_RESULTS = 1500;
// FIXME: Figure out how to make this not be hard-coded.
var VIRTUAL_SUITES = {
- 'platform/chromium/virtual/gpu/fast/canvas': 'fast/canvas',
- 'platform/chromium/virtual/gpu/canvas/philip': 'canvas/philip'
+ 'virtual/gpu/fast/canvas': 'fast/canvas',
+ 'virtual/gpu/canvas/philip': 'canvas/philip'
};
var resourceLoader;
@@ -148,8 +62,6 @@
// result expands to all tests that ever have the given result
if (historyInstance.dashboardSpecificState.tests || historyInstance.dashboardSpecificState.result)
generatePageForIndividualTests(individualTests());
- else if (historyInstance.dashboardSpecificState.expectationsUpdate)
- generatePageForExpectationsUpdate();
else
generatePageForBuilder(historyInstance.dashboardSpecificState.builder || currentBuilderGroup().defaultBuilder());
@@ -162,6 +74,7 @@
function handleValidHashParameter(historyInstance, key, value)
{
switch(key) {
+ case 'result':
case 'tests':
history.validateParameter(historyInstance.dashboardSpecificState, key, value,
function() {
@@ -169,18 +82,6 @@
});
return true;
- case 'result':
- value = value.toUpperCase();
- history.validateParameter(historyInstance.dashboardSpecificState, key, value,
- function() {
- for (var result in LAYOUT_TEST_EXPECTATIONS_MAP_) {
- if (value == LAYOUT_TEST_EXPECTATIONS_MAP_[result])
- return true;
- }
- return false;
- });
- return true;
-
case 'builder':
history.validateParameter(historyInstance.dashboardSpecificState, key, value,
function() {
@@ -211,7 +112,6 @@
return true;
case 'resultsHeight':
- case 'updateIndex':
case 'revision':
history.validateParameter(historyInstance.dashboardSpecificState, key, Number(value),
function() {
@@ -220,17 +120,14 @@
return true;
case 'showChrome':
- case 'showCorrectExpectations':
- case 'showWrongExpectations':
case 'showExpectations':
case 'showFlaky':
case 'showLargeExpectations':
- case 'legacyExpectationsSemantics':
- case 'showSkipped':
+ case 'showNonFlaky':
case 'showSlow':
+ case 'showSkip':
case 'showUnexpectedPasses':
- case 'showWontFixSkip':
- case 'expectationsUpdate':
+ case 'showWontFix':
historyInstance.dashboardSpecificState[key] = value == 'true';
return true;
@@ -263,18 +160,17 @@
sortOrder: BACKWARD,
sortColumn: 'flakiness',
showExpectations: false,
- showFlaky: true,
+ // FIXME: Show flaky tests by default if you have a builder picked.
+ // Ideally, we'd fix the dashboard to not pick a default builder and have
+ // you pick one. In the interim, this is a good way to make the default
+ // page load faster since we don't need to generate/layout a large table.
+ showFlaky: false,
showLargeExpectations: false,
- legacyExpectationsSemantics: true,
showChrome: true,
- showCorrectExpectations: false,
- showWrongExpectations: false,
- showWontFixSkip: false,
- showSlow: false,
- showSkipped: false,
+ showWontFix: false,
+ showNonFlaky: false,
+ showSkip: false,
showUnexpectedPasses: false,
- expectationsUpdate: false,
- updateIndex: 0,
resultsHeight: 300,
revision: null,
tests: '',
@@ -288,7 +184,6 @@
'group': 'builder'
};
-
var flakinessConfig = {
defaultStateValues: defaultDashboardSpecificStateValues,
generatePage: generatePage,
@@ -305,35 +200,9 @@
// GLOBALS
//////////////////////////////////////////////////////////////////////////////
-var g_perBuilderPlatformAndBuildType = {};
var g_perBuilderFailures = {};
-// Map of builder to arrays of tests that are listed in the expectations file
-// but have for that builder.
-var g_perBuilderWithExpectationsButNoFailures = {};
-// Map of builder to arrays of paths that are skipped. This shows the raw
-// path used in TestExpectations rather than the test path since we
-// don't actually have any data here for skipped tests.
-var g_perBuilderSkippedPaths = {};
// Maps test path to an array of {builder, testResults} objects.
var g_testToResultsMap = {};
-// Tests that the user wants to update expectations for.
-var g_confirmedTests = {};
-
-function traversePlatformsTree(callback)
-{
- function traverse(platformObject, parentPlatform) {
- Object.keys(platformObject).forEach(function(platformName) {
- var platform = platformObject[platformName];
- platformName = parentPlatform ? parentPlatform + platformName : platformName;
-
- if (platform.subPlatforms)
- traverse(platform.subPlatforms, platformName + '_');
- else if (!platform.basePlatform)
- callback(platform, platformName);
- });
- }
- traverse(PLATFORMS, null);
-}
function createResultsObjectForTest(test, builder)
{
@@ -344,16 +213,8 @@
html: '',
flips: 0,
slowestTime: 0,
- slowestNonTimeoutCrashTime: 0,
- meetsExpectations: true,
- isWontFixSkip: false,
isFlaky: false,
- // Sorted string of missing expectations
- missing: '',
- // String of extra expectations (i.e. expectations that never occur).
- extra: '',
- modifiers: '',
- bugs: '',
+ bugs: [],
expectations : '',
rawResults: '',
// List of all the results the test actually has.
@@ -361,79 +222,6 @@
};
}
-function matchingElement(stringToMatch, elementsMap)
-{
- for (var element in elementsMap) {
- if (string.contains(stringToMatch, elementsMap[element]))
- return element;
- }
-}
-
-function chromiumPlatform(builderNameUpperCase)
-{
- if (string.contains(builderNameUpperCase, 'MAC')) {
- if (string.contains(builderNameUpperCase, '10.7'))
- return 'CHROMIUM_LION';
- // The webkit.org 'Chromium Mac Release (Tests)' bot runs SnowLeopard.
- return 'CHROMIUM_SNOWLEOPARD';
- }
- if (string.contains(builderNameUpperCase, 'WIN7'))
- return 'CHROMIUM_WIN7';
- if (string.contains(builderNameUpperCase, 'VISTA'))
- return 'CHROMIUM_VISTA';
- if (string.contains(builderNameUpperCase, 'WIN') || string.contains(builderNameUpperCase, 'XP'))
- return 'CHROMIUM_XP';
- if (string.contains(builderNameUpperCase, 'LINUX'))
- return 'CHROMIUM_LUCID';
- if (string.contains(builderNameUpperCase, 'ANDROID'))
- return 'CHROMIUM_ANDROID';
- // The interactive bot is XP, but doesn't have an OS in it's name.
- if (string.contains(builderNameUpperCase, 'INTERACTIVE'))
- return 'CHROMIUM_XP';
-}
-
-
-function platformAndBuildType(builderName)
-{
- if (!g_perBuilderPlatformAndBuildType[builderName]) {
- var builderNameUpperCase = builderName.toUpperCase();
-
- var platform = chromiumPlatform(builderNameUpperCase);
-
- if (!platform)
- console.error('Could not resolve platform for builder: ' + builderName);
-
- var buildType = string.contains(builderNameUpperCase, 'DBG') || string.contains(builderNameUpperCase, 'DEBUG') ? 'DEBUG' : 'RELEASE';
- g_perBuilderPlatformAndBuildType[builderName] = {platform: platform, buildType: buildType};
- }
- return g_perBuilderPlatformAndBuildType[builderName];
-}
-
-function isDebug(builderName)
-{
- return platformAndBuildType(builderName).buildType == 'DEBUG';
-}
-
-// Returns the expectation string for the given single character result.
-// This string should match the expectations that are put into
-// test_expectations.py.
-//
-// For example, if we start explicitly listing IMAGE result failures,
-// this function should start returning 'IMAGE'.
-function expectationsFileStringForResult(result)
-{
- // For the purposes of comparing against the expecations of a test,
- // consider simplified diff failures as just text failures since
- // the test_expectations file doesn't treat them specially.
- if (result == 'S')
- return 'TEXT';
-
- if (result == 'N')
- return '';
-
- return expectationsMap()[result];
-}
-
var TestTrie = function(builders, resultsByBuilder)
{
this._trie = {};
@@ -562,22 +350,6 @@
return testsArray;
}
-// Returns whether this test's slowest time is above the cutoff for
-// being a slow test.
-function isSlowTest(resultsForTest)
-{
- var maxTime = isDebug(resultsForTest.builder) ? MIN_SECONDS_FOR_SLOW_TEST_DEBUG : MIN_SECONDS_FOR_SLOW_TEST;
- return resultsForTest.slowestNonTimeoutCrashTime > maxTime;
-}
-
-// Returns whether this test's slowest time is *well* below the cutoff for
-// being a slow test.
-function isFastTest(resultsForTest)
-{
- var maxTime = isDebug(resultsForTest.builder) ? MIN_SECONDS_FOR_SLOW_TEST_DEBUG : MIN_SECONDS_FOR_SLOW_TEST;
- return resultsForTest.slowestNonTimeoutCrashTime < maxTime / 2;
-}
-
function allTestsWithResult(result)
{
processTestRunsForAllBuilders();
@@ -585,7 +357,7 @@
getAllTestsTrie().forEach(function(triePath) {
for (var i = 0; i < g_testToResultsMap[triePath].length; i++) {
- if (g_testToResultsMap[triePath][i].actualResults.indexOf(result) != -1) {
+ if (g_testToResultsMap[triePath][i].actualResults.indexOf(result.toUpperCase()) != -1) {
retVal.push(triePath);
break;
}
@@ -595,372 +367,6 @@
return retVal;
}
-
-// Adds all the tests for the given builder to the testMapToPopulate.
-function addTestsForBuilder(builder, testMapToPopulate)
-{
- var tests = g_resultsByBuilder[builder].tests;
- for (var test in tests) {
- testMapToPopulate[test] = true;
- }
-}
-
-// Map of all tests to true values by platform and build type.
-// e.g. g_allTestsByPlatformAndBuildType['XP']['DEBUG'] will have the union
-// of all tests run on the xp-debug builders.
-var g_allTestsByPlatformAndBuildType = {};
-traversePlatformsTree(function(platform, platformName) {
- g_allTestsByPlatformAndBuildType[platformName] = {};
-});
-
-// Map of all tests to true values by platform and build type.
-// e.g. g_allTestsByPlatformAndBuildType['WIN']['DEBUG'] will have the union
-// of all tests run on the win-debug builders.
-function allTestsWithSamePlatformAndBuildType(platform, buildType)
-{
- if (!g_allTestsByPlatformAndBuildType[platform][buildType]) {
- var tests = {};
- for (var thisBuilder in currentBuilders()) {
- var thisBuilderBuildInfo = platformAndBuildType(thisBuilder);
- if (thisBuilderBuildInfo.buildType == buildType && thisBuilderBuildInfo.platform == platform) {
- addTestsForBuilder(thisBuilder, tests);
- }
- }
- g_allTestsByPlatformAndBuildType[platform][buildType] = tests;
- }
-
- return g_allTestsByPlatformAndBuildType[platform][buildType];
-}
-
-function getExpectations(test, platform, buildType)
-{
- var testObject = g_allExpectations[test];
- if (!testObject)
- return null;
-
- var platformObject = testObject[platform];
- if (!platformObject)
- return null;
-
- return platformObject[buildType];
-}
-
-function filterBugs(modifiers)
-{
- var bugs = modifiers.match(/\b(Bug|webkit.org|crbug.com|code.google.com)\S*/g);
- if (!bugs)
- return {bugs: '', modifiers: modifiers};
- for (var j = 0; j < bugs.length; j++)
- modifiers = modifiers.replace(bugs[j], '');
- return {bugs: bugs.join(' '), modifiers: string.collapseWhitespace(string.trimString(modifiers))};
-}
-
-function populateExpectationsData(resultsObject)
-{
- var buildInfo = platformAndBuildType(resultsObject.builder);
- var expectations = getExpectations(resultsObject.test, buildInfo.platform, buildInfo.buildType);
- if (!expectations)
- return;
-
- resultsObject.expectations = expectations.expectations;
- var filteredModifiers = filterBugs(expectations.modifiers);
- resultsObject.modifiers = filteredModifiers.modifiers;
- resultsObject.bugs = filteredModifiers.bugs;
- resultsObject.isWontFixSkip = string.contains(expectations.modifiers, 'WONTFIX') || string.contains(expectations.modifiers, 'SKIP');
-}
-
-function platformObjectForName(platformName)
-{
- var platformsList = platformName.split("_");
- var platformObject = PLATFORMS[platformsList.shift()];
- platformsList.forEach(function(platformName) {
- platformObject = platformObject.subPlatforms[platformName];
- });
- return platformObject;
-}
-
-// Data structure to hold the processed expectations.
-// g_allExpectations[testPath][platform][buildType] gets the object that has
-// expectations and modifiers properties for this platform/buildType.
-//
-// platform and buildType both go through fallback sets of keys from most
-// specific key to least specific. For example, on Windows XP, we first
-// check the platform WIN-XP, if there's no such object, we check WIN,
-// then finally we check ALL. For build types, we check the current
-// buildType, then ALL.
-var g_allExpectations;
-
-function getParsedExpectations(data)
-{
- var expectations = [];
- var lines = data.split('\n');
- lines.forEach(function(line) {
- line = string.trimString(line);
- if (!line || string.startsWith(line, '#'))
- return;
-
- // This code mimics _tokenize_line_using_new_format() in
- // Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
- //
- // FIXME: consider doing more error checking here.
- //
- // FIXME: Clean this all up once we've fully cut over to the new syntax.
- var tokens = line.split(/\s+/)
- var parsed_bugs = [];
- var parsed_modifiers = [];
- var parsed_path;
- var parsed_expectations = [];
- var state = 'start';
-
- // This clones _modifier_tokens_list in test_expectations.py.
- // FIXME: unify with the platforms constants at the top of the file.
- var modifier_tokens = {
- 'Release': 'RELEASE',
- 'Debug': 'DEBUG',
- 'Mac': 'MAC',
- 'Win': 'WIN',
- 'Linux': 'LINUX',
- 'SnowLeopard': 'SNOWLEOPARD',
- 'Lion': 'LION',
- 'MountainLion': 'MOUNTAINLION',
- 'Win7': 'WIN7',
- 'XP': 'XP',
- 'Vista': 'VISTA',
- 'Android': 'ANDROID',
- };
-
- var expectation_tokens = {
- 'Crash': 'CRASH',
- 'Failure': 'FAIL',
- 'ImageOnlyFailure': 'IMAGE',
- 'Missing': 'MISSING',
- 'Pass': 'PASS',
- 'Rebaseline': 'REBASELINE',
- 'Skip': 'SKIP',
- 'Slow': 'SLOW',
- 'Timeout': 'TIMEOUT',
- 'WontFix': 'WONTFIX',
- };
-
- var reachedEol = false;
-
- // States
- // - start: Next tokens are bugs or a path.
- // - modifier: Parsed bugs and a '['. Next token is a modifier.
- // - path: Parsed modifiers and a ']'. Next token is a path.
- // - path_found: Parsed a path. Next token is '[' or EOL.
- // - expectations: Parsed a path and a '['. Next tokens are
- // expectations.
- // - done: Parsed expectations and a ']'. Next is EOL.
- // - error: Error occurred. Ignore this line.
- tokens.forEach(function(token) {
- if (reachedEol)
- return;
-
- if (state == 'start' &&
- (token.indexOf('Bug') == 0 ||
- token.indexOf('webkit.org') == 0 ||
- token.indexOf('crbug.com') == 0 ||
- token.indexOf('code.google.com') == 0)) {
- parsed_bugs.push(token);
- } else if (token == '[') {
- if (state == 'start') {
- state = 'modifier';
- } else if (state == 'path_found') {
- state = 'expectations';
- } else {
- console.error('Unexpected \'[\' (state = ' + state + '): ' + line);
- state = 'error';
- return;
- }
- } else if (token == ']') {
- if (state == 'modifier') {
- state = 'path';
- } else if (state == 'expectations') {
- state = 'done';
- } else {
- state = 'error';
- return;
- }
- } else if (state == 'modifier') {
- var modifier = modifier_tokens[token];
- if (!modifier) {
- console.error('Unknown modifier: ' + modifier);
- state = 'error';
- return;
- }
- parsed_modifiers.push(modifier);
- } else if (state == 'expectations') {
- if (token == 'Rebaseline' || token == 'Skip' || token == 'Slow' || token == 'WontFix') {
- parsed_modifiers.push(token.toUpperCase());
- } else {
- var expectation = expectation_tokens[token];
- if (!expectation) {
- console.error('Unknown expectation: ' + expectation);
- state = 'error';
- return;
- }
- parsed_expectations.push(expectation);
- }
- } else if (token == '#') {
- reachedEol = true;
- } else if (state == 'path' || state == 'start') {
- parsed_path = token;
- state = 'path_found';
- } else {
- console.error('Unexpected token (state = ' + state + '): ' + token);
- state = 'error';
- }
- });
-
- if (state != 'path_found' && state != 'done')
- return;
-
- if (!parsed_expectations.length) {
- if (parsed_modifiers.indexOf('Slow') == -1) {
- parsed_modifiers.push('Skip');
- parsed_expectations = ['Pass'];
- }
- }
-
- // FIXME: Should we include line number and comment lines here?
- expectations.push({
- modifiers: parsed_bugs.concat(parsed_modifiers).join(' '),
- path: parsed_path,
- expectations: parsed_expectations.join(' '),
- });
- });
- return expectations;
-}
-
-
-function addTestToAllExpectationsForPlatform(test, platformName, expectations, modifiers)
-{
- if (!g_allExpectations[test])
- g_allExpectations[test] = {};
-
- if (!g_allExpectations[test][platformName])
- g_allExpectations[test][platformName] = {};
-
- var allBuildTypes = [];
- modifiers.split(' ').forEach(function(modifier) {
- if (modifier in BUILD_TYPES) {
- allBuildTypes.push(modifier);
- return;
- }
- });
- if (!allBuildTypes.length)
- allBuildTypes = Object.keys(BUILD_TYPES);
-
- allBuildTypes.forEach(function(buildType) {
- g_allExpectations[test][platformName][buildType] = {modifiers: modifiers, expectations: expectations};
- });
-}
-
-function processExpectationsForPlatform(platformObject, platformName, expectationsArray)
-{
- if (!g_allExpectations)
- g_allExpectations = {};
-
- if (!expectationsArray)
- return;
-
- // Sort the array to hit more specific paths last. More specific
- // paths (e.g. foo/bar/baz.html) override entries for less-specific ones (e.g. foo/bar).
- expectationsArray.sort(alphanumericCompare('path'));
-
- for (var i = 0; i < expectationsArray.length; i++) {
- var path = expectationsArray[i].path;
- var modifiers = expectationsArray[i].modifiers;
- var expectations = expectationsArray[i].expectations;
-
- var shouldProcessExpectation = false;
- var hasPlatformModifierUnions = false;
- if (platformObject.fallbackPlatforms) {
- platformObject.fallbackPlatforms.forEach(function(fallbackPlatform) {
- if (shouldProcessExpectation)
- return;
-
- var fallbackPlatformObject = platformObjectForName(fallbackPlatform);
- if (!fallbackPlatformObject.platformModifierUnions)
- return;
-
- modifiers.split(' ').forEach(function(modifier) {
- if (modifier in fallbackPlatformObject.platformModifierUnions) {
- hasPlatformModifierUnions = true;
- if (fallbackPlatformObject.platformModifierUnions[modifier].indexOf(platformName) != -1)
- shouldProcessExpectation = true;
- }
- });
- });
- }
-
- if (!hasPlatformModifierUnions)
- shouldProcessExpectation = true;
-
- if (!shouldProcessExpectation)
- continue;
-
- getAllTestsTrie().forEach(function(triePath) {
- addTestToAllExpectationsForPlatform(triePath, platformName, expectations, modifiers);
- }, path);
- }
-}
-
-function processExpectations()
-{
- // FIXME: An expectations-by-platform object should be passed into this function rather than checking
- // for a global object. That way this function can be better tested and meaningful errors can
- // be reported when expectations for a given platform are not found in that object.
- if (!g_expectationsByPlatform)
- return;
-
- traversePlatformsTree(function(platform, platformName) {
- if (platform.fallbackPlatforms) {
- platform.fallbackPlatforms.forEach(function(fallbackPlatform) {
- if (fallbackPlatform in g_expectationsByPlatform)
- processExpectationsForPlatform(platform, platformName, g_expectationsByPlatform[fallbackPlatform]);
- });
- }
-
- if (platformName in g_expectationsByPlatform)
- processExpectationsForPlatform(platform, platformName, g_expectationsByPlatform[platformName]);
- });
-
- g_expectationsByPlatform = undefined;
-}
-
-function processMissingTestsWithExpectations(builder, platform, buildType)
-{
- var noFailures = [];
- var skipped = [];
-
- var allTestsForPlatformAndBuildType = allTestsWithSamePlatformAndBuildType(platform, buildType);
- for (var test in g_allExpectations) {
- var expectations = getExpectations(test, platform, buildType);
-
- if (!expectations)
- continue;
-
- // Test has expectations, but no result in the builders results.
- // This means it's either SKIP or passes on all builds.
- if (!allTestsForPlatformAndBuildType[test] && !string.contains(expectations.modifiers, 'WONTFIX')) {
- if (string.contains(expectations.modifiers, 'SKIP'))
- skipped.push(test);
- else if (!expectations.expectations.match(/^\s*PASS\s*$/)) {
- // Don't show tests expected to always pass. This is used in ways like
- // the following:
- // foo/bar = FAIL
- // foo/bar/baz.html = PASS
- noFailures.push({test: test, expectations: expectations.expectations, modifiers: expectations.modifiers});
- }
- }
- }
-
- g_perBuilderSkippedPaths[builder] = skipped.sort();
- g_perBuilderWithExpectationsButNoFailures[builder] = noFailures.sort();
-}
-
function processTestResultsForBuilderAsync(builder)
{
setTimeout(function() { processTestRunsForBuilder(builder); }, 0);
@@ -982,26 +388,24 @@
g_perBuilderFailures[builderName] = [];
return;
}
-
- processExpectations();
- var buildInfo = platformAndBuildType(builderName);
- var platform = buildInfo.platform;
- var buildType = buildInfo.buildType;
- processMissingTestsWithExpectations(builderName, platform, buildType);
-
var failures = [];
var allTestsForThisBuilder = g_resultsByBuilder[builderName].tests;
for (var test in allTestsForThisBuilder) {
var resultsForTest = createResultsObjectForTest(test, builderName);
- populateExpectationsData(resultsForTest);
var rawTest = g_resultsByBuilder[builderName].tests[test];
resultsForTest.rawTimes = rawTest.times;
var rawResults = rawTest.results;
resultsForTest.rawResults = rawResults;
+ if (rawTest.expected)
+ resultsForTest.expectations = rawTest.expected;
+
+ if (rawTest.bugs)
+ resultsForTest.bugs = rawTest.bugs;
+
// FIXME: Switch to resultsByBuild
var times = resultsForTest.rawTimes;
var numTimesSeen = 0;
@@ -1019,16 +423,10 @@
if (rawResults && rawResults[resultsIndex])
currentResult = rawResults[resultsIndex][RLE.VALUE];
- var time = times[i][RLE.VALUE]
-
- // Ignore times for crashing/timeout runs for the sake of seeing if
- // a test should be marked slow.
- if (currentResult != 'C' && currentResult != 'T')
- resultsForTest.slowestNonTimeoutCrashTime = Math.max(resultsForTest.slowestNonTimeoutCrashTime, time);
- resultsForTest.slowestTime = Math.max(resultsForTest.slowestTime, time);
+ resultsForTest.slowestTime = Math.max(resultsForTest.slowestTime, times[i][RLE.VALUE]);
}
- processMissingAndExtraExpectations(resultsForTest);
+ determineFlakiness(g_resultsByBuilder[builderName][FAILURE_MAP_KEY], resultsForTest);
failures.push(resultsForTest);
if (!g_testToResultsMap[test])
@@ -1039,7 +437,7 @@
g_perBuilderFailures[builderName] = failures;
}
-function processMissingAndExtraExpectations(resultsForTest)
+function determineFlakiness(failureMap, resultsForTest)
{
// Heuristic for determining whether expectations apply to a given test:
// -If a test result happens < MIN_RUNS_FOR_FLAKE, then consider it a flaky
@@ -1051,12 +449,6 @@
// a few runs, then being fixed or otherwise modified in a non-flaky way.
var rawResults = resultsForTest.rawResults;
- // If the first result is no-data that means the test is skipped or is
- // being run on a different builder (e.g. moved from one shard to another).
- // Ignore these results since we have no real data about what's going on.
- if (rawResults[0][RLE.VALUE] == 'N')
- return;
-
// Only consider flake if it doesn't happen twice in a row.
var MIN_RUNS_FOR_FLAKE = 2;
var resultsMap = {}
@@ -1084,109 +476,14 @@
continue;
}
- var expectation = expectationsFileStringForResult(result);
+ var expectation = failureMap[result];
resultsMap[expectation] = true;
numRealResults++;
}
+ resultsForTest.actualResults = Object.keys(resultsMap);
resultsForTest.flips = i - 1;
resultsForTest.isFlaky = numRealResults > 1;
-
- var missingExpectations = [];
- var extraExpectations = [];
-
- if (g_history.isLayoutTestResults()) {
- var expectationsArray = resultsForTest.expectations ? resultsForTest.expectations.split(' ') : [];
- extraExpectations = expectationsArray.filter(
- function(element) {
- // FIXME: Once all the FAIL lines are removed from
- // TestExpectations, delete all the legacyExpectationsSemantics
- // code.
- if (g_history.dashboardSpecificState.legacyExpectationsSemantics) {
- if (element == 'FAIL') {
- for (var i = 0; i < FAIL_RESULTS.length; i++) {
- if (resultsMap[FAIL_RESULTS[i]])
- return false;
- }
- return true;
- }
- }
-
- return element && !resultsMap[element] && !string.contains(element, 'BUG');
- });
-
- for (var result in resultsMap) {
- resultsForTest.actualResults.push(result);
- var hasExpectation = false;
- for (var i = 0; i < expectationsArray.length; i++) {
- var expectation = expectationsArray[i];
- // FIXME: Once all the FAIL lines are removed from
- // TestExpectations, delete all the legacyExpectationsSemantics
- // code.
- if (g_history.dashboardSpecificState.legacyExpectationsSemantics) {
- if (expectation == 'FAIL') {
- for (var j = 0; j < FAIL_RESULTS.length; j++) {
- if (result == FAIL_RESULTS[j]) {
- hasExpectation = true;
- break;
- }
- }
- }
- }
-
- if (result == expectation)
- hasExpectation = true;
-
- if (hasExpectation)
- break;
- }
- // If we have no expectations for a test and it only passes, then don't
- // list PASS as a missing expectation. We only want to list PASS if it
- // flaky passes, so there would be other expectations.
- if (!hasExpectation && !(!expectationsArray.length && result == 'PASS' && numRealResults == 1))
- missingExpectations.push(result);
- }
-
- // Only highlight tests that take > 2 seconds as needing to be marked as
- // slow. There are too many tests that take ~2 seconds every couple
- // hundred runs. It's not worth the manual maintenance effort.
- // Also, if a test times out, then it should not be marked as slow.
- var minTimeForNeedsSlow = isDebug(resultsForTest.builder) ? 2 : 1;
- if (isSlowTest(resultsForTest) && !resultsMap['TIMEOUT'] && (!resultsForTest.modifiers || !string.contains(resultsForTest.modifiers, 'SLOW')))
- missingExpectations.push('SLOW');
- else if (isFastTest(resultsForTest) && resultsForTest.modifiers && string.contains(resultsForTest.modifiers, 'SLOW'))
- extraExpectations.push('SLOW');
-
- // If there are no missing results or modifiers besides build
- // type, platform, or bug and the expectations are all extra
- // that is, extraExpectations - expectations = PASS,
- // include PASS as extra, since that means this line in
- // test_expectations can be deleted..
- if (!missingExpectations.length && !(resultsForTest.modifiers && realModifiers(resultsForTest.modifiers))) {
- var extraPlusPass = extraExpectations.concat(['PASS']);
- if (extraPlusPass.sort().toString() == expectationsArray.slice(0).sort().toString())
- extraExpectations.push('PASS');
- }
-
- }
-
- resultsForTest.meetsExpectations = !missingExpectations.length && !extraExpectations.length;
- resultsForTest.missing = missingExpectations.sort().join(' ');
- resultsForTest.extra = extraExpectations.sort().join(' ');
-}
-
-var BUG_URL_PREFIX = '<a href="http://';
-var BUG_URL_POSTFIX = '/$1">crbug.com/$1</a> ';
-var WEBKIT_BUG_URL_POSTFIX = '/$1">webkit.org/b/$1</a> ';
-var INTERNAL_BUG_REPLACE_VALUE = BUG_URL_PREFIX + 'b' + BUG_URL_POSTFIX;
-var EXTERNAL_BUG_REPLACE_VALUE = BUG_URL_PREFIX + 'crbug.com' + BUG_URL_POSTFIX;
-var WEBKIT_BUG_REPLACE_VALUE = BUG_URL_PREFIX + 'webkit.org/b' + WEBKIT_BUG_URL_POSTFIX;
-
-function htmlForBugs(bugs)
-{
- bugs = bugs.replace(/crbug.com\/(\d+)(\ |$)/g, EXTERNAL_BUG_REPLACE_VALUE);
- bugs = bugs.replace(/webkit.org\/b\/(\d+)(\ |$)/g, WEBKIT_BUG_REPLACE_VALUE);
- return bugs;
}
function linkHTMLToOpenWindow(url, text)
@@ -1200,10 +497,11 @@
{
var currentIndex = 0;
var rawResults = g_resultsByBuilder[builder].tests[testName].results;
+ var failureMap = g_resultsByBuilder[builder][FAILURE_MAP_KEY];
for (var i = 0; i < rawResults.length; i++) {
currentIndex += rawResults[i][RLE.LENGTH];
if (currentIndex > index)
- return isFailingResult(rawResults[i][RLE.VALUE]);
+ return isFailingResult(failureMap, rawResults[i][RLE.VALUE]);
}
console.error('Index exceeds number of results: ' + index);
}
@@ -1213,11 +511,12 @@
{
var rawResults = g_resultsByBuilder[builder].tests[testName].results;
var buildNumbers = g_resultsByBuilder[builder].buildNumbers;
+ var failureMap = g_resultsByBuilder[builder][FAILURE_MAP_KEY];
var index = 0;
var failures = [];
for (var i = 0; i < rawResults.length; i++) {
var numResults = rawResults[i][RLE.LENGTH];
- if (isFailingResult(rawResults[i][RLE.VALUE])) {
+ if (isFailingResult(failureMap, rawResults[i][RLE.VALUE])) {
for (var j = 0; j < numResults; j++)
failures.push(index + j);
}
@@ -1246,8 +545,10 @@
var master = builderMaster(builder);
var buildBasePath = master.logPath(builder, buildNumber);
- html += '<ul><li>' + linkHTMLToOpenWindow(buildBasePath, 'Build log') +
- '</li><li>Blink: ' + ui.html.blinkRevisionLink(g_resultsByBuilder[builder], index) + '</li>';
+ html += '<ul><li>' + linkHTMLToOpenWindow(buildBasePath, 'Build log');
+
+ if (g_resultsByBuilder[builder][BLINK_REVISIONS_KEY])
+ html += '</li><li>Blink: ' + ui.html.blinkRevisionLink(g_resultsByBuilder[builder], index) + '</li>';
html += '</li><li>Chromium: ' + ui.html.chromiumRevisionLink(g_resultsByBuilder[builder], index) + '</li>';
@@ -1264,6 +565,11 @@
ui.popup.show(e.target, html);
}
+function classNameForFailureString(failure)
+{
+ return failure.replace(/(\+|\ )/, '');
+}
+
function htmlForTestResults(test)
{
var html = '';
@@ -1275,21 +581,18 @@
var indexToReplaceCurrentResult = -1;
var indexToReplaceCurrentTime = -1;
- var currentResultArray, currentTimeArray, currentResult, innerHTML, resultString;
for (var i = 0; i < buildNumbers.length; i++) {
+ var currentResultArray, currentTimeArray, innerHTML, resultString;
+
if (i > indexToReplaceCurrentResult) {
currentResultArray = results.shift();
if (currentResultArray) {
- currentResult = currentResultArray[RLE.VALUE];
- // Treat simplified diff failures as just text failures.
- if (currentResult == 'S')
- currentResult = 'F';
+ resultString = g_resultsByBuilder[builder][FAILURE_MAP_KEY][currentResultArray[RLE.VALUE]];
indexToReplaceCurrentResult += currentResultArray[RLE.LENGTH];
} else {
- currentResult = 'N';
+ resultString = NO_DATA;
indexToReplaceCurrentResult += buildNumbers.length;
}
- resultString = expectationsFileStringForResult(currentResult);
}
if (i > indexToReplaceCurrentTime) {
@@ -1304,70 +607,27 @@
innerHTML = currentTime || ' ';
}
- html += '<td title="' + (resultString || 'NO DATA') + '. Click for more info." class="results ' + currentResult +
+ html += '<td title="' + resultString + '. Click for more info." class="results ' + classNameForFailureString(resultString) +
'" onclick=\'showPopupForBuild(event, "' + builder + '",' + i + ',"' + test.test + '")\'>' + innerHTML;
}
return html;
}
-function htmlForTestsWithExpectationsButNoFailures(builder)
+function shouldShowTest(testResult)
{
- var tests = g_perBuilderWithExpectationsButNoFailures[builder];
- var skippedPaths = g_perBuilderSkippedPaths[builder];
- var showUnexpectedPassesLink = linkHTMLToToggleState('showUnexpectedPasses', 'tests that have not failed in last ' + g_resultsByBuilder[builder].buildNumbers.length + ' runs');
- var showSkippedLink = linkHTMLToToggleState('showSkipped', 'skipped tests in TestExpectations');
-
- var html = '';
- if (g_history.isLayoutTestResults() && (tests.length || skippedPaths.length)) {
- var buildInfo = platformAndBuildType(builder);
- html += '<h2 style="display:inline-block">Expectations for ' + buildInfo.platform + '-' + buildInfo.buildType + '</h2> ';
- if (!g_history.dashboardSpecificState.showUnexpectedPasses && tests.length)
- html += showUnexpectedPassesLink;
- html += ' ';
- if (!g_history.dashboardSpecificState.showSkipped && skippedPaths.length)
- html += showSkippedLink;
- }
-
- var open = '<div onclick="selectContents(this)">';
-
- if (g_history.dashboardSpecificState.showUnexpectedPasses && tests.length) {
- html += '<div id="passing-tests">' + showUnexpectedPassesLink;
- for (var i = 0; i < tests.length; i++)
- html += open + tests[i].test + '</div>';
- html += '</div>';
- }
-
- if (g_history.dashboardSpecificState.showSkipped && skippedPaths.length)
- html += '<div id="skipped-tests">' + showSkippedLink + open + skippedPaths.join('</div>' + open) + '</div></div>';
- return html + '<br>';
-}
-
-// Returns whether we should exclude test results from the test table.
-function shouldHideTest(testResult)
-{
- // For non-layout tests, we always show everything.
if (!g_history.isLayoutTestResults())
- return false;
+ return true;
- if (testResult.isWontFixSkip)
- return !g_history.dashboardSpecificState.showWontFixSkip;
+ if (testResult.expectations == 'WONTFIX')
+ return g_history.dashboardSpecificState.showWontFix;
+
+ if (testResult.expectations == 'SKIP')
+ return g_history.dashboardSpecificState.showSkip;
if (testResult.isFlaky)
- return !g_history.dashboardSpecificState.showFlaky;
+ return g_history.dashboardSpecificState.showFlaky;
- if (isSlowTest(testResult))
- return !g_history.dashboardSpecificState.showSlow;
-
- if (testResult.meetsExpectations)
- return !g_history.dashboardSpecificState.showCorrectExpectations;
-
- return !g_history.dashboardSpecificState.showWrongExpectations;
-}
-
-// Sets the browser's selection to the element's contents.
-function selectContents(element)
-{
- window.getSelection().selectAllChildren(element);
+ return g_history.dashboardSpecificState.showNonFlaky;
}
function createBugHTML(test)
@@ -1379,12 +639,12 @@
'[insert probable cause]');
url = 'https://code.google.com/p/chromium/issues/entry?template=Layout%20Test%20Failure&summary=' + title + '&comment=' + description;
- return '<a href="' + url + '" class="file-bug">FILE BUG</a>';
+ return '<a href="' + url + '">File new bug</a>';
}
function isCrossBuilderView()
{
- return g_history.dashboardSpecificState.tests || g_history.dashboardSpecificState.result || g_history.dashboardSpecificState.expectationsUpdate;
+ return g_history.dashboardSpecificState.tests || g_history.dashboardSpecificState.result;
}
function tableHeaders(opt_getAll)
@@ -1397,22 +657,28 @@
headers.push('test');
if (g_history.isLayoutTestResults() || opt_getAll)
- headers.push('bugs', 'modifiers', 'expectations');
+ headers.push('bugs', 'expectations');
headers.push('slowest run', 'flakiness (numbers are runtimes in seconds)');
return headers;
}
+function linkifyBugs(bugs)
+{
+ var html = '';
+ bugs.forEach(function(bug) {
+ var bugHtml;
+ if (string.startsWith(bug, 'Bug('))
+ bugHtml = bug;
+ else
+ bugHtml = '<a href="http://' + bug + '">' + bug + '</a>';
+ html += '<div>' + bugHtml + '</div>'
+ });
+ return html;
+}
+
function htmlForSingleTestRow(test)
{
- if (!isCrossBuilderView() && shouldHideTest(test)) {
- // The innerHTML call is considerably faster if we exclude the rows for
- // items we're not showing than if we hide them using display:none.
- // For the crossBuilderView, we want to show all rows the user is
- // explicitly listing tests to view.
- return '';
- }
-
var headers = tableHeaders();
var html = '';
for (var i = 0; i < headers.length; i++) {
@@ -1426,9 +692,8 @@
html += '<tr><td class="' + testCellClassName + '">' + testCellHTML;
} else if (string.startsWith(header, 'bugs'))
- html += '<td class=options-container>' + (test.bugs ? htmlForBugs(test.bugs) : createBugHTML(test));
- else if (string.startsWith(header, 'modifiers'))
- html += '<td class=options-container>' + test.modifiers;
+ // FIXME: linkify bugs.
+ html += '<td class=options-container>' + (linkifyBugs(test.bugs) || createBugHTML(test));
else if (string.startsWith(header, 'expectations'))
html += '<td class=options-container>' + test.expectations;
else if (string.startsWith(header, 'slowest'))
@@ -1550,243 +815,6 @@
tests.sort(sortFunctionGetter(resultsProperty, order == BACKWARD));
}
-// Sorts a space separated expectations string in alphanumeric order.
-// @param {string} str The expectations string.
-// @return {string} The sorted string.
-function sortExpectationsString(str)
-{
- return str.split(' ').sort().join(' ');
-}
-
-function addUpdate(testsNeedingUpdate, test, builderName, missing, extra)
-{
- if (!testsNeedingUpdate[test])
- testsNeedingUpdate[test] = {};
-
- var buildInfo = platformAndBuildType(builderName);
- var builder = buildInfo.platform + ' ' + buildInfo.buildType;
- if (!testsNeedingUpdate[test][builder])
- testsNeedingUpdate[test][builder] = {};
-
- if (missing)
- testsNeedingUpdate[test][builder].missing = sortExpectationsString(missing);
-
- if (extra)
- testsNeedingUpdate[test][builder].extra = sortExpectationsString(extra);
-}
-
-
-// From a string of modifiers, returns a string of modifiers that
-// are for real result changes, like SLOW, and excludes modifiers
-// that specificy things like platform, build_type, bug.
-// @param {string} modifierString String containing all modifiers.
-// @return {string} String containing only modifiers that effect the results.
-function realModifiers(modifierString)
-{
- var modifiers = modifierString.split(' ');;
- return modifiers.filter(function(modifier) {
- if (modifier in BUILD_TYPES || string.startsWith(modifier, 'BUG'))
- return false;
-
- var matchesPlatformOrUnion = false;
- traversePlatformsTree(function(platform, platformName) {
- if (matchesPlatformOrUnion)
- return;
-
- if (platform.fallbackPlatforms) {
- platform.fallbackPlatforms.forEach(function(fallbackPlatform) {
- if (matchesPlatformOrUnion)
- return;
-
- var fallbackPlatformObject = platformObjectForName(fallbackPlatform);
- if (!fallbackPlatformObject.platformModifierUnions)
- return;
-
- matchesPlatformOrUnion = modifier in fallbackPlatformObject.subPlatforms || modifier in fallbackPlatformObject.platformModifierUnions;
- });
- }
- });
-
- return !matchesPlatformOrUnion;
- }).join(' ');
-}
-
-function generatePageForExpectationsUpdate()
-{
- // Always show all runs when auto-updating expectations.
- if (!g_history.crossDashboardState.showAllRuns)
- g_history.setQueryParameter('showAllRuns', true);
-
- processTestRunsForAllBuilders();
- var testsNeedingUpdate = {};
- for (var test in g_testToResultsMap) {
- var results = g_testToResultsMap[test];
- for (var i = 0; i < results.length; i++) {
- var thisResult = results[i];
-
- if (!thisResult.missing && !thisResult.extra)
- continue;
-
- var allPassesOrNoDatas = thisResult.rawResults.filter(function (x) { return x[1] != "P" && x[1] != "N"; }).length == 0;
-
- if (allPassesOrNoDatas)
- continue;
-
- addUpdate(testsNeedingUpdate, test, thisResult.builder, thisResult.missing, thisResult.extra);
- }
- }
-
- for (var builder in currentBuilders()) {
- var tests = g_perBuilderWithExpectationsButNoFailures[builder]
- for (var i = 0; i < tests.length; i++) {
- // Anything extra in this case is what is listed in expectations
- // plus modifiers other than bug, platform, build type.
- var modifiers = realModifiers(tests[i].modifiers);
- var extras = tests[i].expectations;
- extras += modifiers ? ' ' + modifiers : '';
- addUpdate(testsNeedingUpdate, tests[i].test, builder, null, extras);
- }
- }
-
- // Get the keys in alphabetical order, so it is easy to process groups
- // of tests.
- var keys = Object.keys(testsNeedingUpdate).sort();
- showUpdateInfoForTest(testsNeedingUpdate, keys);
-}
-
-// Show the test results and the json for differing expectations, and
-// allow the user to include or exclude this update.
-//
-// @param {Object} testsNeedingUpdate Tests that need updating.
-// @param {Array.<string>} keys Keys into the testNeedingUpdate object.
-function showUpdateInfoForTest(testsNeedingUpdate, keys)
-{
- var test = keys[g_history.dashboardSpecificState.updateIndex];
- document.body.innerHTML = '';
-
- // FIXME: Make this DOM creation less verbose.
- var index = document.createElement('div');
- index.style.cssFloat = 'right';
- index.textContent = (g_history.dashboardSpecificState.updateIndex + 1) + ' of ' + keys.length + ' tests';
- document.body.appendChild(index);
-
- var buttonRegion = document.createElement('div');
- var includeBtn = document.createElement('input');
- includeBtn.type = 'button';
- includeBtn.value = 'include selected';
- includeBtn.addEventListener('click', partial(handleUpdate, testsNeedingUpdate, keys), false);
- buttonRegion.appendChild(includeBtn);
-
- var previousBtn = document.createElement('input');
- previousBtn.type = 'button';
- previousBtn.value = 'previous';
- previousBtn.addEventListener('click',
- function() {
- setUpdateIndex(g_history.dashboardSpecificState.updateIndex - 1, testsNeedingUpdate, keys);
- },
- false);
- buttonRegion.appendChild(previousBtn);
-
- var nextBtn = document.createElement('input');
- nextBtn.type = 'button';
- nextBtn.value = 'next';
- nextBtn.addEventListener('click', partial(nextUpdate, testsNeedingUpdate, keys), false);
- buttonRegion.appendChild(nextBtn);
-
- var doneBtn = document.createElement('input');
- doneBtn.type = 'button';
- doneBtn.value = 'done';
- doneBtn.addEventListener('click', finishUpdate, false);
- buttonRegion.appendChild(doneBtn);
-
- document.body.appendChild(buttonRegion);
-
- var updates = testsNeedingUpdate[test];
- var checkboxes = document.createElement('div');
- for (var builder in updates) {
- // Create a checkbox for each builder.
- var checkboxRegion = document.createElement('div');
- var checkbox = document.createElement('input');
- checkbox.type = 'checkbox';
- checkbox.id = builder;
- checkbox.checked = true;
- checkboxRegion.appendChild(checkbox);
- checkboxRegion.appendChild(document.createTextNode(builder + ' : ' + JSON.stringify(updates[builder])));
- checkboxes.appendChild(checkboxRegion);
- }
- document.body.appendChild(checkboxes);
-
- var div = document.createElement('div');
- div.innerHTML = htmlForIndividualTestOnAllBuildersWithResultsLinks(test);
- document.body.appendChild(div);
- appendExpectations();
-}
-
-
-// When the user has finished selecting expectations to update, provide them
-// with json to copy over.
-function finishUpdate()
-{
- document.body.innerHTML = 'The next step is to copy the output below ' +
- 'into a local file and save it. Then, run<br><code>python ' +
- 'src/webkit/tools/layout_tests/webkitpy/layout_tests/update_expectat' +
- 'ions_from_dashboard.py path/to/local/file</code><br>in order to ' +
- 'update the expectations file.<br><textarea id="results" '+
- 'style="width:600px;height:600px;"> ' +
- JSON.stringify(g_confirmedTests) + '</textarea>';
- results.focus();
- document.execCommand('SelectAll');
-}
-
-// Handle user click on "include selected" button.
-// Includes the tests that are selected and exclude the rest.
-// @param {Object} testsNeedingUpdate Tests that need updating.
-// @param {Array.<string>} keys Keys into the testNeedingUpdate object.
-function handleUpdate(testsNeedingUpdate, keys)
-{
- var test = keys[g_history.dashboardSpecificState.updateIndex];
- var updates = testsNeedingUpdate[test];
- for (var builder in updates) {
- // Add included tests, and delete excluded tests if
- // they were previously included.
- if ($(builder).checked) {
- if (!g_confirmedTests[test])
- g_confirmedTests[test] = {};
- g_confirmedTests[test][builder] = testsNeedingUpdate[test][builder];
- } else if (g_confirmedTests[test] && g_confirmedTests[test][builder]) {
- delete g_confirmedTests[test][builder];
- if (!Object.keys(g_confirmedTests[test]).length)
- delete g_confirmedTests[test];
- }
- }
- nextUpdate(testsNeedingUpdate, keys);
-}
-
-
-// Move to the next item to update.
-// @param {Object} testsNeedingUpdate Tests that need updating.
-// @param {Array.<string>} keys Keys into the testNeedingUpdate object.
-function nextUpdate(testsNeedingUpdate, keys)
-{
- setUpdateIndex(g_history.dashboardSpecificState.updateIndex + 1, testsNeedingUpdate, keys);
-}
-
-
-// Advance the index we are updating at. If we walk over the end
-// or beginning, just loop.
-// @param {string} newIndex The index into the keys to move to.
-// @param {Object} testsNeedingUpdate Tests that need updating.
-// @param {Array.<string>} keys Keys into the testNeedingUpdate object.
-function setUpdateIndex(newIndex, testsNeedingUpdate, keys)
-{
- if (newIndex == -1)
- newIndex = keys.length - 1;
- else if (newIndex == keys.length)
- newIndex = 0;
- g_history.setQueryParameter("updateIndex", newIndex);
- showUpdateInfoForTest(testsNeedingUpdate, keys);
-}
-
function htmlForIndividualTestOnAllBuilders(test)
{
processTestRunsForAllBuilders();
@@ -1932,15 +960,14 @@
};
var url = base + platformPart + path;
- if (isImage || !string.startsWith(base, 'http://svn.webkit.org')) {
+ if (isImage) {
var dummyNode = document.createElement(isImage ? 'img' : 'script');
dummyNode.src = url;
dummyNode.onload = function() {
var item;
if (isImage) {
item = dummyNode;
- if (string.startsWith(base, 'http://svn.webkit.org'))
- maybeAddPngChecksum(item, url);
+ maybeAddPngChecksum(item, url);
} else {
item = document.createElement('iframe');
item.src = url;
@@ -2191,27 +1218,6 @@
container.appendChild(dummyNode);
}
-function buildInfoForRevision(builder, revision)
-{
- var revisions = g_resultsByBuilder[builder][BLINK_REVISION_KEY];
- var revisionStart = 0, revisionEnd = 0, buildNumber = 0;
- for (var i = 0; i < revisions.length; i++) {
- if (revision > revisions[i]) {
- revisionStart = revisions[i - 1];
- revisionEnd = revisions[i];
- buildNumber = g_resultsByBuilder[builder].buildNumbers[i - 1];
- break;
- }
- }
-
- if (revisionEnd)
- revisionEnd++;
- else
- revisionEnd = '';
-
- return {revisionStart: revisionStart, revisionEnd: revisionEnd, buildNumber: buildNumber};
-}
-
function lookupVirtualTestSuite(test) {
for (var suite in VIRTUAL_SUITES) {
if (test.indexOf(suite) != -1)
@@ -2234,15 +1240,15 @@
var suite = lookupVirtualTestSuite(test);
if (!suite)
- addExpectationItem(expectationsContainers, expectationsContainer, null, test, TEST_URL_BASE_PATH);
+ addExpectationItem(expectationsContainers, expectationsContainer, null, test, TEST_URL_BASE_PATH_FOR_XHR);
addExpectations(expectationsContainers, expectationsContainer,
- TEST_URL_BASE_PATH, '', text, png, reftest_html_file, reftest_mismatch_html_file, suite);
+ TEST_URL_BASE_PATH_FOR_XHR, '', text, png, reftest_html_file, reftest_mismatch_html_file, suite);
var fallbacks = allFallbacks();
for (var i = 0; i < fallbacks.length; i++) {
var fallback = 'platform/' + fallbacks[i];
- addExpectations(expectationsContainers, expectationsContainer, TEST_URL_BASE_PATH, fallback, text, png,
+ addExpectations(expectationsContainers, expectationsContainer, TEST_URL_BASE_PATH_FOR_XHR, fallback, text, png,
reftest_html_file, reftest_mismatch_html_file, suite);
}
@@ -2362,7 +1368,7 @@
if (g_history.isLayoutTestResults()) {
var suite = lookupVirtualTestSuite(test);
var base = suite ? baseTest(test, suite) : test;
- var versionControlUrl = TEST_URL_BASE_PATH_IN_VERSION_CONTROL + base;
+ var versionControlUrl = TEST_URL_BASE_PATH_FOR_BROWSING + base;
testNameHtml += '<h2>' + linkHTMLToOpenWindow(versionControlUrl, test) + '</h2>';
} else
testNameHtml += '<h2>' + test + '</h2>';
@@ -2407,18 +1413,17 @@
function headerForTestTableHtml()
{
return '<h2 style="display:inline-block">Failing tests</h2>' +
- checkBoxToToggleState('showWontFixSkip', 'WONTFIX/SKIP') +
- checkBoxToToggleState('showCorrectExpectations', 'tests with correct expectations') +
- checkBoxToToggleState('showWrongExpectations', 'tests with wrong expectations') +
- checkBoxToToggleState('showFlaky', 'flaky') +
- checkBoxToToggleState('showSlow', 'slow');
+ checkBoxToToggleState('showFlaky', 'Show flaky') +
+ checkBoxToToggleState('showNonFlaky', 'Show non-flaky') +
+ checkBoxToToggleState('showSkip', 'Show Skip') +
+ checkBoxToToggleState('showWontFix', 'Show WontFix');
}
function generatePageForBuilder(builderName)
{
processTestRunsForBuilder(builderName);
- var results = g_perBuilderFailures[builderName];
+ var results = g_perBuilderFailures[builderName].filter(shouldShowTest);
sortTests(results, g_history.dashboardSpecificState.sortColumn, g_history.dashboardSpecificState.sortOrder);
var testsHTML = '';
@@ -2428,17 +1433,16 @@
tableRowsHTML += htmlForSingleTestRow(results[i])
testsHTML = htmlForTestTable(tableRowsHTML);
} else {
- testsHTML = '<div>No tests found. ';
if (g_history.isLayoutTestResults())
- testsHTML += 'Try showing tests with correct expectations.</div>';
+ testsHTML += '<div>Fill in one of the text inputs or checkboxes above to show failures.</div>';
else
- testsHTML += 'This means no tests have failed!</div>';
+ testsHTML += '<div>No tests have failed!</div>';
}
var html = htmlForNavBar();
if (g_history.isLayoutTestResults())
- html += htmlForTestsWithExpectationsButNoFailures(builderName) + headerForTestTableHtml();
+ html += headerForTestTableHtml();
html += '<br>' + testsHTML;
appendHTML(html);
@@ -2458,7 +1462,6 @@
showChrome: 1,
showExpectations: 1,
showLargeExpectations: 1,
- legacyExpectationsSemantics: 1,
resultsHeight: 1,
revision: 1
};
@@ -2499,20 +1502,26 @@
var html = '<div id=legend-toggle onclick="hideLegend()">Hide ' +
'legend [type esc]</div><div id=legend-contents>';
- for (var expectation in expectationsMap())
- html += '<div class=' + expectation + '>' + expectationsMap()[expectation] + '</div>';
+
+ // Just grab the first failureMap. Technically, different builders can have different maps if they
+ // haven't all cycled after the map was changed, but meh.
+ var failureMap = g_resultsByBuilder[Object.keys(g_resultsByBuilder)[0]][FAILURE_MAP_KEY];
+ for (var expectation in failureMap) {
+ var failureString = failureMap[expectation];
+ html += '<div class=' + classNameForFailureString(failureString) + '>' + failureString + '</div>';
+ }
if (g_history.isLayoutTestResults()) {
html += '</div><br style="clear:both">' +
- '</div><h3>Test expectatons fallback order.</h3>';
+ '</div><h3>Test expectations fallback order.</h3>';
for (var platform in g_fallbacksMap)
html += '<div class=fallback-header>' + platform + '</div>' + htmlForFallbackHelp(g_fallbacksMap[platform]);
- html += '<div>TIMES:</div>' +
- htmlForSlowTimes(MIN_SECONDS_FOR_SLOW_TEST) +
- '<div>DEBUG TIMES:</div>' +
- htmlForSlowTimes(MIN_SECONDS_FOR_SLOW_TEST_DEBUG);
+ html += '<div>RELEASE TIMEOUTS:</div>' +
+ htmlForSlowTimes(RELEASE_TIMEOUT) +
+ '<div>DEBUG TIMEOUTS:</div>' +
+ htmlForSlowTimes(DEBUG_TIMEOUT);
}
legend.innerHTML = html;
@@ -2520,9 +1529,8 @@
function htmlForSlowTimes(minTime)
{
- return '<ul><li><1 second == !SLOW</li><li>>1 second && <' +
- minTime + ' seconds == SLOW || !SLOW is fine</li><li>>' +
- minTime + ' seconds == SLOW</li></ul>';
+ return '<ul><li>' + minTime + ' seconds</li><li>' +
+ SLOW_MULTIPLIER * minTime + ' seconds if marked Slow in TestExpectations</li></ul>';
}
function postHeightChangedMessage()
diff --git a/Tools/TestResultServer/static-dashboards/flakiness_dashboard_tests.css b/Tools/TestResultServer/static-dashboards/flakiness_dashboard_tests.css
index 55d912c..edabb5a 100644
--- a/Tools/TestResultServer/static-dashboards/flakiness_dashboard_tests.css
+++ b/Tools/TestResultServer/static-dashboards/flakiness_dashboard_tests.css
@@ -103,36 +103,39 @@
float: left;
border: 1px solid grey;
}
-.P {
+.PASS {
background-color: #3f3;
}
-.N {
+.NODATA, .NOTRUN {
background-color: #fff;
}
-.X {
+.SKIP {
background-color: lightgray;
}
-.C {
+.CRASH {
background-color: #c90;
}
-.T {
+.TIMEOUT {
background-color: #fffc6c;
}
-.I {
+.IMAGE {
background-color: #69f;
}
-.S {
- background-color: #c6c;
-}
-.F {
+.TEXT {
background-color: #e98080;
}
-.O {
+.MISSING {
background-color: #8a7700;
}
-.Z {
+.IMAGETEXT {
background-color: #96f;
}
+.AUDIO {
+ background-color: lightblue;
+}
+.FLAKY {
+ background-color: turquoise;
+}
.separator {
border: 1px solid lightgray;
height: 0px;
@@ -233,10 +236,6 @@
right: 0;
z-index: 1;
}
-.file-bug {
- font-weight: bold;
- font-size: 11px;
-}
.pngchecksum {
position: absolute;
right: 0;
diff --git a/Tools/TestResultServer/static-dashboards/flakiness_dashboard_unittests.js b/Tools/TestResultServer/static-dashboards/flakiness_dashboard_unittests.js
index a7a792b..1cef5fb 100644
--- a/Tools/TestResultServer/static-dashboards/flakiness_dashboard_unittests.js
+++ b/Tools/TestResultServer/static-dashboards/flakiness_dashboard_unittests.js
@@ -26,14 +26,13 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+module('flakiness_dashboard');
+
// FIXME(jparent): Rename this once it isn't globals.
function resetGlobals()
{
allExpectations = null;
- allTests = null;
- g_expectationsByPlatform = {};
g_resultsByBuilder = {};
- g_allExpectations = null;
g_allTestsTrie = null;
var historyInstance = new history.History(flakinessConfig);
// FIXME(jparent): Remove this once global isn't used.
@@ -55,346 +54,8 @@
return historyInstance;
}
-test('getParsedExpectationsCommentLine', 1, function() {
- var expectations = getParsedExpectations('# Comment line crbug.com/12345 [ Release ] tests/foobar.html [ Failure ]');
- equal(expectations.length, 0, 'Number of expectations');
-});
-
-test('getParsedExpectationsSimpleInput', 4, function() {
- var expectations = getParsedExpectations('crbug.com/12345 [ Release ] tests/foobar.html [ Failure ]');
- equal(expectations.length, 1, 'Number of expectations');
- equal(expectations[0].modifiers, 'crbug.com/12345 RELEASE', 'modifiers');
- equal(expectations[0].path, 'tests/foobar.html', 'path');
- equal(expectations[0].expectations, 'FAIL', 'expectations');
-});
-
-test('getParsedExpectationsSimpleInputWithComment', 4, function() {
- var expectations = getParsedExpectations('crbug.com/12345 [ Release ] tests/foobar.html [ Failure ] # Comment');
- equal(expectations.length, 1, 'Number of expectations');
- equal(expectations[0].modifiers, 'crbug.com/12345 RELEASE', 'modifiers');
- equal(expectations[0].path, 'tests/foobar.html', 'path');
- equal(expectations[0].expectations, 'FAIL', 'expectations');
-});
-
-test('getParsedExpectationsOnlyBug', 1, function() {
- var expectations = getParsedExpectations('crbug.com/12345');
- equal(expectations.length, 0, 'Number of expectations');
-});
-
-test('getParsedExpectationsTwoBugs', 4, function() {
- var expectations = getParsedExpectations('crbug.com/12345 tests/foobar.html [ Failure ]');
- equal(expectations.length, 1, 'Number of expectations');
- equal(expectations[0].modifiers, 'crbug.com/12345', 'modifiers');
- equal(expectations[0].path, 'tests/foobar.html', 'path');
- equal(expectations[0].expectations, 'FAIL', 'expectations');
-});
-
-test('getParsedExpectationsNoBug', 4, function() {
- var expectations = getParsedExpectations('tests/foobar.html [ Failure ]');
- equal(expectations.length, 1, 'Number of expectations');
- equal(expectations[0].modifiers, '', 'modifiers');
- equal(expectations[0].path, 'tests/foobar.html', 'path');
- equal(expectations[0].expectations, 'FAIL', 'expectations');
-});
-
-test('getParsedExpectationsBugPrefixInPath', 4, function() {
- var expectations = getParsedExpectations('Bug12345 Bug67890 tests/Bug.html [ Failure ]');
- equal(expectations.length, 1, 'Number of expectations');
- equal(expectations[0].modifiers, 'Bug12345 Bug67890', 'modifiers');
- equal(expectations[0].path, 'tests/Bug.html', 'path');
- equal(expectations[0].expectations, 'FAIL', 'expectations');
-});
-
-test('getParsedExpectationsTwoModifiers', 4, function() {
- var expectations = getParsedExpectations('crbug.com/12345 [ Release Debug ] tests/foobar.html [ Failure ]');
- equal(expectations.length, 1, 'Number of expectations');
- equal(expectations[0].modifiers, 'crbug.com/12345 RELEASE DEBUG', 'modifiers');
- equal(expectations[0].path, 'tests/foobar.html', 'path');
- equal(expectations[0].expectations, 'FAIL', 'expectations');
-});
-
-test('getParsedExpectationsUnknownModifier', 1, function() {
- var expectations = getParsedExpectations('crbug.com/12345 [ ImaginaryOS ] tests/foobar.html');
- equal(expectations.length, 0, 'Number of expectations');
-});
-
-test('getParsedExpectationsTwoPaths', 1, function() {
- var expectations = getParsedExpectations('crbug.com/12345 tests/foo.html tests/bar.html [ Failure ]');
- equal(expectations.length, 0, 'Number of expectations');
-});
-
-test('getParsedExpectationsNoPath', 1, function() {
- var expectations = getParsedExpectations('crbug.com/12345 [ Failure ]');
- equal(expectations.length, 0, 'Number of expectations');
-});
-
-test('getParsedExpectationsHashInPath', 1, function() {
- var expectations = getParsedExpectations('crbug.com/12345 # [ Failure ]');
- equal(expectations.length, 0, 'Number of expectations');
-});
-
-test('getParsedExpectationsTwoExpectations', 4, function() {
- expectations = getParsedExpectations('crbug.com/12345 tests/foobar.html [ Pass Failure ]');
- equal(expectations.length, 1, 'Number of expectations');
- equal(expectations[0].modifiers, 'crbug.com/12345', 'modifiers');
- equal(expectations[0].path, 'tests/foobar.html', 'path');
- equal(expectations[0].expectations, 'PASS FAIL', 'expectations');
-});
-
-test('getParsedExpectationsNoExpectation', 4, function() {
- var expectations = getParsedExpectations('crbug.com/12345 tests/foobar.html');
- equal(expectations.length, 1, 'Number of expectations');
- equal(expectations[0].modifiers, 'crbug.com/12345 Skip', 'modifiers');
- equal(expectations[0].path, 'tests/foobar.html', 'path');
- equal(expectations[0].expectations, 'Pass', 'expectations');
-});
-
-test('getParsedExpectationsNoExpectationWithComment', 4, function() {
- var expectations = getParsedExpectations('crbug.com/12345 tests/foobar.html # Comment');
- equal(expectations.length, 1, 'Number of expectations');
- equal(expectations[0].modifiers, 'crbug.com/12345 Skip', 'modifiers');
- equal(expectations[0].path, 'tests/foobar.html', 'path');
- equal(expectations[0].expectations, 'Pass', 'expectations');
-});
-
-test('getParsedExpectationsExpectationConversionToModifier', 4, function() {
- var expectations = getParsedExpectations('crbug.com/12345 tests/foobar.html [ Rebaseline ]');
- equal(expectations.length, 1, 'Number of expectations');
- equal(expectations[0].modifiers, 'crbug.com/12345 REBASELINE Skip', 'modifiers');
- equal(expectations[0].path, 'tests/foobar.html', 'path');
- equal(expectations[0].expectations, 'Pass', 'expectations');
-});
-
-test('getParsedExpectationsUnknownExpectation', 1, function() {
- var expectations = getParsedExpectations('crbug.com/12345 tests/foobar.html [ PANIC ]');
- equal(expectations.length, 0, 'Number of expectations');
-});
-
-function stubResultsByBuilder(data)
-{
- for (var builder in currentBuilders())
- {
- g_resultsByBuilder[builder] = data[builder] || {'tests': []};
- };
-}
-
-function runExpectationsTest(builder, test, expectations, modifiers)
-{
- // Put in some dummy results. processExpectations expects the test to be
- // there.
- var tests = {};
- tests[test] = {'results': [[100, 'F']], 'times': [[100, 0]]};
- var results = {};
- results[builder] = {'tests': tests};
- stubResultsByBuilder(results);
-
- processExpectations();
- var resultsForTest = createResultsObjectForTest(test, builder);
- populateExpectationsData(resultsForTest);
-
- var message = 'Builder: ' + resultsForTest.builder + ' test: ' + resultsForTest.test;
- equal(resultsForTest.expectations, expectations, message);
- equal(resultsForTest.modifiers, modifiers, message);
-}
-
-test('releaseFail', 2, function() {
- resetGlobals();
- loadBuildersList('@ToT - chromium.org', 'layout-tests');
-
- var builder = 'WebKit Win';
- var test = 'foo/1.html';
- var expectationsArray = [
- {'modifiers': 'RELEASE', 'expectations': 'FAIL'}
- ];
- g_expectationsByPlatform['CHROMIUM'] = getParsedExpectations('[ Release ] ' + test + ' [ Failure ]');
- runExpectationsTest(builder, test, 'FAIL', 'RELEASE');
-});
-
-test('releaseFailDebugCrashReleaseBuilder', 2, function() {
- resetGlobals();
- loadBuildersList('@ToT - chromium.org', 'layout-tests');
- var builder = 'WebKit Win';
- var test = 'foo/1.html';
- var expectationsArray = [
- {'modifiers': 'RELEASE', 'expectations': 'FAIL'},
- {'modifiers': 'DEBUG', 'expectations': 'CRASH'}
- ];
- g_expectationsByPlatform['CHROMIUM'] = getParsedExpectations('[ Release ] ' + test + ' [ Failure ]\n' +
- '[ Debug ] ' + test + ' [ Crash ]');
- runExpectationsTest(builder, test, 'FAIL', 'RELEASE');
-});
-
-test('releaseFailDebugCrashDebugBuilder', 2, function() {
- resetGlobals();
- loadBuildersList('@ToT - chromium.org', 'layout-tests');
- var builder = 'WebKit Win (dbg)';
- var test = 'foo/1.html';
- var expectationsArray = [
- {'modifiers': 'RELEASE', 'expectations': 'FAIL'},
- {'modifiers': 'DEBUG', 'expectations': 'CRASH'}
- ];
- g_expectationsByPlatform['CHROMIUM'] = getParsedExpectations('[ Release ] ' + test + ' [ Failure ]\n' +
- '[ Debug ] ' + test + ' [ Crash ]');
- runExpectationsTest(builder, test, 'CRASH', 'DEBUG');
-});
-
-test('overrideJustBuildType', 12, function() {
- resetGlobals();
- loadBuildersList('@ToT - chromium.org', 'layout-tests');
- var test = 'bar/1.html';
- g_expectationsByPlatform['CHROMIUM'] = getParsedExpectations('bar [ WontFix Failure Pass Timeout ]\n' +
- '[ Mac ] ' + test + ' [ WontFix Failure ]\n' +
- '[ Linux Debug ] ' + test + ' [ Crash ]');
-
- runExpectationsTest('WebKit Win', test, 'FAIL PASS TIMEOUT', 'WONTFIX');
- runExpectationsTest('WebKit Win (dbg)(3)', test, 'FAIL PASS TIMEOUT', 'WONTFIX');
- runExpectationsTest('WebKit Linux', test, 'FAIL PASS TIMEOUT', 'WONTFIX');
- runExpectationsTest('WebKit Linux (dbg)(3)', test, 'CRASH', 'LINUX DEBUG');
- runExpectationsTest('WebKit Mac10.7', test, 'FAIL', 'MAC WONTFIX');
- runExpectationsTest('WebKit Mac10.7 (dbg)(3)', test, 'FAIL', 'MAC WONTFIX');
-});
-
-test('platformAndBuildType', 42, function() {
- var historyInstance = new history.History(flakinessConfig);
- // FIXME(jparent): Change to use the flakiness_db's history object
- // once it exists, rather than tracking global.
- g_history = historyInstance;
-
- var runPlatformAndBuildTypeTest = function(builder, expectedPlatform, expectedBuildType) {
- g_perBuilderPlatformAndBuildType = {};
- buildInfo = platformAndBuildType(builder);
- var message = 'Builder: ' + builder;
- equal(buildInfo.platform, expectedPlatform, message);
- equal(buildInfo.buildType, expectedBuildType, message);
- }
- runPlatformAndBuildTypeTest('WebKit Win (deps)', 'CHROMIUM_XP', 'RELEASE');
- runPlatformAndBuildTypeTest('WebKit Win (deps)(dbg)(1)', 'CHROMIUM_XP', 'DEBUG');
- runPlatformAndBuildTypeTest('WebKit Win (deps)(dbg)(2)', 'CHROMIUM_XP', 'DEBUG');
- runPlatformAndBuildTypeTest('WebKit Linux (deps)', 'CHROMIUM_LUCID', 'RELEASE');
- runPlatformAndBuildTypeTest('WebKit Linux (deps)(dbg)(1)', 'CHROMIUM_LUCID', 'DEBUG');
- runPlatformAndBuildTypeTest('WebKit Linux (deps)(dbg)(2)', 'CHROMIUM_LUCID', 'DEBUG');
- runPlatformAndBuildTypeTest('WebKit Mac10.6 (deps)', 'CHROMIUM_SNOWLEOPARD', 'RELEASE');
- runPlatformAndBuildTypeTest('WebKit Mac10.6 (deps)(dbg)(1)', 'CHROMIUM_SNOWLEOPARD', 'DEBUG');
- runPlatformAndBuildTypeTest('WebKit Mac10.6 (deps)(dbg)(2)', 'CHROMIUM_SNOWLEOPARD', 'DEBUG');
- runPlatformAndBuildTypeTest('WebKit Win', 'CHROMIUM_XP', 'RELEASE');
- runPlatformAndBuildTypeTest('WebKit Win7', 'CHROMIUM_WIN7', 'RELEASE');
- runPlatformAndBuildTypeTest('WebKit Win (dbg)(1)', 'CHROMIUM_XP', 'DEBUG');
- runPlatformAndBuildTypeTest('WebKit Win (dbg)(2)', 'CHROMIUM_XP', 'DEBUG');
- runPlatformAndBuildTypeTest('WebKit Linux', 'CHROMIUM_LUCID', 'RELEASE');
- runPlatformAndBuildTypeTest('WebKit Linux 32', 'CHROMIUM_LUCID', 'RELEASE');
- runPlatformAndBuildTypeTest('WebKit Linux (dbg)(1)', 'CHROMIUM_LUCID', 'DEBUG');
- runPlatformAndBuildTypeTest('WebKit Linux (dbg)(2)', 'CHROMIUM_LUCID', 'DEBUG');
- runPlatformAndBuildTypeTest('WebKit Mac10.6', 'CHROMIUM_SNOWLEOPARD', 'RELEASE');
- runPlatformAndBuildTypeTest('WebKit Mac10.6 (dbg)', 'CHROMIUM_SNOWLEOPARD', 'DEBUG');
- runPlatformAndBuildTypeTest('XP Tests', 'CHROMIUM_XP', 'RELEASE');
- runPlatformAndBuildTypeTest('Interactive Tests (dbg)', 'CHROMIUM_XP', 'DEBUG');
-});
-
-test('realModifiers', 3, function() {
- equal(realModifiers('BUG(Foo) LINUX LION WIN DEBUG SLOW'), 'SLOW');
- equal(realModifiers('BUG(Foo) LUCID MAC XP RELEASE SKIP'), 'SKIP');
- equal(realModifiers('BUG(Foo)'), '');
-});
-
-test('allTestsWithSamePlatformAndBuildType', 1, function() {
- // FIXME: test that allTestsWithSamePlatformAndBuildType actually returns the right set of tests.
- var expectedPlatformsList = ['CHROMIUM_LION', 'CHROMIUM_SNOWLEOPARD', 'CHROMIUM_XP', 'CHROMIUM_VISTA', 'CHROMIUM_WIN7', 'CHROMIUM_LUCID',
- 'CHROMIUM_ANDROID', 'APPLE_MAC_LION_WK1', 'APPLE_MAC_LION_WK2', 'APPLE_MAC_SNOWLEOPARD_WK1', 'APPLE_MAC_SNOWLEOPARD_WK2',
- 'APPLE_WIN_XP', 'APPLE_WIN_WIN7', 'GTK_LINUX_WK1', 'GTK_LINUX_WK2', 'QT_LINUX', 'EFL_LINUX_WK1', 'EFL_LINUX_WK2'];
- var actualPlatformsList = Object.keys(g_allTestsByPlatformAndBuildType);
- deepEqual(expectedPlatformsList, actualPlatformsList);
-});
-
-test('filterBugs',4, function() {
- var filtered = filterBugs('Skip crbug.com/123 webkit.org/b/123 Slow Bug(Tony) Debug')
- equal(filtered.modifiers, 'Skip Slow Debug');
- equal(filtered.bugs, 'crbug.com/123 webkit.org/b/123 Bug(Tony)');
-
- filtered = filterBugs('Skip Slow Debug')
- equal(filtered.modifiers, 'Skip Slow Debug');
- equal(filtered.bugs, '');
-});
-
-test('getExpectations', 16, function() {
- resetGlobals();
- loadBuildersList('@ToT - chromium.org', 'layout-tests');
-
- stubResultsByBuilder({
- 'WebKit Win' : {
- 'tests': {
- 'foo/test1.html': {'results': [[100, 'F']], 'times': [[100, 0]]},
- 'foo/test2.html': {'results': [[100, 'F']], 'times': [[100, 0]]},
- 'foo/test3.html': {'results': [[100, 'F']], 'times': [[100, 0]]},
- 'test1.html': {'results': [[100, 'F']], 'times': [[100, 0]]}
- }
- }
- });
-
- g_expectationsByPlatform['CHROMIUM'] = getParsedExpectations('Bug(123) foo [ Failure Pass Crash ]\n' +
- 'Bug(Foo) [ Release ] foo/test1.html [ Failure ]\n' +
- '[ Debug ] foo/test1.html [ Crash ]\n' +
- 'Bug(456) foo/test2.html [ Failure ]\n' +
- '[ Linux Debug ] foo/test2.html [ Crash ]\n' +
- '[ Release ] test1.html [ Failure ]\n' +
- '[ Debug ] test1.html [ Crash ]\n');
- g_expectationsByPlatform['CHROMIUM_ANDROID'] = getParsedExpectations('Bug(654) foo/test2.html [ Crash ]\n');
-
- g_expectationsByPlatform['GTK'] = getParsedExpectations('Bug(42) foo/test2.html [ ImageOnlyFailure ]\n' +
- '[ Debug ] test1.html [ Crash ]\n');
- g_expectationsByPlatform['GTK_LINUX_WK1'] = getParsedExpectations('[ Release ] foo/test1.html [ ImageOnlyFailure ]\n' +
- 'Bug(789) foo/test2.html [ Crash ]\n');
- g_expectationsByPlatform['GTK_LINUX_WK2'] = getParsedExpectations('Bug(987) foo/test2.html [ Failure ]\n');
-
- processExpectations();
-
- var expectations = getExpectations('foo/test1.html', 'CHROMIUM_XP', 'DEBUG');
- equal(JSON.stringify(expectations), '{"modifiers":"DEBUG","expectations":"CRASH"}');
-
- var expectations = getExpectations('foo/test1.html', 'CHROMIUM_LUCID', 'RELEASE');
- equal(JSON.stringify(expectations), '{"modifiers":"Bug(Foo) RELEASE","expectations":"FAIL"}');
-
- var expectations = getExpectations('foo/test2.html', 'CHROMIUM_LUCID', 'RELEASE');
- equal(JSON.stringify(expectations), '{"modifiers":"Bug(456)","expectations":"FAIL"}');
-
- var expectations = getExpectations('foo/test2.html', 'CHROMIUM_LION', 'DEBUG');
- equal(JSON.stringify(expectations), '{"modifiers":"Bug(456)","expectations":"FAIL"}');
-
- var expectations = getExpectations('foo/test2.html', 'CHROMIUM_LUCID', 'DEBUG');
- equal(JSON.stringify(expectations), '{"modifiers":"LINUX DEBUG","expectations":"CRASH"}');
-
- var expectations = getExpectations('foo/test2.html', 'CHROMIUM_ANDROID', 'RELEASE');
- equal(JSON.stringify(expectations), '{"modifiers":"Bug(654)","expectations":"CRASH"}');
-
- var expectations = getExpectations('test1.html', 'CHROMIUM_ANDROID', 'RELEASE');
- equal(JSON.stringify(expectations), '{"modifiers":"RELEASE","expectations":"FAIL"}');
-
- var expectations = getExpectations('foo/test3.html', 'CHROMIUM_LUCID', 'DEBUG');
- equal(JSON.stringify(expectations), '{"modifiers":"Bug(123)","expectations":"FAIL PASS CRASH"}');
-
- var expectations = getExpectations('test1.html', 'CHROMIUM_XP', 'DEBUG');
- equal(JSON.stringify(expectations), '{"modifiers":"DEBUG","expectations":"CRASH"}');
-
- var expectations = getExpectations('test1.html', 'CHROMIUM_LUCID', 'RELEASE');
- equal(JSON.stringify(expectations), '{"modifiers":"RELEASE","expectations":"FAIL"}');
-
- var expectations = getExpectations('foo/test1.html', 'GTK_LINUX_WK1', 'RELEASE');
- equal(JSON.stringify(expectations), '{"modifiers":"RELEASE","expectations":"IMAGE"}');
-
- var expectations = getExpectations('foo/test2.html', 'GTK_LINUX_WK1', 'DEBUG');
- equal(JSON.stringify(expectations), '{"modifiers":"Bug(789)","expectations":"CRASH"}');
-
- var expectations = getExpectations('test1.html', 'GTK_LINUX_WK1', 'DEBUG');
- equal(JSON.stringify(expectations), '{"modifiers":"DEBUG","expectations":"CRASH"}');
-
- var expectations = getExpectations('foo/test2.html', 'GTK_LINUX_WK2', 'DEBUG');
- equal(JSON.stringify(expectations), '{"modifiers":"Bug(987)","expectations":"FAIL"}');
-
- var expectations = getExpectations('foo/test2.html', 'GTK_LINUX_WK2', 'RELEASE');
- equal(JSON.stringify(expectations), '{"modifiers":"Bug(987)","expectations":"FAIL"}');
-
- var expectations = getExpectations('test1.html', 'GTK_LINUX_WK2', 'DEBUG');
- equal(JSON.stringify(expectations), '{"modifiers":"DEBUG","expectations":"CRASH"}');
-});
+var FAILURE_MAP = {"A": "AUDIO", "C": "CRASH", "F": "TEXT", "I": "IMAGE", "O": "MISSING",
+ "N": "NO DATA", "P": "PASS", "T": "TIMEOUT", "Y": "NOTRUN", "X": "SKIP", "Z": "IMAGE+TEXT"}
test('substringList', 2, function() {
var historyInstance = new history.History(flakinessConfig);
@@ -409,40 +70,10 @@
equal(substringList().toString(), 'foo/bar.FLAKY_foo.html');
});
-test('htmlForTestsWithExpectationsButNoFailures', 4, function() {
- var historyInstance = new history.History(defaultDashboardSpecificStateValues, generatePage, handleValidHashParameter);
- // FIXME(jparent): Remove this once global isn't used.
- g_history = historyInstance;
- loadBuildersList('@ToT - chromium.org', 'layout-tests');
- var builder = 'WebKit Win';
- g_perBuilderWithExpectationsButNoFailures[builder] = ['passing-test1.html', 'passing-test2.html'];
- g_perBuilderSkippedPaths[builder] = ['skipped-test1.html'];
- g_resultsByBuilder[builder] = { buildNumbers: [5, 4, 3, 1] };
-
- historyInstance.dashboardSpecificState.showUnexpectedPasses = true;
- historyInstance.dashboardSpecificState.showSkipped = true;
-
- historyInstance.crossDashboardState.group = '@ToT - chromium.org';
- historyInstance.crossDashboardState.testType = 'layout-tests';
-
- var container = document.createElement('div');
- container.innerHTML = htmlForTestsWithExpectationsButNoFailures(builder);
- equal(container.querySelectorAll('#passing-tests > div').length, 2);
- equal(container.querySelectorAll('#skipped-tests > div').length, 1);
-
- historyInstance.dashboardSpecificState.showUnexpectedPasses = false;
- historyInstance.dashboardSpecificState.showSkipped = false;
-
- var container = document.createElement('div');
- container.innerHTML = htmlForTestsWithExpectationsButNoFailures(builder);
- equal(container.querySelectorAll('#passing-tests > div').length, 0);
- equal(container.querySelectorAll('#skipped-tests > div').length, 0);
-});
-
test('headerForTestTableHtml', 1, function() {
var container = document.createElement('div');
container.innerHTML = headerForTestTableHtml();
- equal(container.querySelectorAll('input').length, 5);
+ equal(container.querySelectorAll('input').length, 4);
});
test('htmlForTestTypeSwitcherGroup', 6, function() {
@@ -492,19 +123,31 @@
loadBuildersList('@ToT - chromium.org', 'layout-tests');
var builderName = 'WebKit Linux';
+ g_resultsByBuilder[builderName] = {buildNumbers: [2, 1], blinkRevision: [1234, 1233], failure_map: FAILURE_MAP};
+
var test = 'dummytest.html';
- g_testToResultsMap[test] = [createResultsObjectForTest(test, builderName)];
+ var resultsObject = createResultsObjectForTest(test, builderName);
+ resultsObject.rawResults = [[1, 'F']];
+ resultsObject.rawTimes = [[1, 0]];
+ resultsObject.bugs = ["crbug.com/1234", "webkit.org/5678"];
+ g_testToResultsMap[test] = [resultsObject];
equal(htmlForIndividualTestOnAllBuildersWithResultsLinks(test),
'<table class=test-table><thead><tr>' +
'<th sortValue=test><div class=table-header-content><span></span><span class=header-text>test</span></div></th>' +
'<th sortValue=bugs><div class=table-header-content><span></span><span class=header-text>bugs</span></div></th>' +
- '<th sortValue=modifiers><div class=table-header-content><span></span><span class=header-text>modifiers</span></div></th>' +
'<th sortValue=expectations><div class=table-header-content><span></span><span class=header-text>expectations</span></div></th>' +
'<th sortValue=slowest><div class=table-header-content><span></span><span class=header-text>slowest run</span></div></th>' +
'<th sortValue=flakiness colspan=10000><div class=table-header-content><span></span><span class=header-text>flakiness (numbers are runtimes in seconds)</span></div></th>' +
'</tr></thead>' +
- '<tbody></tbody>' +
+ '<tbody><tr>' +
+ '<td class="test-link"><span class="link" onclick="g_history.setQueryParameter(\'tests\',\'dummytest.html\');">dummytest.html</span>' +
+ '<td class=options-container>' +
+ '<div><a href="http://crbug.com/1234">crbug.com/1234</a></div>' +
+ '<div><a href="http://webkit.org/5678">webkit.org/5678</a></div>' +
+ '<td class=options-container><td><td title="TEXT. Click for more info." class="results TEXT" onclick=\'showPopupForBuild(event, "WebKit Linux",0,"dummytest.html")\'> ' +
+ '<td title="NO DATA. Click for more info." class="results NODATA" onclick=\'showPopupForBuild(event, "WebKit Linux",1,"dummytest.html")\'> ' +
+ '</tbody>' +
'</table>' +
'<div>The following builders either don\'t run this test (e.g. it\'s skipped) or all runs passed:</div>' +
'<div class=skipped-builder-list>' +
@@ -527,7 +170,7 @@
var tests = [test1, test2];
equal(htmlForIndividualTests(tests),
- '<h2><a href="' + TEST_URL_BASE_PATH_IN_VERSION_CONTROL + 'foo/nonexistant.html" target="_blank">foo/nonexistant.html</a></h2>' +
+ '<h2><a href="' + TEST_URL_BASE_PATH_FOR_BROWSING + 'foo/nonexistant.html" target="_blank">foo/nonexistant.html</a></h2>' +
htmlForIndividualTestOnAllBuilders(test1) +
'<div class=expectations test=foo/nonexistant.html>' +
'<div><span class=link onclick=\"g_history.setQueryParameter(\'showExpectations\', true)\">Show results</span> | ' +
@@ -535,7 +178,7 @@
'<b>Only shows actual results/diffs from the most recent *failure* on each bot.</b></div>' +
'</div>' +
'<hr>' +
- '<h2><a href="' + TEST_URL_BASE_PATH_IN_VERSION_CONTROL + 'bar/nonexistant.html" target="_blank">bar/nonexistant.html</a></h2>' +
+ '<h2><a href="' + TEST_URL_BASE_PATH_FOR_BROWSING + 'bar/nonexistant.html" target="_blank">bar/nonexistant.html</a></h2>' +
htmlForIndividualTestOnAllBuilders(test2) +
'<div class=expectations test=bar/nonexistant.html>' +
'<div><span class=link onclick=\"g_history.setQueryParameter(\'showExpectations\', true)\">Show results</span> | ' +
@@ -554,45 +197,52 @@
historyInstance.dashboardSpecificState.showChrome = true;
equal(htmlForIndividualTests(tests),
- '<h2><a href="' + TEST_URL_BASE_PATH_IN_VERSION_CONTROL + 'foo/nonexistant.html" target="_blank">foo/nonexistant.html</a></h2>' +
+ '<h2><a href="' + TEST_URL_BASE_PATH_FOR_BROWSING + 'foo/nonexistant.html" target="_blank">foo/nonexistant.html</a></h2>' +
htmlForIndividualTestOnAllBuildersWithResultsLinks(test1));
tests = [test1, test2];
equal(htmlForIndividualTests(tests),
- '<h2><a href="' + TEST_URL_BASE_PATH_IN_VERSION_CONTROL + 'foo/nonexistant.html" target="_blank">foo/nonexistant.html</a></h2>' +
+ '<h2><a href="' + TEST_URL_BASE_PATH_FOR_BROWSING + 'foo/nonexistant.html" target="_blank">foo/nonexistant.html</a></h2>' +
htmlForIndividualTestOnAllBuildersWithResultsLinks(test1) + '<hr>' +
- '<h2><a href="' + TEST_URL_BASE_PATH_IN_VERSION_CONTROL + 'bar/nonexistant.html" target="_blank">bar/nonexistant.html</a></h2>' +
+ '<h2><a href="' + TEST_URL_BASE_PATH_FOR_BROWSING + 'bar/nonexistant.html" target="_blank">bar/nonexistant.html</a></h2>' +
htmlForIndividualTestOnAllBuildersWithResultsLinks(test2));
});
+test('linkifyBugs', 4, function() {
+ equal(linkifyBugs(["crbug.com/1234", "webkit.org/5678"]),
+ '<div><a href="http://crbug.com/1234">crbug.com/1234</a></div><div><a href="http://webkit.org/5678">webkit.org/5678</a></div>');
+ equal(linkifyBugs(["crbug.com/1234"]), '<div><a href="http://crbug.com/1234">crbug.com/1234</a></div>');
+ equal(linkifyBugs(["Bug(nick)"]), '<div>Bug(nick)</div>');
+ equal(linkifyBugs([]), '');
+});
+
test('htmlForSingleTestRow', 1, function() {
var historyInstance = resetGlobals();
var builder = 'dummyBuilder';
BUILDER_TO_MASTER[builder] = CHROMIUM_WEBKIT_BUILDER_MASTER;
var test = createResultsObjectForTest('foo/exists.html', builder);
- historyInstance.dashboardSpecificState.showCorrectExpectations = true;
- g_resultsByBuilder[builder] = {buildNumbers: [2, 1], blinkRevision: [1234, 1233]};
+ historyInstance.dashboardSpecificState.showNonFlaky = true;
+ g_resultsByBuilder[builder] = {buildNumbers: [2, 1], blinkRevision: [1234, 1233], failure_map: FAILURE_MAP};
test.rawResults = [[1, 'F'], [2, 'I']];
test.rawTimes = [[1, 0], [2, 5]];
var expected = '<tr>' +
'<td class="test-link"><span class="link" onclick="g_history.setQueryParameter(\'tests\',\'foo/exists.html\');">foo/exists.html</span>' +
- '<td class=options-container><a href="https://bugs.webkit.org/enter_bug.cgi?assigned_to=webkit-unassigned%40lists.webkit.org&product=WebKit&form_name=enter_bug&component=Tools%20%2F%20Tests&short_desc=Layout%20Test%20foo%2Fexists.html%20is%20failing&comment=The%20following%20layout%20test%20is%20failing%20on%20%5Binsert%20platform%5D%0A%0Afoo%2Fexists.html%0A%0AProbable%20cause%3A%0A%0A%5Binsert%20probable%20cause%5D" class="file-bug">FILE BUG</a>' +
+ '<td class=options-container><a href="https://code.google.com/p/chromium/issues/entry?template=Layout%20Test%20Failure&summary=Layout%20Test%20foo%2Fexists.html%20is%20failing&comment=The%20following%20layout%20test%20is%20failing%20on%20%5Binsert%20platform%5D%0A%0Afoo%2Fexists.html%0A%0AProbable%20cause%3A%0A%0A%5Binsert%20probable%20cause%5D">File new bug</a>' +
'<td class=options-container>' +
- '<td class=options-container>' +
- '<td><td title="TEXT. Click for more info." class="results F" onclick=\'showPopupForBuild(event, "dummyBuilder",0,"foo/exists.html")\'> ' +
- '<td title="IMAGE. Click for more info." class="results I" onclick=\'showPopupForBuild(event, "dummyBuilder",1,"foo/exists.html")\'>5';
-
+ '<td>' +
+ '<td title="TEXT. Click for more info." class="results TEXT" onclick=\'showPopupForBuild(event, "dummyBuilder",0,"foo/exists.html")\'> ' +
+ '<td title="IMAGE. Click for more info." class="results IMAGE" onclick=\'showPopupForBuild(event, "dummyBuilder",1,"foo/exists.html")\'>5';
equal(htmlForSingleTestRow(test), expected);
});
test('lookupVirtualTestSuite', 2, function() {
equal(lookupVirtualTestSuite('fast/canvas/foo.html'), '');
- equal(lookupVirtualTestSuite('platform/chromium/virtual/gpu/fast/canvas/foo.html'), 'platform/chromium/virtual/gpu/fast/canvas');
+ equal(lookupVirtualTestSuite('virtual/gpu/fast/canvas/foo.html'), 'virtual/gpu/fast/canvas');
});
test('baseTest', 2, function() {
equal(baseTest('fast/canvas/foo.html', ''), 'fast/canvas/foo.html');
- equal(baseTest('platform/chromium/virtual/gpu/fast/canvas/foo.html', 'platform/chromium/virtual/gpu/fast/canvas'), 'fast/canvas/foo.html');
+ equal(baseTest('virtual/gpu/fast/canvas/foo.html', 'virtual/gpu/fast/canvas'), 'fast/canvas/foo.html');
});
// FIXME: Create builders_tests.js and move this there.
@@ -642,14 +292,14 @@
var test1 = createResultsObjectForTest('foo/test1.html', 'dummyBuilder');
var test2 = createResultsObjectForTest('foo/test2.html', 'dummyBuilder');
var test3 = createResultsObjectForTest('foo/test3.html', 'dummyBuilder');
- test1.modifiers = 'b';
- test2.modifiers = 'a';
- test3.modifiers = '';
+ test1.expectations = 'b';
+ test2.expectations = 'a';
+ test3.expectations = '';
var tests = [test1, test2, test3];
- sortTests(tests, 'modifiers', FORWARD);
+ sortTests(tests, 'expectations', FORWARD);
deepEqual(tests, [test2, test1, test3]);
- sortTests(tests, 'modifiers', BACKWARD);
+ sortTests(tests, 'expectations', BACKWARD);
deepEqual(tests, [test3, test1, test2]);
test1.bugs = 'b';
@@ -752,7 +402,7 @@
notEqual(historyInstance.crossDashboardState.group, originalGroup, "group should have been invalidated");
});
-test('shouldHideTest', 10, function() {
+test('shouldShowTest', 9, function() {
var historyInstance = new history.History(flakinessConfig);
historyInstance.parseParameters();
// FIXME(jparent): Change to use the flakiness_dashboard's history object
@@ -760,35 +410,33 @@
g_history = historyInstance;
var test = createResultsObjectForTest('foo/test.html', 'dummyBuilder');
- equal(shouldHideTest(test), true, 'default layout test, hide it.');
- historyInstance.dashboardSpecificState.showCorrectExpectations = true;
- equal(shouldHideTest(test), false, 'show correct expectations.');
- historyInstance.dashboardSpecificState.showCorrectExpectations = false;
+ equal(shouldShowTest(test), false, 'default layout test, hide it.');
+ historyInstance.dashboardSpecificState.showNonFlaky = true;
+ equal(shouldShowTest(test), true, 'show correct expectations.');
+ historyInstance.dashboardSpecificState.showNonFlaky = false;
test = createResultsObjectForTest('foo/test.html', 'dummyBuilder');
- test.isWontFixSkip = true;
- equal(shouldHideTest(test), true, 'by default hide these too');
- historyInstance.dashboardSpecificState.showWontFixSkip = true;
- equal(shouldHideTest(test), false, 'now we should show it');
- historyInstance.dashboardSpecificState.showWontFixSkip = false;
+ test.expectations = "WONTFIX";
+ equal(shouldShowTest(test), false, 'by default hide wontfix');
+ historyInstance.dashboardSpecificState.showWontFix = true;
+ equal(shouldShowTest(test), true, 'show wontfix');
+ historyInstance.dashboardSpecificState.showWontFix = false;
+
+ test = createResultsObjectForTest('foo/test.html', 'dummyBuilder');
+ test.expectations = "SKIP";
+ equal(shouldShowTest(test), false, 'we hide skip tests by default');
+ historyInstance.dashboardSpecificState.showSkip = true;
+ equal(shouldShowTest(test), true, 'show skip test');
+ historyInstance.dashboardSpecificState.showSkip = false;
test = createResultsObjectForTest('foo/test.html', 'dummyBuilder');
test.isFlaky = true;
- equal(shouldHideTest(test), false, 'we show flaky tests by default');
- historyInstance.dashboardSpecificState.showFlaky = false;
- equal(shouldHideTest(test), true, 'do not show flaky test');
+ equal(shouldShowTest(test), false, 'hide flaky tests by default');
historyInstance.dashboardSpecificState.showFlaky = true;
-
- test = createResultsObjectForTest('foo/test.html', 'dummyBuilder');
- test.slowestNonTimeoutCrashTime = MIN_SECONDS_FOR_SLOW_TEST + 1;
- equal(shouldHideTest(test), true, 'we hide slow tests by default');
- historyInstance.dashboardSpecificState.showSlow = true;
- equal(shouldHideTest(test), false, 'now show slow test');
- historyInstance.dashboardSpecificState.showSlow = false;
+ equal(shouldShowTest(test), true, 'show flaky test');
+ historyInstance.dashboardSpecificState.showFlaky = false;
test = createResultsObjectForTest('foo/test.html', 'dummyBuilder');
historyInstance.crossDashboardState.testType = 'not layout tests';
- equal(shouldHideTest(test), false, 'show all non layout tests');
- test.isWontFixSkip = true;
- equal(shouldHideTest(test), false, 'show all non layout tests, even if wont fix');
+ equal(shouldShowTest(test), true, 'show all non layout tests');
});
diff --git a/Tools/TestResultServer/static-dashboards/history_unittests.js b/Tools/TestResultServer/static-dashboards/history_unittests.js
index 7559453..4594a61 100644
--- a/Tools/TestResultServer/static-dashboards/history_unittests.js
+++ b/Tools/TestResultServer/static-dashboards/history_unittests.js
@@ -26,6 +26,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+module('history');
+
test('queryHashAsMap', 2, function() {
equal(window.location.hash, '#useTestData=true');
deepEqual(history.queryHashAsMap(), {useTestData: 'true'});
diff --git a/Tools/TestResultServer/static-dashboards/loader.js b/Tools/TestResultServer/static-dashboards/loader.js
index 8d84682..9fd00b8 100644
--- a/Tools/TestResultServer/static-dashboards/loader.js
+++ b/Tools/TestResultServer/static-dashboards/loader.js
@@ -32,7 +32,6 @@
(function() {
var TEST_RESULTS_SERVER = 'http://test-results.appspot.com/';
-var CHROMIUM_EXPECTATIONS_URL = 'http://svn.webkit.org/repository/webkit/trunk/LayoutTests/platform/chromium/TestExpectations';
function pathToBuilderResultsFile(builderName) {
return TEST_RESULTS_SERVER + 'testfile?builder=' + builderName +
@@ -62,7 +61,6 @@
this._loadingSteps = [
this._loadBuildersList,
this._loadResultsFiles,
- this._loadExpectationsFiles,
];
this._buildersThatFailedToLoad = [];
@@ -165,26 +163,19 @@
{
var builds = JSON.parse(fileData);
- var json_version = builds['version'];
- for (var builderName in builds) {
- if (builderName == 'version')
- continue;
+ // If a test suite stops being run on a given builder, we don't want to show it.
+ // Assume any builder without a run in two weeks for a given test suite isn't
+ // running that suite anymore.
+ // FIXME: Grab which bots run which tests directly from the buildbot JSON instead.
+ var lastRunSeconds = builds[builderName].secondsSinceEpoch[0];
+ if ((Date.now() / 1000) - lastRunSeconds > ONE_WEEK_SECONDS)
+ return;
- // If a test suite stops being run on a given builder, we don't want to show it.
- // Assume any builder without a run in two weeks for a given test suite isn't
- // running that suite anymore.
- // FIXME: Grab which bots run which tests directly from the buildbot JSON instead.
- var lastRunSeconds = builds[builderName].secondsSinceEpoch[0];
- if ((Date.now() / 1000) - lastRunSeconds > ONE_WEEK_SECONDS)
- continue;
+ if ((Date.now() / 1000) - lastRunSeconds > ONE_DAY_SECONDS)
+ this._staleBuilders.push(builderName);
- if ((Date.now() / 1000) - lastRunSeconds > ONE_DAY_SECONDS)
- this._staleBuilders.push(builderName);
-
- if (json_version >= 4)
- builds[builderName][TESTS_KEY] = loader.Loader._flattenTrie(builds[builderName][TESTS_KEY]);
- g_resultsByBuilder[builderName] = builds[builderName];
- }
+ builds[builderName][TESTS_KEY] = loader.Loader._flattenTrie(builds[builderName][TESTS_KEY]);
+ g_resultsByBuilder[builderName] = builds[builderName];
},
_handleResultsFileLoadError: function(builderName)
{
@@ -213,39 +204,6 @@
}
return true;
},
- _loadExpectationsFiles: function()
- {
- if (!isFlakinessDashboard() && !this._history.crossDashboardState.useTestData) {
- this._loadNext();
- return;
- }
-
- var expectationsFilesToRequest = {};
- traversePlatformsTree(function(platform, platformName) {
- if (platform.fallbackPlatforms)
- platform.fallbackPlatforms.forEach(function(fallbackPlatform) {
- var fallbackPlatformObject = platformObjectForName(fallbackPlatform);
- if (fallbackPlatformObject.expectationsDirectory && !(fallbackPlatform in expectationsFilesToRequest))
- expectationsFilesToRequest[fallbackPlatform] = EXPECTATIONS_URL_BASE_PATH + fallbackPlatformObject.expectationsDirectory + '/TestExpectations';
- });
-
- if (platform.expectationsDirectory)
- expectationsFilesToRequest[platformName] = EXPECTATIONS_URL_BASE_PATH + platform.expectationsDirectory + '/TestExpectations';
- });
-
- for (platformWithExpectations in expectationsFilesToRequest)
- loader.request(expectationsFilesToRequest[platformWithExpectations],
- partial(function(loader, platformName, xhr) {
- g_expectationsByPlatform[platformName] = getParsedExpectations(xhr.responseText);
-
- delete expectationsFilesToRequest[platformName];
- if (!Object.keys(expectationsFilesToRequest).length)
- loader._loadNext();
- }, this, platformWithExpectations),
- partial(function(platformName, xhr) {
- console.error('Could not load expectations file for ' + platformName);
- }, platformWithExpectations));
- },
_addErrors: function()
{
if (this._buildersThatFailedToLoad.length)
diff --git a/Tools/TestResultServer/static-dashboards/loader_unittests.js b/Tools/TestResultServer/static-dashboards/loader_unittests.js
index 186067a..cfeccd6 100644
--- a/Tools/TestResultServer/static-dashboards/loader_unittests.js
+++ b/Tools/TestResultServer/static-dashboards/loader_unittests.js
@@ -67,7 +67,7 @@
loader.request = function(url, successCallback, errorCallback) {
var builderName = /builder=([\w ().]+)&/.exec(url)[1];
loadedBuilders.push(builderName);
- successCallback({responseText: '{"version": 4, "' + builderName + '": {"secondsSinceEpoch": [' + Date.now() + '], "tests": {}}}'});
+ successCallback({responseText: '{"version":4,"' + builderName + '":{"failure_map":{"A":"AUDIO","C":"CRASH","F":"TEXT"},"secondsSinceEpoch":[' + Date.now() + '],"tests":{}}}'});
}
loadBuildersList('@ToT - chromium.org', 'layout-tests');
@@ -79,31 +79,6 @@
}
});
-test('expectations files loading', 1, function() {
- resetGlobals();
- g_history.parseCrossDashboardParameters();
- // FIXME: re-enable once added back in flakiness_dashboard.js
- var expectedLoadedPlatforms = [/* "chromium", "chromium-android", */"efl", "efl-wk1", "efl-wk2", "gtk",
- "gtk-wk2", "mac", "mac-lion", /*"mac-snowleopard", */"qt", "win", "wk2"];
- var loadedPlatforms = [];
- var resourceLoader = new loader.Loader();
- resourceLoader._loadNext = function() {
- deepEqual(loadedPlatforms.sort(), expectedLoadedPlatforms);
- }
-
- var requestFunction = loader.request;
- loader.request = function(url, successCallback, errorCallback) {
- loadedPlatforms.push(/LayoutTests\/platform\/(.+)\/TestExpectations/.exec(url)[1]);
- successCallback({responseText: ''});
- }
-
- try {
- resourceLoader._loadExpectationsFiles();
- } finally {
- loader.request = requestFunction;
- }
-});
-
test('results file failing to load', 2, function() {
resetGlobals();
loadBuildersList('@ToT - chromium.org', 'layout-tests');
diff --git a/Tools/TestResultServer/static-dashboards/run-embedded-unittests.html b/Tools/TestResultServer/static-dashboards/run-embedded-unittests.html
index 10f058a..08c7b29 100644
--- a/Tools/TestResultServer/static-dashboards/run-embedded-unittests.html
+++ b/Tools/TestResultServer/static-dashboards/run-embedded-unittests.html
@@ -52,6 +52,7 @@
<script src="history_unittests.js"></script>
<script src="dashboard_base.js"></script>
<script src="ui.js"></script>
+<script src="ui_unittests.js"></script>
<script src="loader.js"></script>
<script src="loader_unittests.js"></script>
<script>
diff --git a/Tools/TestResultServer/static-dashboards/run-unittests.html b/Tools/TestResultServer/static-dashboards/run-unittests.html
index 15bc212..fb42134 100644
--- a/Tools/TestResultServer/static-dashboards/run-unittests.html
+++ b/Tools/TestResultServer/static-dashboards/run-unittests.html
@@ -46,6 +46,7 @@
<script src="history_unittests.js"></script>
<script src="dashboard_base.js"></script>
<script src="ui.js"></script>
+<script src="ui_unittests.js"></script>
<script src="loader.js"></script>
<script src="loader_unittests.js"></script>
<script>
@@ -60,5 +61,8 @@
<!-- FIXME: Split this up into multiple unittest.js, e.g. one for builders.js and one for dashboard_base.js. -->
<script src="flakiness_dashboard_unittests.js"></script>
+
+<script src="aggregate_results.js"></script>
+<script src="aggregate_results_unittest.js"></script>
</body>
</html>
diff --git a/Tools/TestResultServer/static-dashboards/string.js b/Tools/TestResultServer/static-dashboards/string.js
index 3e1f7f9..6424dbf 100644
--- a/Tools/TestResultServer/static-dashboards/string.js
+++ b/Tools/TestResultServer/static-dashboards/string.js
@@ -53,7 +53,7 @@
string.isValidName = function(str)
{
- return str.match(/[A-Za-z0-9\-\_,]/);
+ return str.match(/[A-Za-z0-9\-\_,\+]/);
}
string.trimString = function(str)
diff --git a/Tools/TestResultServer/static-dashboards/timeline_explorer.js b/Tools/TestResultServer/static-dashboards/timeline_explorer.js
index 3ff7f92..2be0fcd 100644
--- a/Tools/TestResultServer/static-dashboards/timeline_explorer.js
+++ b/Tools/TestResultServer/static-dashboards/timeline_explorer.js
@@ -45,10 +45,12 @@
function generatePage(historyInstance)
{
- g_buildIndicesByTimestamp = {};
var results = g_resultsByBuilder[historyInstance.dashboardSpecificState.builder || currentBuilderGroup().defaultBuilder()];
- for (var i = 0; i < results[FIXABLE_COUNTS_KEY].length; i++) {
+ g_totalFailureCount = getTotalTestCounts(results[FAILURES_BY_TYPE_KEY]).totalFailingTests;
+
+ g_buildIndicesByTimestamp = {};
+ for (var i = 0; i < g_totalFailureCount.length; i++) {
var buildDate = new Date(results[TIMESTAMPS_KEY][i] * 1000);
g_buildIndicesByTimestamp[buildDate.getTime()] = i;
}
@@ -106,9 +108,7 @@
function initCurrentBuilderTestResults()
{
- var startTime = Date.now();
g_currentBuilderTestResults = _decompressResults(g_resultsByBuilder[g_history.dashboardSpecificState.builder || currentBuilderGroup().defaultBuilder()]);
- console.log( 'Time to get test results by build: ' + (Date.now() - startTime));
}
function shouldShowBlinkRevisionsOnly()
@@ -125,11 +125,11 @@
var annotations = [];
// Dygraph prefers to be handed data in chronological order.
- for (var i = results[FIXABLE_COUNTS_KEY].length - 1; i >= 0; i--) {
+ for (var i = g_totalFailureCount.length - 1; i >= 0; i--) {
var buildDate = new Date(results[TIMESTAMPS_KEY][i] * 1000);
// FIXME: Find a better way to exclude outliers. This is just so we
// exclude runs where every test failed.
- var failureCount = Math.min(results[FIXABLE_COUNT_KEY][i], 10000);
+ var failureCount = Math.min(g_totalFailureCount[i], 10000);
if (g_history.dashboardSpecificState.ignoreFlakyTests)
failureCount -= g_currentBuilderTestResults.flakyDeltasByBuild[i].total || 0;
@@ -254,22 +254,23 @@
addRow(label, currentValue + deltaText);
}
- var expectations = expectationsMap();
var flakyDeltasByBuild = g_currentBuilderTestResults.flakyDeltasByBuild;
- for (var expectationKey in expectations) {
- if (expectationKey in results[FIXABLE_COUNTS_KEY][index]) {
- var currentCount = results[FIXABLE_COUNTS_KEY][index][expectationKey];
- var previousCount = results[FIXABLE_COUNTS_KEY][index + 1][expectationKey];
- if (g_history.dashboardSpecificState.ignoreFlakyTests) {
- currentCount -= flakyDeltasByBuild[index][expectationKey] || 0;
- previousCount -= flakyDeltasByBuild[index + 1][expectationKey] || 0;
- }
- addNumberRow(expectations[expectationKey], currentCount, previousCount);
+ var failures_by_type = results[FAILURES_BY_TYPE_KEY];
+ for (var failureType in failures_by_type) {
+ var failureCount = failures_by_type[failureType];
+ var currentCount = failureCount[index];
+ var previousCount = failureCount[index + 1];
+ if (!currentCount && !previousCount)
+ continue;
+ if (g_history.dashboardSpecificState.ignoreFlakyTests) {
+ currentCount -= flakyDeltasByBuild[index][failureType] || 0;
+ previousCount -= flakyDeltasByBuild[index + 1][failureType] || 0;
}
+ addNumberRow(failureType, currentCount, previousCount);
}
- var currentTotal = results[FIXABLE_COUNT_KEY][index];
- var previousTotal = results[FIXABLE_COUNT_KEY][index + 1];
+ var currentTotal = g_totalFailureCount[index];
+ var previousTotal = g_totalFailureCount[index + 1];
if (g_history.dashboardSpecificState.ignoreFlakyTests) {
currentTotal -= flakyDeltasByBuild[index].total || 0;
previousTotal -= flakyDeltasByBuild[index + 1].total || 0;
@@ -306,7 +307,7 @@
var currentResults = g_currentBuilderTestResults.resultsByBuild[index];
var testNames = g_currentBuilderTestResults.testNames;
var previousResults = g_currentBuilderTestResults.resultsByBuild[index + 1];
- var expectations = expectationsMap();
+ var expectations = g_currentBuilderTestResults.failureMap;
var deltas = {};
function addDelta(category, testIndex)
@@ -363,14 +364,6 @@
deltaWindow.document.write(html);
}
-var _FAILURE_EXPECTATIONS = {
- 'T': 1,
- 'F': 1,
- 'C': 1,
- 'I': 1,
- 'Z': 1
-};
-
// "Decompresses" the RLE-encoding of test results so that we can query it
// by build index and test name.
//
@@ -383,7 +376,7 @@
function _decompressResults(builderResults)
{
var builderTestResults = builderResults[TESTS_KEY];
- var buildCount = builderResults[FIXABLE_COUNTS_KEY].length;
+ var buildCount = g_totalFailureCount.length;
var resultsByBuild = new Array(buildCount);
var flakyDeltasByBuild = new Array(buildCount);
@@ -403,6 +396,8 @@
var testNames = new Array(testCount);
var flakyTests = new Array(testCount);
+ var failureMap = builderResults[FAILURE_MAP_KEY];
+
// Decompress and "invert" test results (by build instead of by test) and
// determine which are flaky.
for (var testName in builderTestResults) {
@@ -414,7 +409,7 @@
var count = rleResult[RLE.LENGTH];
var value = rleResult[RLE.VALUE];
- if (count == 1 && value in _FAILURE_EXPECTATIONS)
+ if (count == 1 && isFailingResult(failureMap, value))
oneBuildFailureCount++;
for (var j = 0; j < count; j++) {
@@ -451,7 +446,7 @@
buildTestResults[key]++;
}
addFlakyDelta(value);
- if (value != 'P' && value != 'N')
+ if (isFailingResult(failureMap, value))
addFlakyDelta('total');
if (currentBuildIndex == buildCount)
break;
@@ -463,7 +458,8 @@
testNames: testNames,
resultsByBuild: resultsByBuild,
flakyTests: flakyTests,
- flakyDeltasByBuild: flakyDeltasByBuild
+ flakyDeltasByBuild: flakyDeltasByBuild,
+ failureMap: failureMap
};
}
diff --git a/Tools/TestResultServer/static-dashboards/treemap.js b/Tools/TestResultServer/static-dashboards/treemap.js
index db99b8a..f72b0bb 100644
--- a/Tools/TestResultServer/static-dashboards/treemap.js
+++ b/Tools/TestResultServer/static-dashboards/treemap.js
@@ -97,7 +97,7 @@
var g_history = new history.History(treemapConfig);
g_history.parseCrossDashboardParameters();
-var TEST_URL_BASE_PATH = "http://svn.webkit.org/repository/webkit/trunk/";
+var TEST_URL_BASE_PATH = "http://src.chromium.org/blink/trunk/";
function humanReadableTime(milliseconds)
{
diff --git a/Tools/TestResultServer/static-dashboards/ui.js b/Tools/TestResultServer/static-dashboards/ui.js
index 120e2a9..abce48d 100644
--- a/Tools/TestResultServer/static-dashboards/ui.js
+++ b/Tools/TestResultServer/static-dashboards/ui.js
@@ -169,7 +169,7 @@
var rangeUrl = 'http://build.chromium.org/f/chromium/perf/dashboard/ui/changelog' +
(isChrome ? '' : '_blink') + '.html?url=/trunk' + (isChrome ? '/src' : '') +
- '&range=' + previousRevision + ':' + currentRevision + '&mode=html';
+ '&range=' + (previousRevision + 1) + ':' + currentRevision + '&mode=html';
return '<a href="' + rangeUrl + '">r' + (previousRevision + 1) + ' to r' + currentRevision + '</a>';
}
diff --git a/Tools/TestResultServer/static-dashboards/ui_unittests.js b/Tools/TestResultServer/static-dashboards/ui_unittests.js
new file mode 100644
index 0000000..3a3ff7d
--- /dev/null
+++ b/Tools/TestResultServer/static-dashboards/ui_unittests.js
@@ -0,0 +1,71 @@
+// Copyright (C) 2013 Google Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+module('ui');
+
+test('chromiumRevisionLinkOneRevision', 1, function() {
+ var results = {};
+ results[CHROME_REVISIONS_KEY] = [3, 2, 1];
+ var html = ui.html.chromiumRevisionLink(results, 1);
+ equal(html, '<a href="http://src.chromium.org/viewvc/chrome?view=rev&revision=2">r2</a>');
+});
+
+test('chromiumRevisionLinkAtRevision', 1, function() {
+ var results = {};
+ results[CHROME_REVISIONS_KEY] = [3, 2, 2];
+ var html = ui.html.chromiumRevisionLink(results, 1);
+ equal(html, 'At <a href="http://src.chromium.org/viewvc/chrome?view=rev&revision=2">r2</a>');
+});
+
+test('chromiumRevisionLinkRevisionRange', 1, function() {
+ var results = {};
+ results[CHROME_REVISIONS_KEY] = [5, 2];
+ var html = ui.html.chromiumRevisionLink(results, 0);
+ equal(html, '<a href="http://build.chromium.org/f/chromium/perf/dashboard/ui/changelog.html?url=/trunk/src&range=3:5&mode=html">r3 to r5</a>');
+});
+
+test('blinkRevisionLinkOneRevision', 1, function() {
+ var results = {};
+ results[BLINK_REVISIONS_KEY] = [3, 2, 1];
+ var html = ui.html.blinkRevisionLink(results, 1);
+ equal(html, '<a href="http://src.chromium.org/viewvc/blink?view=rev&revision=2">r2</a>');
+});
+
+test('blinkRevisionLinkAtRevision', 1, function() {
+ var results = {};
+ results[BLINK_REVISIONS_KEY] = [3, 2, 2];
+ var html = ui.html.blinkRevisionLink(results, 1);
+ equal(html, 'At <a href="http://src.chromium.org/viewvc/blink?view=rev&revision=2">r2</a>');
+});
+
+test('blinkRevisionLinkRevisionRange', 1, function() {
+ var results = {};
+ results[BLINK_REVISIONS_KEY] = [5, 2];
+ var html = ui.html.blinkRevisionLink(results, 0);
+ equal(html, '<a href="http://build.chromium.org/f/chromium/perf/dashboard/ui/changelog_blink.html?url=/trunk&range=3:5&mode=html">r3 to r5</a>');
+});
diff --git a/Tools/TestResultServer/templates/uploadform.html b/Tools/TestResultServer/templates/uploadform.html
index 9974a24..e4389f2 100644
--- a/Tools/TestResultServer/templates/uploadform.html
+++ b/Tools/TestResultServer/templates/uploadform.html
@@ -23,8 +23,6 @@
</tr>
</table>
<br>
- <div><input class=button type="checkbox" name="incremental">Incremental results, merge with server file.</div>
- <br>
<div><input class=button type="file" name="file" multiple></div>
<br>
<div><input class=button type="submit" value="Upload"></div>