# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
#       copyright notice, this list of conditions and the following
#       disclaimer in the documentation and/or other materials provided
#       with the distribution.
#     * Neither the name of Google Inc. nor the names of its
#       contributors may be used to endorse or promote products derived
#       from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| 27 | |
| 28 | |
| 29 | import imp |
| 30 | import os |
| 31 | |
| 32 | from . import commands |
| 33 | from . import statusfile |
| 34 | from . import utils |
| 35 | from ..objects import testcase |
| 36 | |
# Use this to run several variants of the tests.
VARIANT_FLAGS = {
    "default": [],
    "stress": ["--stress-opt", "--always-opt"],
    "turbofan": ["--turbo-asm", "--turbo-filter=*", "--always-opt"],
    "nocrankshaft": ["--nocrankshaft"]}

# Subset of variants used for tests marked as only-fast-variants in the
# status file. dict.items() (not the Python-2-only iteritems()) keeps this
# module importable on both Python 2 and Python 3 with identical results.
FAST_VARIANT_FLAGS = [
  f for v, f in VARIANT_FLAGS.items() if v in ["default", "turbofan"]
]
| 47 | |
class TestSuite(object):
  """Base class for a named collection of tests rooted at a directory.

  Subclasses override ListTests() and GetFlagsForTestCase() to describe how
  their tests are discovered and executed; the base class handles status-file
  processing, filtering and outcome classification.
  """

  @staticmethod
  def LoadTestSuite(root):
    """Loads the suite configured by <root>/testcfg.py.

    Falls back to a GoogleTestSuite when no testcfg module is present.
    """
    name = root.split(os.path.sep)[-1]
    f = None
    try:
      (f, pathname, description) = imp.find_module("testcfg", [root])
      module = imp.load_module("testcfg", f, pathname, description)
      return module.GetSuite(name, root)
    except ImportError:
      # Use default if no testcfg is present. Only ImportError is caught so
      # that genuine errors inside an existing testcfg.py still propagate
      # instead of being silently masked by the fallback.
      return GoogleTestSuite(name, root)
    finally:
      if f:
        f.close()

  def __init__(self, name, root):
    self.name = name  # string
    self.root = root  # string containing path
    self.tests = None  # list of TestCase objects
    self.rules = None  # dictionary mapping test path to list of outcomes
    self.wildcards = None  # dictionary mapping test paths to list of outcomes
    self.total_duration = None  # float, assigned on demand

  def shell(self):
    """Name of the executable used to run this suite's tests."""
    return "d8"

  def suffix(self):
    """File-name suffix of this suite's test sources."""
    return ".js"

  def status_file(self):
    """Path of the status file listing expected outcomes for this suite."""
    return "%s/%s.status" % (self.root, self.name)

  # Used in the status file and for stdout printing.
  def CommonTestName(self, testcase):
    if utils.IsWindows():
      # Normalize to forward slashes so names match status-file entries.
      return testcase.path.replace("\\", "/")
    else:
      return testcase.path

  def ListTests(self, context):
    """Returns the list of TestCase objects of this suite. Subclass duty."""
    raise NotImplementedError

  def VariantFlags(self, testcase, default_flags):
    """Returns the list of flag variants to run |testcase| with.

    Status-file outcomes may restrict a test to the standard variant only,
    or to the fast variants; otherwise |default_flags| is returned as-is.
    """
    if testcase.outcomes and statusfile.OnlyStandardVariant(testcase.outcomes):
      return [[]]
    if testcase.outcomes and statusfile.OnlyFastVariants(testcase.outcomes):
      # List comprehension rather than filter() so the result is a list on
      # both Python 2 and Python 3.
      return [flags for flags in default_flags if flags in FAST_VARIANT_FLAGS]
    return default_flags

  def DownloadData(self):
    """Hook for suites that fetch external test data; default is a no-op."""
    pass

  def ReadStatusFile(self, variables):
    """Parses the suite's status file into self.rules and self.wildcards."""
    (self.rules, self.wildcards) = \
        statusfile.ReadStatusFile(self.status_file(), variables)

  def ReadTestCases(self, context):
    """Populates self.tests by listing the suite's tests."""
    self.tests = self.ListTests(context)

  @staticmethod
  def _FilterFlaky(flaky, mode):
    # True means "drop this test" for the given flaky-test mode.
    return (mode == "run" and not flaky) or (mode == "skip" and flaky)

  @staticmethod
  def _FilterSlow(slow, mode):
    # True means "drop this test" for the given slow-test mode.
    return (mode == "run" and not slow) or (mode == "skip" and slow)

  @staticmethod
  def _FilterPassFail(pass_fail, mode):
    # True means "drop this test" for the given pass/fail-test mode.
    return (mode == "run" and not pass_fail) or (mode == "skip" and pass_fail)

  def FilterTestCasesByStatus(self, warn_unused_rules,
                              flaky_tests="dontcare",
                              slow_tests="dontcare",
                              pass_fail_tests="dontcare"):
    """Drops tests according to status-file rules and the given modes.

    Matches each test against exact and wildcard rules, records the outcomes
    on the TestCase object, and filters out skipped tests as well as
    flaky/slow/pass-fail tests depending on the corresponding mode
    ("run", "skip" or "dontcare"). Optionally warns about unused rules.
    """
    filtered = []
    used_rules = set()
    for t in self.tests:
      flaky = False
      slow = False
      pass_fail = False
      testname = self.CommonTestName(t)
      if testname in self.rules:
        used_rules.add(testname)
        # Even for skipped tests, as the TestCase object stays around and
        # PrintReport() uses it.
        t.outcomes = self.rules[testname]
        if statusfile.DoSkip(t.outcomes):
          continue  # Don't add skipped tests to |filtered|.
        for outcome in t.outcomes:
          # "Flags: ..." outcomes add extra command-line flags to the test.
          if outcome.startswith('Flags: '):
            t.flags += outcome[7:].split()
        flaky = statusfile.IsFlaky(t.outcomes)
        slow = statusfile.IsSlow(t.outcomes)
        pass_fail = statusfile.IsPassOrFail(t.outcomes)
      skip = False
      for rule in self.wildcards:
        assert rule[-1] == '*'
        if testname.startswith(rule[:-1]):
          used_rules.add(rule)
          t.outcomes = self.wildcards[rule]
          if statusfile.DoSkip(t.outcomes):
            skip = True
            break  # "for rule in self.wildcards"
          flaky = flaky or statusfile.IsFlaky(t.outcomes)
          slow = slow or statusfile.IsSlow(t.outcomes)
          pass_fail = pass_fail or statusfile.IsPassOrFail(t.outcomes)
      if (skip or self._FilterFlaky(flaky, flaky_tests)
          or self._FilterSlow(slow, slow_tests)
          or self._FilterPassFail(pass_fail, pass_fail_tests)):
        continue  # "for t in self.tests"
      filtered.append(t)
    self.tests = filtered

    if not warn_unused_rules:
      return

    for rule in self.rules:
      if rule not in used_rules:
        print("Unused rule: %s -> %s" % (rule, self.rules[rule]))
    for rule in self.wildcards:
      if rule not in used_rules:
        print("Unused rule: %s -> %s" % (rule, self.wildcards[rule]))

  def FilterTestCasesByArgs(self, args):
    """Keeps only the tests selected by command-line path arguments."""
    filtered = []
    filtered_args = []
    for a in args:
      argpath = a.split(os.path.sep)
      if argpath[0] != self.name:
        continue
      if len(argpath) == 1 or (len(argpath) == 2 and argpath[1] == '*'):
        return  # Don't filter, run all tests in this suite.
      path = os.path.sep.join(argpath[1:])
      if path[-1] == '*':
        path = path[:-1]  # Trailing '*' selects by prefix.
      filtered_args.append(path)
    for t in self.tests:
      for a in filtered_args:
        if t.path.startswith(a):
          filtered.append(t)
          break
    self.tests = filtered

  def GetFlagsForTestCase(self, testcase, context):
    """Returns the command-line flags to run |testcase|. Subclass duty."""
    raise NotImplementedError

  def GetSourceForTest(self, testcase):
    """Returns the test's source text; overridden where sources exist."""
    return "(no source available)"

  def IsFailureOutput(self, output, testpath):
    """A non-zero exit code counts as failure by default."""
    return output.exit_code != 0

  def IsNegativeTest(self, testcase):
    """Negative tests are expected to fail; default is no."""
    return False

  def HasFailed(self, testcase):
    """Returns True if the test did not behave as expected."""
    execution_failed = self.IsFailureOutput(testcase.output, testcase.path)
    if self.IsNegativeTest(testcase):
      return not execution_failed
    else:
      return execution_failed

  def GetOutcome(self, testcase):
    """Classifies the test run as CRASH, TIMEOUT, FAIL or PASS."""
    if testcase.output.HasCrashed():
      return statusfile.CRASH
    elif testcase.output.HasTimedOut():
      return statusfile.TIMEOUT
    elif self.HasFailed(testcase):
      return statusfile.FAIL
    else:
      return statusfile.PASS

  def HasUnexpectedOutput(self, testcase):
    """True when the actual outcome is not among the expected ones."""
    outcome = self.GetOutcome(testcase)
    # Tests without explicit outcomes are expected to PASS.
    return outcome not in (testcase.outcomes or [statusfile.PASS])

  def StripOutputForTransmit(self, testcase):
    """Drops stdout/stderr of expected results to keep transfers small."""
    if not self.HasUnexpectedOutput(testcase):
      testcase.output.stdout = ""
      testcase.output.stderr = ""

  def CalculateTotalDuration(self):
    """Sums the durations of all tests and caches the result."""
    # Seed with 0.0 so an empty suite still yields a float.
    self.total_duration = sum((t.duration for t in self.tests), 0.0)
    return self.total_duration
| 237 | |
| 238 | |
class GoogleTestSuite(TestSuite):
  """Suite whose tests are googletest cases inside a single executable.

  The executable is named after the suite and is queried with
  --gtest_list_tests to discover the individual tests.
  """

  def __init__(self, name, root):
    super(GoogleTestSuite, self).__init__(name, root)

  def ListTests(self, context):
    """Discovers tests by running the binary with --gtest_list_tests.

    Raises:
      Exception: if the executable fails to list its tests.
    """
    shell = os.path.abspath(os.path.join(context.shell_dir, self.shell()))
    if utils.IsWindows():
      shell += ".exe"
    output = commands.Execute(context.command_prefix +
                              [shell, "--gtest_list_tests"] +
                              context.extra_flags)
    if output.exit_code != 0:
      # print() with a single argument behaves identically on Python 2
      # (parenthesized expression) and Python 3 (function call).
      print(output.stdout)
      print(output.stderr)
      raise Exception("Test executable failed to list the tests.")
    tests = []
    test_case = ''
    for line in output.stdout.splitlines():
      stripped = line.strip()
      if not stripped:
        continue  # Skip blank lines; split()[0] would raise IndexError.
      # gtest prints "CaseName." lines followed by indented test names.
      test_desc = stripped.split()[0]
      if test_desc.endswith('.'):
        test_case = test_desc
      elif test_case and test_desc:
        test = testcase.TestCase(self, test_case + test_desc, dependency=None)
        tests.append(test)
    tests.sort()
    return tests

  def GetFlagsForTestCase(self, testcase, context):
    """Returns flags that select and run exactly |testcase|."""
    return (testcase.flags + ["--gtest_filter=" + testcase.path] +
            ["--gtest_random_seed=%s" % context.random_seed] +
            ["--gtest_print_time=0"] +
            context.mode_flags)

  def shell(self):
    """The gtest executable is named after the suite itself."""
    return self.name