# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
#       copyright notice, this list of conditions and the following
#       disclaimer in the documentation and/or other materials provided
#       with the distribution.
#     * Neither the name of Google Inc. nor the names of its
#       contributors may be used to endorse or promote products derived
#       from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


import os
import shutil
import time

from pool import Pool
from . import commands
from . import perfdata
from . import statusfile
from . import utils


class Job(object):
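  """Bundles the command line and metadata needed to run a single test."""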
  def __init__(self, command, dep_command, test_id, timeout, verbose):
    self.command = command
    self.dep_command = dep_command
    self.id = test_id
    self.timeout = timeout
    self.verbose = verbose


def RunTest(job):
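  """Runs a single test job and returns (test_id, output, duration).

  If the job has a dependency, the dependency is executed first; on a
  non-zero exit code its output is returned in place of the test's.
  """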
  start_time = time.time()
  if job.dep_command is not None:
    dep_output = commands.Execute(job.dep_command, job.verbose, job.timeout)
    # TODO(jkummerow): We approximate the test suite specific function
    # IsFailureOutput() by just checking the exit code here. Currently
    # only cctests define dependencies, for which this simplification is
    # correct.
    if dep_output.exit_code != 0:
      return (job.id, dep_output, time.time() - start_time)
  output = commands.Execute(job.command, job.verbose, job.timeout)
  return (job.id, output, time.time() - start_time)


class Runner(object):
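  """Distributes test jobs over a pool of workers and collects the results.

  Unless sorting is disabled, tests are sorted by their last recorded
  duration, slowest first, so long-running tests start as early as possible.
  """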

  def __init__(self, suites, progress_indicator, context):
    self.datapath = os.path.join("out", "testrunner_data")
    self.perf_data_manager = perfdata.PerfDataManager(self.datapath)
    self.perfdata = self.perf_data_manager.GetStore(context.arch, context.mode)
    self.perf_failures = False
    self.printed_allocations = False
    self.tests = [ t for s in suites for t in s.tests ]
    if not context.no_sorting:
      for t in self.tests:
        t.duration = self.perfdata.FetchPerfData(t) or 1.0
      self.tests.sort(key=lambda t: t.duration, reverse=True)
    self._CommonInit(len(self.tests), progress_indicator, context)

  def _CommonInit(self, num_tests, progress_indicator, context):
    self.indicator = progress_indicator
    progress_indicator.runner = self
    self.context = context
    self.succeeded = 0
    self.total = num_tests
    self.remaining = num_tests
    self.failed = []
    self.crashed = 0
    self.reran_tests = 0

  def _RunPerfSafe(self, fun):
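    """Runs fun(), recording rather than raising any perf-data exception."""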
    try:
      fun()
    except Exception as e:
      print("PerfData exception: %s" % e)
      self.perf_failures = True

  def _GetJob(self, test):
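    """Builds the Job for a single test.

    The timeout is quadrupled for --stress-opt runs and doubled for tests
    marked as SLOW in the status file.
    """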
    command = self.GetCommand(test)
    timeout = self.context.timeout
    if ("--stress-opt" in test.flags or
        "--stress-opt" in self.context.mode_flags or
        "--stress-opt" in self.context.extra_flags):
      timeout *= 4
    # FIXME(machenbach): Make this more OO. Don't expose default outcomes or
    # the like.
    if statusfile.IsSlow(test.outcomes or [statusfile.PASS]):
      timeout *= 2
    if test.dependency is not None:
      dep_command = [ c.replace(test.path, test.dependency) for c in command ]
    else:
      dep_command = None
    return Job(command, dep_command, test.id, timeout, self.context.verbose)

  def _MaybeRerun(self, pool, test):
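    """Requeues a failed test for another run, subject to the rerun limits.

    A test is not rerun once its own rerun count, the overall budget of
    rerun tests, or the slow-test restriction (at most one rerun) is hit.
    """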
    if test.run <= self.context.rerun_failures_count:
      # Possibly rerun this test if its run count is below the per-test
      # maximum. (<= because the flag counts reruns, excluding the first run.)
      if test.run == 1:
        # Count the overall number of rerun tests on the first rerun.
        if self.reran_tests < self.context.rerun_failures_max:
          self.reran_tests += 1
        else:
          # Don't rerun this test if the overall limit on rerun tests has
          # been reached.
          return
      if test.run >= 2 and test.duration > self.context.timeout / 20.0:
        # Rerun slow tests at most once.
        return

      # Rerun this test.
      test.duration = None
      test.output = None
      test.run += 1
      pool.add([self._GetJob(test)])
      self.remaining += 1

  def _ProcessTestNormal(self, test, result, pool):
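    """Records the result of a normal-mode run and requeues failures.

    Returns True iff the perf database should be updated, i.e. the test
    produced no unexpected output.
    """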
    self.indicator.AboutToRun(test)
    test.output = result[1]
    test.duration = result[2]
    has_unexpected_output = test.suite.HasUnexpectedOutput(test)
    if has_unexpected_output:
      self.failed.append(test)
      if test.output.HasCrashed():
        self.crashed += 1
    else:
      self.succeeded += 1
    self.remaining -= 1
    # For the indicator, anything that ran more than once is treated as
    # unexpected, even if it flakily passed, so that reruns show up in the
    # output.
    self.indicator.HasRun(test, has_unexpected_output or test.run > 1)
    if has_unexpected_output:
      # Rerun test failures after the indicator has processed the results.
      self._MaybeRerun(pool, test)
    # Update the perf database if the test succeeded.
    return not has_unexpected_output

  def _ProcessTestPredictable(self, test, result, pool):
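    """Records the result of a run in predictable mode.

    Each test runs up to three times; from the second run on, the trailing
    "### Allocations = " line of consecutive runs is compared. A mismatch
    counts as a failure, three matching runs as a success. Always returns
    True so the perf database is updated after every run.
    """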
    def HasDifferentAllocations(output1, output2):
      def AllocationStr(stdout):
        for line in reversed((stdout or "").splitlines()):
          if line.startswith("### Allocations = "):
            self.printed_allocations = True
            return line
        return ""
      return (AllocationStr(output1.stdout) != AllocationStr(output2.stdout))

    # Always pass the test duration for the database update.
    test.duration = result[2]
    if test.run == 1 and result[1].HasTimedOut():
      # If we get a timeout in the first run, we are already in an
      # unpredictable state. Just report it as a failure and don't rerun.
      self.indicator.AboutToRun(test)
      test.output = result[1]
      self.remaining -= 1
      self.failed.append(test)
      self.indicator.HasRun(test, True)
      return True
    if test.run > 1 and HasDifferentAllocations(test.output, result[1]):
      # From the second run on, check for different allocations. If a
      # difference is found, call the indicator twice to report both runs.
      # All runs of each test count as one for the statistics.
      self.indicator.AboutToRun(test)
      self.remaining -= 1
      self.failed.append(test)
      self.indicator.HasRun(test, True)
      self.indicator.AboutToRun(test)
      test.output = result[1]
      self.indicator.HasRun(test, True)
    elif test.run >= 3:
      # No difference on the third run -> report a success.
      self.indicator.AboutToRun(test)
      self.remaining -= 1
      self.succeeded += 1
      test.output = result[1]
      self.indicator.HasRun(test, False)
    else:
      # No difference yet and fewer than three runs -> add another run and
      # remember the output for comparison.
      test.run += 1
      test.output = result[1]
      pool.add([self._GetJob(test)])
    # Always update the perf database.
    return True

  def Run(self, jobs):
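    """Runs all tests using the given number of parallel jobs.

    Returns 0 if every test behaved as expected, 1 otherwise.
    """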
    self.indicator.Starting()
    self._RunInternal(jobs)
    self.indicator.Done()
    if self.failed or self.remaining:
      return 1
    return 0

  def _RunInternal(self, jobs):
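    """Feeds all tests to a worker pool and consumes results as they arrive.

    An exception raised while queuing a job is re-raised only after the
    remaining tests have run. On perf-data failures the store is deleted.
    """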
    pool = Pool(jobs)
    test_map = {}
    # TODO(machenbach): Instead of filling the queue completely before
    # pool.imap_unordered, make this a generator that already starts testing
    # while the queue is filled.
    queue = []
    queued_exception = None
    for test in self.tests:
      assert test.id >= 0
      test_map[test.id] = test
      try:
        queue.append([self._GetJob(test)])
      except Exception as e:
        # If this failed, save the exception and re-raise it later (after
        # all other tests have had a chance to run).
        queued_exception = e
        continue
    try:
      it = pool.imap_unordered(RunTest, queue)
      for result in it:
        test = test_map[result[0]]
        if self.context.predictable:
          update_perf = self._ProcessTestPredictable(test, result, pool)
        else:
          update_perf = self._ProcessTestNormal(test, result, pool)
        if update_perf:
          self._RunPerfSafe(lambda: self.perfdata.UpdatePerfData(test))
    finally:
      pool.terminate()
      self._RunPerfSafe(lambda: self.perf_data_manager.close())
      if self.perf_failures:
        # Nuke perf data in case of failures. This might not work on Windows
        # as some files might still be open.
        print("Deleting perf test data due to db corruption.")
        shutil.rmtree(self.datapath)
    if queued_exception:
      raise queued_exception

    # Make sure that any allocations were printed in predictable mode.
    assert not self.context.predictable or self.printed_allocations

  def GetCommand(self, test):
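    """Assembles the complete shell command line for a single test case."""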
    d8testflag = []
    shell = test.suite.shell()
    if shell == "d8":
      d8testflag = ["--test"]
    if utils.IsWindows():
      shell += ".exe"
    cmd = (self.context.command_prefix +
           [os.path.abspath(os.path.join(self.context.shell_dir, shell))] +
           d8testflag +
           ["--random-seed=%s" % self.context.random_seed] +
           test.suite.GetFlagsForTestCase(test, self.context) +
           self.context.extra_flags)
    return cmd


class BreakNowException(Exception):
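  """Signals that the test run should be aborted immediately."""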
  def __init__(self, value):
    self.value = value

  def __str__(self):
    return repr(self.value)