Crosperf: Fix many broken unittests.
Apparently people have been making changes and not running the
unittests; many of the unittests were suffering from bit-rot. This
CL fixes most of the broken unittests (there are still 3 large ones
being worked on -- those will be in another CL).
BUG=chromium:567883
TEST=Verified that these unittests work now.
Change-Id: I0252a276a035894e70e04c61339cdba148c9bbfd
Reviewed-on: https://chrome-internal-review.googlesource.com/241513
Commit-Ready: Luis Lozano <llozano@chromium.org>
Tested-by: Luis Lozano <llozano@chromium.org>
Reviewed-by: Luis Lozano <llozano@chromium.org>
diff --git a/crosperf/benchmark_run.py b/crosperf/benchmark_run.py
index 4d0eb74..f10326b 100644
--- a/crosperf/benchmark_run.py
+++ b/crosperf/benchmark_run.py
@@ -251,7 +251,7 @@
class MockBenchmarkRun(BenchmarkRun):
- """Inherited from BenchmarkRuna."""
+ """Inherited from BenchmarkRun."""
def ReadCache(self):
# Just use the first machine for running the cached version,
@@ -292,7 +292,7 @@
self.test_args,
self.profiler_args)
self.run_completed = True
- rr = MockResult("logger", self.label, self.log_level)
+ rr = MockResult("logger", self.label, self.log_level, machine)
rr.out = out
rr.err = err
rr.retval = retval
diff --git a/crosperf/benchmark_run_unittest.py b/crosperf/benchmark_run_unittest.py
index 07c8331..4917118 100755
--- a/crosperf/benchmark_run_unittest.py
+++ b/crosperf/benchmark_run_unittest.py
@@ -10,7 +10,7 @@
import unittest
import inspect
-from utils import logger
+from cros_utils import logger
import benchmark_run
@@ -43,8 +43,9 @@
self.test_label = MockLabel("test1", "image1", "/tmp/test_benchmark_run",
"x86-alex", "chromeos2-row1-rack4-host9.cros",
- image_args="",
- cache_dir="", cache_only=False)
+ image_args="", cache_dir="", cache_only=False,
+ log_level="average", compiler="gcc")
+
self.test_cache_conditions = [CacheConditions.CACHE_FILE_EXISTS,
CacheConditions.CHECKSUMS_MATCH]
@@ -56,15 +57,16 @@
def testDryRun(self):
my_label = MockLabel("test1", "image1", "/tmp/test_benchmark_run",
"x86-alex", "chromeos2-row1-rack4-host9.cros",
- image_args="",
- cache_dir="", cache_only=False)
+ image_args="", cache_dir="", cache_only=False,
+ log_level="average", compiler="gcc")
+
logging_level = "average"
m = MockMachineManager("/tmp/chromeos_root", 0, logging_level, '')
m.AddMachine("chromeos2-row1-rack4-host9.cros")
bench = Benchmark("page_cycler.netsim.top_10", # name
"page_cycler.netsim.top_10", # test_name
"", # test_args
- 1, # iteratins
+ 1, # iterations
False, # rm_chroot_tmp
"", # perf_args
suite="telemetry_Crosperf") # suite
@@ -386,7 +388,7 @@
self.assertEqual (mock_result.call_count, 1)
mock_result.assert_called_with (self.mock_logger, 'average',
- self.test_label, "{'Score':100}",
+ self.test_label, None, "{'Score':100}",
"", 0, False, 'page_cycler.netsim.top_10',
'telemetry_Crosperf')
diff --git a/crosperf/crosperf_unittest.py b/crosperf/crosperf_unittest.py
index 3af3507..082d8a6 100755
--- a/crosperf/crosperf_unittest.py
+++ b/crosperf/crosperf_unittest.py
@@ -19,8 +19,8 @@
from help import Help
-from utils import command_executer
-from utils import logger
+from cros_utils import command_executer
+from cros_utils import logger
EXPERIMENT_FILE_1 = """
board: x86-alex
@@ -60,7 +60,7 @@
self.assertEqual(len(options_before), 3)
crosperf.SetupParserOptions(parser)
options_after = parser._get_all_options()
- self.assertEqual(len(options_after), 26)
+ self.assertEqual(len(options_after), 29)
def test_convert_options_to_settings(self):
@@ -78,7 +78,7 @@
settings = crosperf.ConvertOptionsToSettings(options)
self.assertIsNotNone(settings)
self.assertIsInstance(settings, settings_factory.GlobalSettings)
- self.assertEqual(len(settings.fields), 22)
+ self.assertEqual(len(settings.fields), 25)
self.assertTrue(settings.GetField('rerun'))
argv = ['crosperf/crosperf.py', 'temp.exp']
options, args = parser.parse_args(argv)
diff --git a/crosperf/experiment.py b/crosperf/experiment.py
index 2cc5062..caf2569 100644
--- a/crosperf/experiment.py
+++ b/crosperf/experiment.py
@@ -203,6 +203,9 @@
# to unlock everything.
self.machine_manager.Cleanup()
else:
+ if test_flag.GetTestMode():
+ return
+
all_machines = self.locked_machines
if not all_machines:
return
diff --git a/crosperf/experiment_factory_unittest.py b/crosperf/experiment_factory_unittest.py
index 89f3864..148b7e4 100755
--- a/crosperf/experiment_factory_unittest.py
+++ b/crosperf/experiment_factory_unittest.py
@@ -20,6 +20,7 @@
import experiment_factory
import machine_manager
import settings_factory
+import test_flag
EXPERIMENT_FILE_1 = """
board: x86-alex
@@ -67,7 +68,8 @@
bench_list = []
ef._AppendBenchmarkSet(bench_list,
experiment_factory.telemetry_perfv2_tests,
- "", 1, False, "", "telemetry_Crosperf", False)
+ "", 1, False, "", "telemetry_Crosperf", False, 0,
+ False)
self.assertEqual(len(bench_list),
len(experiment_factory.telemetry_perfv2_tests))
self.assertTrue(type(bench_list[0]) is benchmark.Benchmark)
@@ -75,7 +77,8 @@
bench_list = []
ef._AppendBenchmarkSet(bench_list,
experiment_factory.telemetry_pagecycler_tests,
- "", 1, False, "", "telemetry_Crosperf", False)
+ "", 1, False, "", "telemetry_Crosperf", False, 0,
+ False)
self.assertEqual(len(bench_list),
len(experiment_factory.telemetry_pagecycler_tests))
self.assertTrue(type(bench_list[0]) is benchmark.Benchmark)
@@ -83,7 +86,8 @@
bench_list = []
ef._AppendBenchmarkSet(bench_list,
experiment_factory.telemetry_toolchain_perf_tests,
- "", 1, False, "", "telemetry_Crosperf", False)
+ "", 1, False, "", "telemetry_Crosperf", False, 0,
+ False)
self.assertEqual(len(bench_list),
len(experiment_factory.telemetry_toolchain_perf_tests))
self.assertTrue(type(bench_list[0]) is benchmark.Benchmark)
@@ -110,7 +114,6 @@
def FakeGetXbuddyPath(build, board, chroot, log_level):
return "fake_image_path"
-
ef = ExperimentFactory()
ef._AppendBenchmarkSet = FakeAppendBenchmarkSet
ef.GetDefaultRemotes = FakeGetDefaultRemotes
@@ -124,7 +127,8 @@
mock_experiment_file = ExperimentFile(StringIO.StringIO(""))
mock_experiment_file.all_settings = []
- # Basic test.
+ test_flag.SetTestMode(True)
+ # Basic test.
global_settings.SetField("name","unittest_test")
global_settings.SetField("board", "lumpy")
global_settings.SetField("remote", "123.45.67.89 123.45.76.80")
@@ -162,6 +166,7 @@
self.assertEqual(exp.labels[0].board, "lumpy")
# Second test: Remotes listed in labels.
+ test_flag.SetTestMode(True)
label_settings.SetField("remote", "chromeos1.cros chromeos2.cros")
exp = ef.GetExperiment(mock_experiment_file, "", "")
self.assertEqual(exp.remote,
diff --git a/crosperf/experiment_runner.py b/crosperf/experiment_runner.py
index 7167b80..971b967 100644
--- a/crosperf/experiment_runner.py
+++ b/crosperf/experiment_runner.py
@@ -7,6 +7,7 @@
import time
import afe_lock_machine
+import test_flag
from cros_utils import command_executer
from cros_utils import logger
@@ -82,25 +83,29 @@
requested for this crosperf run, to prevent any other crosperf runs from
being able to update/use the machines while this experiment is running.
"""
- lock_mgr = afe_lock_machine.AFELockManager(
- self._GetMachineList(),
- "",
- experiment.labels[0].chromeos_root,
- None,
- log=self.l,
- )
- for m in lock_mgr.machines:
- if not lock_mgr.MachineIsKnown(m):
- lock_mgr.AddLocalMachine(m)
- machine_states = lock_mgr.GetMachineStates("lock")
- lock_mgr.CheckMachineLocks(machine_states, "lock")
- self.locked_machines = lock_mgr.UpdateMachines(True)
- self._experiment.locked_machines = self.locked_machines
- self._UpdateMachineList(self.locked_machines)
- self._experiment.machine_manager.RemoveNonLockedMachines(
- self.locked_machines)
- if len(self.locked_machines) == 0:
- raise RuntimeError("Unable to lock any machines.")
+ if test_flag.GetTestMode():
+ self.locked_machines = self._GetMachineList()
+ self._experiment.locked_machines = self.locked_machines
+ else:
+ lock_mgr = afe_lock_machine.AFELockManager(
+ self._GetMachineList(),
+ "",
+ experiment.labels[0].chromeos_root,
+ None,
+ log=self.l,
+ )
+ for m in lock_mgr.machines:
+ if not lock_mgr.MachineIsKnown(m):
+ lock_mgr.AddLocalMachine(m)
+ machine_states = lock_mgr.GetMachineStates("lock")
+ lock_mgr.CheckMachineLocks(machine_states, "lock")
+ self.locked_machines = lock_mgr.UpdateMachines(True)
+ self._experiment.locked_machines = self.locked_machines
+ self._UpdateMachineList(self.locked_machines)
+ self._experiment.machine_manager.RemoveNonLockedMachines(
+ self.locked_machines)
+ if len(self.locked_machines) == 0:
+ raise RuntimeError("Unable to lock any machines.")
def _UnlockAllMachines(self, experiment):
"""Attempt to globally unlock all of the machines requested for run.
@@ -108,7 +113,7 @@
The method will use the AFE server to globally unlock all of the machines
requested for this crosperf run.
"""
- if not self.locked_machines:
+ if not self.locked_machines or test_flag.GetTestMode():
return
lock_mgr = afe_lock_machine.AFELockManager(
diff --git a/crosperf/experiment_runner_unittest.py b/crosperf/experiment_runner_unittest.py
index 0e949eb..46f5093 100755
--- a/crosperf/experiment_runner_unittest.py
+++ b/crosperf/experiment_runner_unittest.py
@@ -13,16 +13,17 @@
import experiment_status
import machine_manager
import config
+import test_flag
from experiment_file import ExperimentFile
from experiment_factory import ExperimentFactory
from results_report import TextResultsReport
from results_report import HTMLResultsReport
from results_cache import Result
-from utils import logger
-from utils import command_executer
-from utils.email_sender import EmailSender
-from utils.file_utils import FileUtils
+from cros_utils import logger
+from cros_utils import command_executer
+from cros_utils.email_sender import EmailSender
+from cros_utils.file_utils import FileUtils
EXPERIMENT_FILE_1 = """
board: parrot
@@ -91,6 +92,7 @@
mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
def make_fake_experiment(self):
+ test_flag.SetTestMode(True)
experiment_file = ExperimentFile(StringIO.StringIO(EXPERIMENT_FILE_1))
experiment = ExperimentFactory().GetExperiment(experiment_file,
working_directory="",
@@ -105,14 +107,18 @@
def test_init(self):
- er = experiment_runner.ExperimentRunner(self.exp, self.mock_logger,
- self.mock_cmd_exec)
+ er = experiment_runner.ExperimentRunner(self.exp, json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
self.assertFalse (er._terminated)
self.assertEqual (er.STATUS_TIME_DELAY, 10)
self.exp.log_level = "verbose"
- er = experiment_runner.ExperimentRunner(self.exp, self.mock_logger,
- self.mock_cmd_exec)
+ er = experiment_runner.ExperimentRunner(self.exp, json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
self.assertEqual (er.STATUS_TIME_DELAY, 30)
@@ -145,8 +151,10 @@
# Test 1: log_level == "quiet"
self.exp.log_level = "quiet"
- er = experiment_runner.ExperimentRunner(self.exp, self.mock_logger,
- self.mock_cmd_exec)
+ er = experiment_runner.ExperimentRunner(self.exp, json_report=False,
+ using_schedv2=False,
+ log = self.mock_logger,
+ cmd_exec =self.mock_cmd_exec)
er.STATUS_TIME_DELAY = 2
mock_status_string.return_value = "Fake status string"
er._Run(self.exp)
@@ -168,8 +176,10 @@
reset()
self.exp.log_level = "average"
mock_status_string.call_count = 0
- er = experiment_runner.ExperimentRunner(self.exp, self.mock_logger,
- self.mock_cmd_exec)
+ er = experiment_runner.ExperimentRunner(self.exp, json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
er.STATUS_TIME_DELAY = 2
mock_status_string.return_value = "Fake status string"
er._Run(self.exp)
@@ -192,8 +202,10 @@
reset()
self.exp.log_level = "verbose"
mock_status_string.call_count = 0
- er = experiment_runner.ExperimentRunner(self.exp, self.mock_logger,
- self.mock_cmd_exec)
+ er = experiment_runner.ExperimentRunner(self.exp, json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
er.STATUS_TIME_DELAY = 2
mock_status_string.return_value = "Fake status string"
mock_progress_string.return_value = "Fake progress string"
@@ -220,8 +232,10 @@
def test_print_table(self, mock_report):
self.mock_logger.Reset()
mock_report.return_value = "This is a fake experiment report."
- er = experiment_runner.ExperimentRunner(self.exp, self.mock_logger,
- self.mock_cmd_exec)
+ er = experiment_runner.ExperimentRunner(self.exp, json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
er._PrintTable(self.exp)
self.assertEqual(mock_report.call_count, 1)
self.assertEqual(self.mock_logger.output_msgs,
@@ -243,8 +257,10 @@
self.mock_logger.Reset()
config.AddConfig("no_email", True)
self.exp.email_to = ["jane.doe@google.com"]
- er = experiment_runner.ExperimentRunner(self.exp, self.mock_logger,
- self.mock_cmd_exec)
+ er = experiment_runner.ExperimentRunner(self.exp, json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
# Test 1. Config:no_email; exp.email_to set ==> no email sent
er._Email(self.exp)
self.assertEqual(mock_getuser.call_count, 0)
@@ -356,10 +372,11 @@
@mock.patch.object(FileUtils, 'MkDirP')
@mock.patch.object(FileUtils, 'WriteFile')
@mock.patch.object(HTMLResultsReport, 'GetReport')
+ @mock.patch.object(TextResultsReport, 'GetReport')
@mock.patch.object(Result, 'CopyResultsTo')
@mock.patch.object(Result, 'CleanUp')
- def test_store_results(self, mock_cleanup, mock_copy, mock_report,
- mock_writefile, mock_mkdir, mock_rmdir):
+ def test_store_results(self, mock_cleanup, mock_copy, mock_text_report,
+ mock_report, mock_writefile, mock_mkdir, mock_rmdir):
self.mock_logger.Reset()
self.exp.results_directory='/usr/local/crosperf-results'
@@ -368,8 +385,10 @@
bench_run.name)
self.assertEqual (len(self.exp.benchmark_runs), 6)
- er = experiment_runner.ExperimentRunner(self.exp, self.mock_logger,
- self.mock_cmd_exec)
+ er = experiment_runner.ExperimentRunner(self.exp, json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
# Test 1. Make sure nothing is done if _terminated is true.
er._terminated = True
@@ -383,7 +402,8 @@
self.assertEqual(self.mock_logger.LogOutputCount, 0)
# Test 2. _terminated is false; everything works properly.
- fake_result = Result(self.mock_logger, self.exp.labels[0], "average")
+ fake_result = Result(self.mock_logger, self.exp.labels[0], "average",
+ "daisy1")
for r in self.exp.benchmark_runs:
r.result = fake_result
er._terminated = False
@@ -406,10 +426,10 @@
mock_rmdir.called_with('/usr/local/crosperf-results')
self.assertEqual(self.mock_logger.LogOutputCount, 4)
self.assertEqual(self.mock_logger.output_msgs,
- [ 'Storing experiment file in /usr/local/crosperf-results.',
- 'Storing results report in /usr/local/crosperf-results.',
- 'Storing email message body in /usr/local/crosperf-results.',
- 'Storing results of each benchmark run.' ])
+ ['Storing experiment file in /usr/local/crosperf-results.',
+ 'Storing results report in /usr/local/crosperf-results.',
+ 'Storing email message body in /usr/local/crosperf-results.',
+ 'Storing results of each benchmark run.'])
diff --git a/crosperf/label.py b/crosperf/label.py
index fba91a8..a34416d 100644
--- a/crosperf/label.py
+++ b/crosperf/label.py
@@ -123,6 +123,7 @@
self.checksum = ''
self.log_level = log_level
self.compiler = compiler
+ self.chrome_version = "Fake Chrome Version 50"
def _GetImageType(self, chromeos_image):
image_type = None
diff --git a/crosperf/machine_manager.py b/crosperf/machine_manager.py
index d505ada..c87c9b9 100644
--- a/crosperf/machine_manager.py
+++ b/crosperf/machine_manager.py
@@ -526,6 +526,97 @@
class MockCrosMachine(CrosMachine):
"""Mock cros machine class."""
# pylint: disable=super-init-not-called
+
+ MEMINFO_STRING = """MemTotal: 3990332 kB
+MemFree: 2608396 kB
+Buffers: 147168 kB
+Cached: 811560 kB
+SwapCached: 0 kB
+Active: 503480 kB
+Inactive: 628572 kB
+Active(anon): 174532 kB
+Inactive(anon): 88576 kB
+Active(file): 328948 kB
+Inactive(file): 539996 kB
+Unevictable: 0 kB
+Mlocked: 0 kB
+SwapTotal: 5845212 kB
+SwapFree: 5845212 kB
+Dirty: 9384 kB
+Writeback: 0 kB
+AnonPages: 173408 kB
+Mapped: 146268 kB
+Shmem: 89676 kB
+Slab: 188260 kB
+SReclaimable: 169208 kB
+SUnreclaim: 19052 kB
+KernelStack: 2032 kB
+PageTables: 7120 kB
+NFS_Unstable: 0 kB
+Bounce: 0 kB
+WritebackTmp: 0 kB
+CommitLimit: 7840376 kB
+Committed_AS: 1082032 kB
+VmallocTotal: 34359738367 kB
+VmallocUsed: 364980 kB
+VmallocChunk: 34359369407 kB
+DirectMap4k: 45824 kB
+DirectMap2M: 4096000 kB
+"""
+
+ CPUINFO_STRING = """processor: 0
+vendor_id: GenuineIntel
+cpu family: 6
+model: 42
+model name: Intel(R) Celeron(R) CPU 867 @ 1.30GHz
+stepping: 7
+microcode: 0x25
+cpu MHz: 1300.000
+cache size: 2048 KB
+physical id: 0
+siblings: 2
+core id: 0
+cpu cores: 2
+apicid: 0
+initial apicid: 0
+fpu: yes
+fpu_exception: yes
+cpuid level: 13
+wp: yes
+flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer xsave lahf_lm arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid
+bogomips: 2594.17
+clflush size: 64
+cache_alignment: 64
+address sizes: 36 bits physical, 48 bits virtual
+power management:
+
+processor: 1
+vendor_id: GenuineIntel
+cpu family: 6
+model: 42
+model name: Intel(R) Celeron(R) CPU 867 @ 1.30GHz
+stepping: 7
+microcode: 0x25
+cpu MHz: 1300.000
+cache size: 2048 KB
+physical id: 0
+siblings: 2
+core id: 1
+cpu cores: 2
+apicid: 2
+initial apicid: 2
+fpu: yes
+fpu_exception: yes
+cpuid level: 13
+wp: yes
+flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer xsave lahf_lm arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid
+bogomips: 2594.17
+clflush size: 64
+cache_alignment: 64
+address sizes: 36 bits physical, 48 bits virtual
+power management:
+"""
+
def __init__(self, name, chromeos_root, log_level):
self.name = name
self.image = None
@@ -539,10 +630,17 @@
self.machine_checksum = self._GetMD5Checksum(self.checksum_string)
self.log_level = log_level
self.label = None
+ self.ce = command_executer.GetCommandExecuter(log_level=self.log_level)
def IsReachable(self):
return True
+ def _GetMemoryInfo(self):
+ self.meminfo = self.MEMINFO_STRING
+ self._ParseMemoryInfo()
+
+ def _GetCPUInfo(self):
+ self.cpuinfo = self.CPUINFO_STRING
class MockMachineManager(MachineManager):
"""Mock machine manager class."""
@@ -571,6 +669,9 @@
if cm.IsReachable():
self._all_machines.append(cm)
+ def GetChromeVersion(self, machine):
+ return "Mock Chrome Version R50"
+
def AcquireMachine(self, label):
for machine in self._all_machines:
if not machine.locked:
@@ -590,3 +691,12 @@
def GetAvailableMachines(self, label=None):
return self._all_machines
+
+ def ForceSameImageToAllMachines(self, label):
+ return 0
+
+ def ComputeCommonCheckSum(self, label):
+ common_checksum = 12345
+ for machine in self.GetMachines(label):
+ machine.machine_checksum = common_checksum
+ self.machine_checksum[label.name] = common_checksum
diff --git a/crosperf/mock_instance.py b/crosperf/mock_instance.py
index 06a574a..62ff2da 100644
--- a/crosperf/mock_instance.py
+++ b/crosperf/mock_instance.py
@@ -14,14 +14,14 @@
perf_args = "record -a -e cycles"
label1 = MockLabel("test1", "image1", "/tmp/test_benchmark_run",
- "x86-alex", "chromeos-alex1",
- image_args="",
- cache_dir="", cache_only=False)
+ "x86-alex", "chromeos-alex1", image_args="",
+ cache_dir="", cache_only=False, log_level="average",
+ compiler="gcc")
label2 = MockLabel("test2", "image2", "/tmp/test_benchmark_run_2",
- "x86-alex", "chromeos-alex2",
- image_args="",
- cache_dir="", cache_only=False)
+ "x86-alex", "chromeos-alex2", image_args="",
+ cache_dir="", cache_only=False, log_level="average",
+ compiler="gcc")
benchmark1 = Benchmark("benchmark1", "autotest_name_1",
"autotest_args", 2, "", perf_args, "", "")
diff --git a/crosperf/schedv2_unittest.py b/crosperf/schedv2_unittest.py
index 3ce4e15..3276cd0 100644
--- a/crosperf/schedv2_unittest.py
+++ b/crosperf/schedv2_unittest.py
@@ -15,8 +15,8 @@
from experiment_file import ExperimentFile
from experiment_runner import ExperimentRunner
from machine_manager import MockCrosMachine
-from utils import command_executer
-from utils.command_executer import CommandExecuter
+from cros_utils import command_executer
+from cros_utils.command_executer import CommandExecuter
from experiment_runner_unittest import FakeLogger
from schedv2 import Schedv2
diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py
index 12237d3..afec6c8 100644
--- a/crosperf/suite_runner.py
+++ b/crosperf/suite_runner.py
@@ -9,6 +9,7 @@
import shlex
from utils import command_executer
+import test_flag
TEST_THAT_PATH = '/usr/bin/test_that'
CHROME_MOUNT_DIR = '/tmp/chrome_root'
@@ -252,7 +253,8 @@
# Check for and remove temporary file that may have been left by
# previous telemetry runs (and which might prevent this run from
# working).
- self.RemoveTelemetryTempFile (machine, label.chromeos_root)
+ if not test_flag.GetTestMode():
+ self.RemoveTelemetryTempFile (machine, label.chromeos_root)
rsa_key = os.path.join(label.chromeos_root,
"src/scripts/mod_for_test_scripts/ssh_keys/testing_rsa")
diff --git a/crosperf/suite_runner_unittest.py b/crosperf/suite_runner_unittest.py
index b14a8e1..41013f7 100755
--- a/crosperf/suite_runner_unittest.py
+++ b/crosperf/suite_runner_unittest.py
@@ -14,6 +14,7 @@
import machine_manager
import image_checksummer
import label
+import test_flag
from benchmark import Benchmark
from benchmark_run import MockBenchmarkRun
@@ -31,7 +32,7 @@
mock_logger = mock.Mock(spec=logger.Logger)
mock_label = label.MockLabel("lumpy", "lumpy_chromeos_image", "/tmp/chromeos",
"lumpy", [ "lumpy1.cros", "lumpy.cros2" ],
- "", "", False, "")
+ "", "", False, "average", "gcc", "")
telemetry_crosperf_bench = Benchmark("b1_test", # name
"octane", # test_name
"", # test_args
@@ -215,7 +216,8 @@
raised_exception = False
try:
self.runner.Test_That_Run('lumpy1.cros', self.mock_label,
- self.test_that_bench, '', 'record -a -e cycles')
+ self.test_that_bench, '',
+ 'record -a -e cycles')
except:
raised_exception = True
self.assertTrue(raised_exception)
@@ -224,7 +226,8 @@
self.mock_cmd_exec.ChrootRunCommand = mock_chroot_runcmd
self.mock_cmd_exec.CrosRunCommand = mock_cros_runcmd
res = self.runner.Test_That_Run ('lumpy1.cros', self.mock_label,
- self.test_that_bench, '--iterations=2', '')
+ self.test_that_bench, '--iterations=2',
+ '')
self.assertEqual(mock_cros_runcmd.call_count, 1)
self.assertEqual(mock_chroot_runcmd.call_count, 1)
self.assertEqual(res, 0)
@@ -233,7 +236,9 @@
args_list = mock_chroot_runcmd.call_args_list[0][0]
self.assertEqual(len(args_list), 4)
self.assertEqual(args_list[0], '/tmp/chromeos')
- self.assertEqual(args_list[1], ('/usr/bin/test_that --board=lumpy '
+ self.assertEqual(args_list[1], ('/usr/bin/test_that --autotest_dir '
+ '~/trunk/src/third_party/autotest/files '
+ '--fast --board=lumpy '
'--iterations=2 lumpy1.cros octane'))
self.assertTrue(args_list[2])
self.assertEqual(args_list[3], self.mock_cmd_term)
@@ -261,11 +266,12 @@
self.assertEqual(args_list[1],
('/usr/bin/test_that --autotest_dir '
'~/trunk/src/third_party/autotest/files '
- ' --board=lumpy --args=" test=octane '
+ ' --board=lumpy --args=" run_local=False test=octane '
'profiler=custom_perf profiler_args=\'record -a -e '
'cycles,instructions\'" lumpy1.cros telemetry_Crosperf'))
self.assertEqual(args_dict['cros_sdk_options'],
- (' --chrome_root= --chrome_root_mount=/tmp/chrome_root '
+ ('--no-ns-pid --chrome_root= '
+ '--chrome_root_mount=/tmp/chrome_root '
'FEATURES="-usersandbox" CHROME_ROOT=/tmp/chrome_root'))
self.assertEqual(args_dict['command_terminator'], self.mock_cmd_term)
self.assertTrue(args_dict['return_output'])
@@ -319,6 +325,7 @@
raises_exception = True
self.assertTrue(raises_exception)
+ test_flag.SetTestMode(True)
res = self.runner.Telemetry_Run('lumpy1.cros', self.mock_label,
self.telemetry_bench, '')
self.assertEqual(res, 0)