Allow tests to define run_once instead of execute.

This allows us to move a whole lot of boilerplate out of each test and
into a common place in test.py.

Also add a postprocess phase for tests, as a cleaner place to extract
results than doing it inside run_once.
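
For example, a test that previously looped over iterations and drove the
profilers by hand can now be written roughly like this (a sketch against
the new interface; "mytest", its command line and the keyval name are
made up, the real conversions are the dbench and tsc diffs below):

    # imports follow the existing client tests; the exact module path may
    # differ at this revision
    from autotest_lib.client.bin import test, utils

    class mytest(test.test):
        version = 1

        def initialize(self):
            self.results = []

        def run_once(self, args=''):
            # called once per iteration, and once more under the profilers,
            # by the common execute() in test.py
            self.results.append(utils.system_output(self.srcdir + '/mytest ' + args))

        def postprocess(self):
            # runs after all iterations and the optional profiling run;
            # the usual place to extract performance keyvals
            out = open(self.resultsdir + '/keyval', 'w')
            print >> out, "iterations_run=%d" % len(self.results)
            out.close()

Control files keep passing iterations straight through, e.g.
job.run_test('mytest', iterations=5, args='-x'), and the common execute()
takes care of the looping, the profiling run and the postprocess call.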

Signed-off-by: Martin J. Bligh <mbligh@google.com>



git-svn-id: http://test.kernel.org/svn/autotest/trunk@1846 592f7852-d20e-0410-864c-8624ca9c26a4
diff --git a/client/common_lib/test.py b/client/common_lib/test.py
index fedaa11..3fc694b 100644
--- a/client/common_lib/test.py
+++ b/client/common_lib/test.py
@@ -110,13 +110,35 @@
         pass
 
 
-    def cleanup(self):
+    def warmup(self, *args, **dargs):
         pass
 
 
-    def execute(self):
-        raise NotImplementedError("This function must be overriden by "
-                                  "the test class")
+    def execute(self, iterations=1, *args, **dargs):
+        self.warmup(*args, **dargs)
+
+        profilers = self.job.profilers
+        # Dropped profilers.only() - if you want that, use iterations=0
+        for i in range(iterations):
+            self.run_once(*args, **dargs)
+
+        # Do a profiling run if necessary
+        if profilers.present():
+            profilers.start(self)
+            self.run_once(*args, **dargs)
+            profilers.stop(self)
+            profilers.report(self)
+
+        # Do any postprocessing, normally extracting performance keyvals, etc
+        self.postprocess()
+
+
+    def postprocess(self):
+        pass
+
+
+    def cleanup(self):
+        pass
 
 
     def _exec(self, args, dargs):
diff --git a/client/tests/dbench/dbench.py b/client/tests/dbench/dbench.py
index bd6fdd2..e4e5213 100755
--- a/client/tests/dbench/dbench.py
+++ b/client/tests/dbench/dbench.py
@@ -15,7 +15,11 @@
         utils.system('make')
 
 
-    def execute(self, iterations = 1, dir = None, nprocs = None, args = ''):
+    def initialize(self):
+        self.results = []
+
+
+    def run_once(self, dir = None, nprocs = None, args = ''):
         if not nprocs:
             nprocs = self.job.cpu_count()
         profilers = self.job.profilers
@@ -24,24 +28,12 @@
             args += ' -D ' + dir
         args += ' %s' % nprocs
         cmd = self.srcdir + '/dbench ' + args
-        results = []
-        if not profilers.only():
-            for i in range(iterations):
-                results.append(utils.system_output(cmd, retain_output=True))
-
-        # Do a profiling run if necessary
-        if profilers.present():
-            profilers.start(self)
-            results.append(utils.system_output(cmd, retain_output=True))
-            profilers.stop(self)
-            profilers.report(self)
-
-        self.__format_results("\n".join(results))
+        self.results.append(utils.system_output(cmd, retain_output=True))
 
 
-    def __format_results(self, results):
+    def postprocess(self):
         out = open(self.resultsdir + '/keyval', 'w')
         pattern = re.compile(r"Throughput (.*?) MB/sec (.*?) procs")
-        for result in pattern.findall(results):
+        for result in pattern.findall("\n".join(self.results)):
             print >> out, "throughput=%s\nprocs=%s\n" % result
         out.close()
diff --git a/client/tests/tsc/tsc.py b/client/tests/tsc/tsc.py
index 6e7ca64..2dbdcf5 100755
--- a/client/tests/tsc/tsc.py
+++ b/client/tests/tsc/tsc.py
@@ -12,6 +12,5 @@
         utils.system('make')
 
 
-    def execute(self, iterations = 1, args = ''):
-        for i in range(iterations):
-            utils.system(self.srcdir + '/checktsc ' + args)
+    def run_once(self, args = ''):
+        utils.system(self.srcdir + '/checktsc ' + args)