Clean up the test discovery a bit.  Use os.walk instead of the obsolete
os.path.walk, and prune subdirectories that are not Python modules from
further searching.  This prevents any attempt to run tests from random
non-repository "data" files, such as results directories.
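
For reference, the pruning relies on os.walk's documented top-down
behaviour: the caller may edit the subdirectory list in place to
control which directories get visited.  A minimal sketch of the idiom
(the start directory is an assumption; the real walker below also
filters for *_unittest.py / *_test.py files):

    import os

    start = '.'  # assumption: search from the current directory
    for dirpath, subdirs, filenames in os.walk(start):
        if '__init__.py' not in filenames:
            # Emptying subdirs in place stops os.walk from descending
            # into anything below this non-module directory.
            del subdirs[:]
            continue
        # This directory is an importable python module; collect tests here.
        print dirpath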

TESTED: I verified that exactly the same set of tests is found and
executed with this code as with the old code.
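
Roughly, the check was to run discovery both ways over the tree and
compare the results.  A sketch of that kind of cross-check (the
discover_* helpers are hypothetical condensations of the old and new
walkers; Python 2, since os.path.walk only exists there):

    import os

    def discover_old(start):
        found = []
        def lister(acc, dirname, names):
            # Old style: visit everything, only collect from module dirs.
            if not os.path.exists(os.path.join(dirname, '__init__.py')):
                return
            acc.extend(os.path.join(dirname, n) for n in names
                       if n.endswith('_unittest.py') or n.endswith('_test.py'))
        os.path.walk(start, lister, found)
        return found

    def discover_new(start):
        found = []
        for dirpath, subdirs, filenames in os.walk(start):
            if '__init__.py' not in filenames:
                del subdirs[:]  # New style: prune non-module subtrees.
                continue
            found.extend(os.path.join(dirpath, f) for f in filenames
                         if f.endswith('_unittest.py') or f.endswith('_test.py'))
        return found

    assert sorted(discover_old('.')) == sorted(discover_new('.'))

The two can differ if a module directory is nested under a non-module
directory; the assertion passing confirms the tree has no such layout.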

Signed-off-by: Gregory Smith <gps@google.com>


git-svn-id: http://test.kernel.org/svn/autotest/trunk@3358 592f7852-d20e-0410-864c-8624ca9c26a4
diff --git a/utils/unittest_suite.py b/utils/unittest_suite.py
index cb9150a..b290dde 100755
--- a/utils/unittest_suite.py
+++ b/utils/unittest_suite.py
@@ -5,8 +5,6 @@
 from autotest_lib.utils import parallel
 
 
-root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
-
 parser = optparse.OptionParser()
 parser.add_option("-r", action="store", type="string", dest="start",
                   default='',
@@ -16,7 +14,6 @@
 parser.add_option("--debug", action="store_true", dest="debug", default=False,
                   help="run in debug mode")
 
-
 LONG_TESTS = set((
     'monitor_db_unittest.py',
     'barrier_unittest.py',
@@ -28,46 +25,75 @@
     'logging_manager_test.py',
     ))
 
-modules = []
+ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
 
 
-def lister(full, dirname, files):
-    if not os.path.exists(os.path.join(dirname, '__init__.py')):
-        return
-    for f in files:
-        if f.endswith('_unittest.py') or f.endswith('_test.py'):
-            if not full and f in LONG_TESTS:
-                continue
-            temp = os.path.join(dirname, f).strip('.py')
-            mod_name = ['autotest_lib'] + temp[len(root)+1:].split('/')
-            modules.append(mod_name)
+class TestFailure(Exception): pass
 
 
-def run_test(mod_name):
+def run_test(mod_names, options):
+    """
+    @param mod_names: A list of individual parts of the module name to import
+            and run as a test suite.
+    @param options: optparse options.
+    """
     if not options.debug:
         parallel.redirect_io()
 
-    print "Running %s" % '.'.join(mod_name)
-    mod = common.setup_modules.import_module(mod_name[-1],
-                                             '.'.join(mod_name[:-1]))
+    print "Running %s" % '.'.join(mod_names)
+    mod = common.setup_modules.import_module(mod_names[-1],
+                                             '.'.join(mod_names[:-1]))
     test = unittest.defaultTestLoader.loadTestsFromModule(mod)
     suite = unittest.TestSuite(test)
     runner = unittest.TextTestRunner(verbosity=2)
     result = runner.run(suite)
     if result.errors or result.failures:
-        raise Exception("%s failed" % '.'.join(mod_name))
+        raise TestFailure(
+                "%s had %d failures and %d errors." %
+                ('.'.join(mod_names), len(result.failures), len(result.errors)))
 
 
-def run_tests(start, full=False):
-    os.path.walk(start, lister, full)
+def find_and_run_tests(start, options):
+    """
+    Find and run Python unittest suites below the given directory.  Only look
+    in subdirectories of start that are actual importable Python modules.
+
+    @param start: The absolute directory to look for tests under.
+    @param options: optparse options.
+    """
+    modules = []
+
+    for dirpath, subdirs, filenames in os.walk(start):
+        # Only look in and below subdirectories that are python modules.
+        if '__init__.py' not in filenames:
+            # Skip all subdirectories below this one; it is not a module.
+            del subdirs[:]
+            if options.debug:
+                print 'Skipping', dirpath
+            continue  # Skip this directory.
+
+        # Look for unittest files.
+        for fname in filenames:
+            if fname.endswith('_unittest.py') or fname.endswith('_test.py'):
+                if not options.full and fname in LONG_TESTS:
+                    continue
+                path_no_py = os.path.splitext(os.path.join(dirpath, fname))[0]
+                assert path_no_py.startswith(ROOT)
+                names = path_no_py[len(ROOT)+1:].split('/')
+                modules.append(['autotest_lib'] + names)
+                if options.debug:
+                    print 'testing', path_no_py
+
+    if options.debug:
+        print 'Number of test modules found:', len(modules)
 
     functions = {}
-    for module in modules:
+    for module_names in modules:
-        # Create a function that'll test a particular module.  module=module
-        # is a hack to force python to evaluate the params now.  We then
-        # rename the function to make error reporting nicer.
-        run_module = lambda module=module: run_test(module)
-        name = '.'.join(module)
+        # Create a function that'll test a particular module.  The default
+        # argument is a hack to force python to bind module_names now.  We
+        # then rename the function to make error reporting nicer.
+        run_module = lambda module=module_names: run_test(module, options)
+        name = '.'.join(module_names)
         run_module.__name__ = name
         functions[run_module] = set()
 
@@ -83,7 +109,6 @@
 
 
 def main():
-    global options, args
     options, args = parser.parse_args()
     if args:
         parser.error('Unexpected argument(s): %s' % args)
@@ -92,17 +117,20 @@
 
     # Strip the arguments off the command line, so that the unit tests do not
     # see them.
-    sys.argv = [sys.argv[0]]
+    del sys.argv[1:]
 
-    errors = run_tests(os.path.join(root, options.start), options.full)
+    absolute_start = os.path.join(ROOT, options.start)
+    errors = find_and_run_tests(absolute_start, options)
     if errors:
         print "%d tests resulted in an error/failure:" % len(errors)
         for error in errors:
             print "\t%s" % error
+        print "Rerun", sys.argv[0], "--debug to see the failure details."
         sys.exit(1)
     else:
         print "All passed!"
         sys.exit(0)
 
+
 if __name__ == "__main__":
     main()