Multithread kernel LTP tests to increase presubmit test speed

Test: local
Change-Id: Ib0bfd0b972ffb31c701071d2172ebae43f774897
diff --git a/testcases/kernel/ltp/KernelLtpTest.py b/testcases/kernel/ltp/KernelLtpTest.py
index c149152..34b8f31 100644
--- a/testcases/kernel/ltp/KernelLtpTest.py
+++ b/testcases/kernel/ltp/KernelLtpTest.py
@@ -149,7 +149,8 @@
         self.shell.Execute("mkdir %s -p" % ltp_configs.LTPDIR)
         src = os.path.join(self.data_file_path, str(n_bit), 'ltp', '.')
         self._dut.adb.push(src, ltp_configs.LTPDIR)
-        logging.info('finished pushing files from %s to %s', src, ltp_configs.LTPDIR)
+        logging.info('finished pushing files from %s to %s', src,
+                     ltp_configs.LTPDIR)
 
     def GetEnvp(self):
         """Generate the environment variable required to run the tests."""
@@ -286,18 +287,33 @@
                 args=args,
                 name_func=name_func)
 
+        settings_multithread = []
+        settings_singlethread = []
+        for test_case in settings:
+            if (test_case.note == 'staging' or test_case.testsuite in
+                    ltp_configs.TEST_SUITES_MULTITHREAD_DISABLED):
+                settings_singlethread.append(test_case)
+            else:
+                settings_multithread.append(test_case)
+
+        failed_tests = self.runGeneratedTests(
+            test_func=test_func,
+            settings=settings_singlethread,
+            args=args,
+            name_func=name_func)
+
         # Shuffle the tests to reduce resource competition probability
-        random.shuffle(settings)
+        random.shuffle(settings_multithread)
 
         # Create a queue for thread workers to pull tasks
         q = queue.Queue()
-        map(q.put, settings)
+        map(q.put, settings_multithread)
 
         # Create individual shell sessions for thread workers
         for i in xrange(n_workers):
             self._dut.shell.InvokeTerminal("shell_thread_{}".format(i))
 
-        failed_tests = set()
+        failed_multithread_tests = set()
         with futures.ThreadPoolExecutor(max_workers=n_workers) as executor:
             fs = [executor.submit(self.RunLtpWorker, q, args, name_func, i)
                   for i in xrange(n_workers)]
@@ -305,20 +321,23 @@
             failed_test_sets = map(futures.Future.result, fs)
             for failed_test_set in failed_test_sets:
                 for test_case in failed_test_set:
-                    failed_tests.add(test_case)
+                    failed_multithread_tests.add(test_case)
 
-        for test_case in failed_tests:
+        for test_case in failed_multithread_tests:
             logging.info(
                 "Test case %s failed during multi-thread run, rerunning...",
                 test_case)
 
         # In the end, rerun all failed tests to confirm their failure
         # in sequential.
-        return self.runGeneratedTests(
-            test_func=test_func,
-            settings=failed_tests,
-            args=args,
-            name_func=name_func)
+        failed_tests.extend(
+            self.runGeneratedTests(
+                test_func=test_func,
+                settings=failed_multithread_tests,
+                args=args,
+                name_func=name_func))
+
+        return failed_tests
 
     #@Override
     def filterOneTest(self, test_name):
diff --git a/testcases/kernel/ltp/ltp_configs.py b/testcases/kernel/ltp/ltp_configs.py
index a4538cc..a05833d 100644
--- a/testcases/kernel/ltp/ltp_configs.py
+++ b/testcases/kernel/ltp/ltp_configs.py
@@ -139,7 +139,14 @@
     'timers',
     # The following are not included in default LTP scenario group
     'securebits',
-    'tracing'
+    'tracing',
+]
+
+# List of LTP test suites that will not run in multi-thread mode
+TEST_SUITES_MULTITHREAD_DISABLED = [
+    'dio',
+    'io',
+    'mm',
 ]
 
 # Staging tests are for debugging and verifying fixed tests
diff --git a/testcases/kernel/ltp/stable/KernelLtpTest.config b/testcases/kernel/ltp/stable/KernelLtpTest.config
index 119d247..ccfe589 100644
--- a/testcases/kernel/ltp/stable/KernelLtpTest.config
+++ b/testcases/kernel/ltp/stable/KernelLtpTest.config
@@ -2,5 +2,5 @@
     "run_staging": false,
     "run_32bit": true,
     "run_64bit": true,
-    "number_of_threads": 1
+    "number_of_threads": 0
 }
diff --git a/testcases/kernel/ltp/staging/KernelLtpStagingTest.config b/testcases/kernel/ltp/staging/KernelLtpStagingTest.config
index 5ee149e..4c45444 100644
--- a/testcases/kernel/ltp/staging/KernelLtpStagingTest.config
+++ b/testcases/kernel/ltp/staging/KernelLtpStagingTest.config
@@ -2,5 +2,5 @@
     "run_staging": true,
     "run_32bit": true,
     "run_64bit": true,
-    "number_of_threads": 1
+    "number_of_threads": 0
 }
diff --git a/testcases/kernel/ltp/test_cases_parser.py b/testcases/kernel/ltp/test_cases_parser.py
index 436740a..8a28ca1 100644
--- a/testcases/kernel/ltp/test_cases_parser.py
+++ b/testcases/kernel/ltp/test_cases_parser.py
@@ -133,11 +133,13 @@
                 continue
 
             # For failing tests that are being inspected
-            if (not run_staging and self.IsTestcaseInList(
-                    testcase, ltp_configs.STAGING_TESTS, n_bit)):
-                logging.info("[Parser] Skipping test case %s. Reason: "
-                             "staging" % testcase.fullname)
-                continue
+            if self.IsTestcaseInList(testcase, ltp_configs.STAGING_TESTS, n_bit):
+                if not run_staging:
+                    logging.info("[Parser] Skipping test case %s. Reason: "
+                                 "staging" % testcase.fullname)
+                    continue
+                else:
+                    testcase.note = "staging"
 
             logging.info("[Parser] Adding test case %s." % testcase.fullname)
             yield testcase