Extend the status logging mechanism to handle multiple tests in
parallel on a single machine without interleaving the status logs from
each test. This introduces some delay into the processing of these
logs since the status logging has to delay writing out the status
logs until it can impose a serial order on them.

However, it does NOT delay the writing of the logs to stdout; since
stdout is intended for human consumption and not machine processing it
is much more convenient for the logging to be visible in the logs at
the point in time where the event occurred.

This implementation tries to avoid introducing a situation where logs
are dropped on the floor because they are being held back from the status
log. In the worst case on the client side, if autotest fails
before accumulating the logs in status then the intermediate logs will
still be available in individual status.* files, and on the server side
the server will automatically write out whatever logs it has
accumulated so far in the case of a failure (under the assumption that
it is still possible to write to status.log).

Signed-off-by: John Admanski <jadmanski@google.com>



git-svn-id: http://test.kernel.org/svn/autotest/trunk@1089 592f7852-d20e-0410-864c-8624ca9c26a4
diff --git a/client/bin/job.py b/client/bin/job.py
index 8352cba..4c0cc95 100755
--- a/client/bin/job.py
+++ b/client/bin/job.py
@@ -46,6 +46,8 @@
 			the job configuration for this job
 	"""
 
+	DEFAULT_LOG_FILENAME = "status"
+
 	def __init__(self, control, jobtag, cont, harness_type=None):
 		"""
 			control
@@ -94,6 +96,7 @@
 
 		self.control = control
 		self.jobtag = jobtag
+		self.log_filename = self.DEFAULT_LOG_FILENAME
 
 		self.stdout = fd_stack.fd_stack(1, sys.stdout)
 		self.stderr = fd_stack.fd_stack(2, sys.stderr)
@@ -413,21 +416,32 @@
 		print "job: noop: " + text
 
 
-	# Job control primatives.
-
-	def __parallel_execute(self, func, *args):
-		func(*args)
-
-
 	def parallel(self, *tasklist):
 		"""Run tasks in parallel"""
 
 		pids = []
-		for task in tasklist:
-			pids.append(fork_start(self.resultdir,
-					lambda: self.__parallel_execute(*task)))
-		for pid in pids:
+		old_log_filename = self.log_filename
+		for i, task in enumerate(tasklist):
+			self.log_filename = old_log_filename + (".%d" % i)
+			task_func = lambda: task[0](*task[1:])
+			pids.append(fork_start(self.resultdir, task_func))
+
+		old_log_path = os.path.join(self.resultdir, old_log_filename)
+		old_log = open(old_log_path, "a")
+		for i, pid in enumerate(pids):
+			# wait for the task to finish
 			fork_waitfor(self.resultdir, pid)
+			# copy the logs from the subtask into the main log
+			new_log_path = old_log_path + (".%d" % i)
+			if os.path.exists(new_log_path):
+				new_log = open(new_log_path)
+				old_log.write(new_log.read())
+				new_log.close()
+				old_log.flush()
+				os.remove(new_log_path)
+		old_log.close()
+
+		self.log_filename = old_log_filename
 
 
 	def quit(self):
@@ -562,14 +576,27 @@
 						 status))
 		msg = '\t' * self.group_level + msg
 
-		self.harness.test_status_detail(status_code, substr,
-							operation, status)
-		self.harness.test_status(msg)
+		msg_tag = ""
+		if "." in self.log_filename:
+			msg_tag = self.log_filename.split(".", 1)[1]
+
+		self.harness.test_status_detail(status_code, substr, operation,
+						status, msg_tag)
+		self.harness.test_status(msg, msg_tag)
+
+		# log to stdout (if enabled)
+		#if self.log_filename == self.DEFAULT_LOG_FILENAME:
 		print msg
-		status_file = os.path.join(self.resultdir, 'status')
+
+		# log to the "root" status log
+		status_file = os.path.join(self.resultdir, self.log_filename)
 		open(status_file, "a").write(msg + "\n")
+
+		# log to the subdir status log (if subdir is set)
 		if subdir:
-			status_file = os.path.join(self.resultdir, subdir, 'status')
+			status_file = os.path.join(self.resultdir,
+						   subdir,
+						   self.DEFAULT_LOG_FILENAME)
 			open(status_file, "a").write(msg + "\n")