Merge remote branch 'cros/upstream' into master

Merged to upstream autotest @4749~@5215.

The entire change list description is too big to include here. Please refer to upstream (http://autotest.kernel.org/browser) for more details.

BUG=
TEST=emerged both x86 and arm build.
Tested emerged x86 build bvt against a chromeos device.

Review URL: http://codereview.chromium.org/6246035

Change-Id: I8455f2135c87c321c6efc232e2869dc8f675395e
diff --git a/.gitignore b/.gitignore
index 355a9e3..ec559c1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -20,6 +20,8 @@
 private_host_attributes_config.py
 
 # default svn:ignore property:
+*.o
+#*#
 .*.rej
 *.rej
 .*~
@@ -27,9 +29,13 @@
 .#*
 .DS_Store.
 # additional patterns:
+*.pyc
 client/control
 client/results/
 client/tests/kvm/images
 client/tests/kvm/env
 client/tmp
 client/tests/kvm/*.cfg
+server/tmp
+tko/parsers/test/site_scenarios
+ExternalSource
diff --git a/cli/atest b/cli/atest
index 3174d02..8386920 100755
--- a/cli/atest
+++ b/cli/atest
@@ -1,7 +1,6 @@
 #!/usr/bin/python -u
 
-import base64, sys
-
+import sys
 import common
 from autotest_lib.cli import atest
 
diff --git a/cli/job.py b/cli/job.py
index 660dfe5..c909591 100644
--- a/cli/job.py
+++ b/cli/job.py
@@ -609,6 +609,12 @@
         for field in ('name', 'created_on', 'id', 'owner'):
             del clone_info['job'][field]
 
+        # Also remove parameterized_job field, as the feature still is
+        # incomplete, this tool does not attempt to support it for now,
+        # it uses a different API function and it breaks create_job()
+        if clone_info['job'].has_key('parameterized_job'):
+            del clone_info['job']['parameterized_job']
+
         # Keyword args cannot be unicode strings
         self.data.update((str(key), val)
                          for key, val in clone_info['job'].iteritems())
diff --git a/client/bin/autotest b/client/bin/autotest
index 1862881..e843b7f 100755
--- a/client/bin/autotest
+++ b/client/bin/autotest
@@ -56,6 +56,11 @@
                   type='string', default=None, action='store',
                   help='a comma seperated list of client tests to prebuild on '
                        'the server. Use all to prebuild all of them.')
+
+parser.add_option('--tap', dest='tap_report', action='store_true',
+                  default=None, help='Output TAP (Test anything '
+                  'protocol) reports')
+
 def usage():
     parser.print_help()
     sys.exit(1)
diff --git a/client/bin/base_sysinfo.py b/client/bin/base_sysinfo.py
index e58f61b..7e06f14 100644
--- a/client/bin/base_sysinfo.py
+++ b/client/bin/base_sysinfo.py
@@ -78,11 +78,12 @@
 
 
 class command(loggable):
-    def __init__(self, cmd, logf=None, log_in_keyval=False):
+    def __init__(self, cmd, logf=None, log_in_keyval=False, compress_log=False):
         if not logf:
             logf = cmd.replace(" ", "_")
         super(command, self).__init__(logf, log_in_keyval)
         self.cmd = cmd
+        self._compress_log = compress_log
 
 
     def __repr__(self):
@@ -111,16 +112,21 @@
 
 
     def run(self, logdir):
-        stdin = open(os.devnull, "r")
-        stdout = open(os.path.join(logdir, self.logf), "w")
-        stderr = open(os.devnull, "w")
         env = os.environ.copy()
         if "PATH" not in env:
             env["PATH"] = "/usr/bin:/bin"
-        subprocess.call(self.cmd, stdin=stdin, stdout=stdout, stderr=stderr,
-                        shell=True, env=env)
-        for f in (stdin, stdout, stderr):
-            f.close()
+        logf_path = os.path.join(logdir, self.logf)
+        stdin = open(os.devnull, "r")
+        stderr = open(os.devnull, "w")
+        stdout = open(logf_path, "w")
+        try:
+            subprocess.call(self.cmd, stdin=stdin, stdout=stdout, stderr=stderr,
+                            shell=True, env=env)
+        finally:
+            for f in (stdin, stdout, stderr):
+                f.close()
+            if self._compress_log and os.path.exists(logf_path):
+                utils.system('gzip -9 "%s"' % logf_path, ignore_status=True)
 
 
 class base_sysinfo(object):
@@ -161,7 +167,11 @@
 
         # add in a couple of extra files and commands we want to grab
         self.test_loggables.add(command("df -mP", logf="df"))
-        self.test_loggables.add(command("dmesg -c", logf="dmesg"))
+        # We compress the dmesg because it can get large when kernels are
+        # configured with a large buffer and some tests trigger OOMs or
+        # other large "spam" that fill it up...
+        self.test_loggables.add(command("dmesg -c", logf="dmesg",
+                                        compress_log=True))
         self.boot_loggables.add(logfile("/proc/cmdline",
                                              log_in_keyval=True))
         # log /proc/mounts but with custom filename since we already
diff --git a/client/bin/harness.py b/client/bin/harness.py
index b4c9dfd..bcb4f96 100644
--- a/client/bin/harness.py
+++ b/client/bin/harness.py
@@ -5,7 +5,7 @@
 
 __author__ = """Copyright Andy Whitcroft 2006"""
 
-import os, sys
+import os, sys, logging
 import common
 
 class harness(object):
@@ -86,6 +86,8 @@
     if not which:
         which = 'standalone'
 
+    logging.debug('Selected harness: %s' % which)
+
     harness_name = 'harness_%s' % which
     harness_module = common.setup_modules.import_module(harness_name,
                                                         'autotest_lib.client.bin')
diff --git a/client/bin/job.py b/client/bin/job.py
index 3d552ce..3e285c6 100644
--- a/client/bin/job.py
+++ b/client/bin/job.py
@@ -135,6 +135,11 @@
         return autodir, clientdir, None
 
 
+    @classmethod
+    def _parse_args(cls, args):
+        return re.findall("[^\s]*?['|\"].*?['|\"]|[^\s]+", args)
+
+
     def _find_resultdir(self, options):
         """
         Determine the directory for storing results. On a client this is
@@ -162,9 +167,9 @@
             self._cleanup_results_dir()
 
         logging_manager.configure_logging(
-                client_logging_config.ClientLoggingConfig(),
-                results_dir=self.resultdir,
-                verbose=options.verbose)
+            client_logging_config.ClientLoggingConfig(),
+            results_dir=self.resultdir,
+            verbose=options.verbose)
         logging.info('Writing results to %s', self.resultdir)
 
         # init_group_level needs the state
@@ -174,7 +179,20 @@
         self._next_step_index = 0
         self._load_state()
 
-        self.harness = harness.select(options.harness, self)
+        # harness is chosen by following rules:
+        # 1. explicitly specified via command line
+        # 2. harness stored in state file (if continuing job '-c')
+        # 3. default harness
+        selected_harness = None
+        if options.harness:
+            selected_harness = options.harness
+            self._state.set('client', 'harness', selected_harness)
+        else:
+            stored_harness = self._state.get('client', 'harness', None)
+            if stored_harness:
+                selected_harness = stored_harness
+
+        self.harness = harness.select(selected_harness, self)
 
         # set up the status logger
         def client_job_record_hook(entry):
@@ -190,8 +208,8 @@
             # send the entry to stdout, if it's enabled
             logging.info(rendered_entry)
         self._logger = base_job.status_logger(
-            self, status_indenter(self), record_hook=client_job_record_hook)
-
+            self, status_indenter(self), record_hook=client_job_record_hook,
+            tap_writer=self._tap)
 
     def _post_record_init(self, control, options, drop_caches,
                           extra_copy_cmdline):
@@ -230,7 +248,7 @@
 
         self.args = []
         if options.args:
-            self.args = options.args.split()
+            self.args = self._parse_args(options.args)
 
         if options.user:
             self.user = options.user
@@ -582,6 +600,7 @@
 
         try:
             self.record('START', subdir, testname)
+            self._state.set('client', 'unexpected_reboot', (subdir, testname))
             result = function(*args, **dargs)
             self.record('END GOOD', subdir, testname)
             return result
@@ -600,6 +619,8 @@
             err_msg = str(e) + '\n' + traceback.format_exc()
             self.record('END ERROR', subdir, testname, err_msg)
             raise
+        finally:
+            self._state.discard('client', 'unexpected_reboot')
 
 
     def run_group(self, function, tag=None, **dargs):
@@ -664,7 +685,6 @@
         partition_list = partition_lib.get_partition_list(self,
                                                           exclude_swap=False)
         mount_info = partition_lib.get_mount_info(partition_list)
-
         old_mount_info = self._state.get('client', 'mount_info')
         if mount_info != old_mount_info:
             new_entries = mount_info - old_mount_info
@@ -870,7 +890,12 @@
 
 
     def complete(self, status):
-        """Clean up and exit"""
+        """Write pending TAP reports, clean up, and exit"""
+        # write out TAP reports
+        if self._tap.do_tap_report:
+            self._tap.write()
+            self._tap._write_tap_archive()
+
         # We are about to exit 'complete' so clean up the control file.
         dest = os.path.join(self.resultdir, os.path.basename(self._state_file))
         shutil.move(self._state_file, dest)
@@ -1043,6 +1068,14 @@
         if not self._is_continuation:
             if 'step_init' in global_control_vars:
                 self.next_step(global_control_vars['step_init'])
+        else:
+            # if last job failed due to unexpected reboot, record it as fail
+            # so harness gets called
+            last_job = self._state.get('client', 'unexpected_reboot', None)
+            if last_job:
+                subdir, testname = last_job
+                self.record('FAIL', subdir, testname, 'unexpected reboot')
+                self.record('END FAIL', subdir, testname)
 
         # Iterate through the steps.  If we reboot, we'll simply
         # continue iterating on the next step.
@@ -1182,13 +1215,13 @@
         sys.exit(1)
 
     except error.JobError, instance:
-        logging.error("JOB ERROR: " + instance.args[0])
+        logging.error("JOB ERROR: " + str(instance))
         if myjob:
             command = None
             if len(instance.args) > 1:
                 command = instance.args[1]
-                myjob.record('ABORT', None, command, instance.args[0])
-            myjob.record('END ABORT', None, None, instance.args[0])
+                myjob.record('ABORT', None, command, str(instance))
+            myjob.record('END ABORT', None, None, str(instance))
             assert myjob._record_indent == 0
             myjob.complete(1)
         else:
diff --git a/client/bin/job_unittest.py b/client/bin/job_unittest.py
index 24c0501..0e5aede 100755
--- a/client/bin/job_unittest.py
+++ b/client/bin/job_unittest.py
@@ -18,7 +18,7 @@
     job_class = job.base_client_job
 
     def setUp(self):
-        self.god = mock.mock_god()
+        self.god = mock.mock_god(ut=self)
         self.god.stub_with(job.base_client_job, '_get_environ_autodir',
                            classmethod(lambda cls: '/adir'))
         self.job = self.job_class.__new__(self.job_class)
@@ -86,6 +86,7 @@
             user = None
             log = False
             args = ''
+            tap_report = None
         self.god.stub_function_to_return(job.utils, 'drop_caches', None)
 
         self.job._job_state = base_job_unittest.stub_job_state
@@ -109,7 +110,7 @@
 class test_base_job(unittest.TestCase):
     def setUp(self):
         # make god
-        self.god = mock.mock_god()
+        self.god = mock.mock_god(ut=self)
 
         # need to set some environ variables
         self.autodir = "autodir"
@@ -241,6 +242,7 @@
         options.hostname = 'localhost'
         options.user = 'my_user'
         options.args = ''
+        options.tap_report = None
         self.job.__init__(self.control, options,
                           extra_copy_cmdline=['more-blah'])
 
@@ -280,6 +282,7 @@
         options.hostname = 'localhost'
         options.user = 'my_user'
         options.args = ''
+        options.tap_report = None
         error = Exception('fail')
 
         self.god.stub_function(self.job, '_post_record_init')
@@ -691,5 +694,18 @@
         self.god.check_playback()
 
 
+    def test_parse_args(self):
+        test_set = {"a='foo bar baz' b='moo apt'":
+                    ["a='foo bar baz'", "b='moo apt'"],
+                    "a='foo bar baz' only=gah":
+                    ["a='foo bar baz'", "only=gah"],
+                    "a='b c d' no=argh":
+                    ["a='b c d'", "no=argh"]}
+        for t in test_set:
+            parsed_args = job.base_client_job._parse_args(t)
+            expected_args = test_set[t]
+            self.assertEqual(parsed_args, expected_args)
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/client/bin/kernel.py b/client/bin/kernel.py
index f3f5a38..cb0ef99 100644
--- a/client/bin/kernel.py
+++ b/client/bin/kernel.py
@@ -332,15 +332,17 @@
             utils.extract_tarball_to_dir(tarball, self.build_dir)
 
 
-    def extraversion(self, tag, append=1):
+    def extraversion(self, tag, append=True):
         os.chdir(self.build_dir)
-        extraversion_sub = r's/^EXTRAVERSION =\s*\(.*\)/EXTRAVERSION = '
+        extraversion_sub = r's/^CONFIG_LOCALVERSION=\s*"\(.*\)"/CONFIG_LOCALVERSION='
+        cfg = self.build_dir + '/.config'
         if append:
-            p = extraversion_sub + '\\1-%s/' % tag
+            p = extraversion_sub + '"\\1-%s"/' % tag
         else:
-            p = extraversion_sub + '-%s/' % tag
-        utils.system('mv Makefile Makefile.old')
-        utils.system('sed "%s" < Makefile.old > Makefile' % p)
+            p = extraversion_sub + '"-%s"/' % tag
+        utils.system('mv %s %s.old' % (cfg, cfg))
+        utils.system("sed '%s' < %s.old > %s" % (p, cfg, cfg))
+        self.config(make='oldconfig')
 
 
     @log.record
diff --git a/client/bin/kernel_unittest.py b/client/bin/kernel_unittest.py
index 6761c05..4ed0d37 100755
--- a/client/bin/kernel_unittest.py
+++ b/client/bin/kernel_unittest.py
@@ -425,13 +425,17 @@
     def test_extraversion(self):
         self.construct_kernel()
         tag = "tag"
+        # setup
+        self.god.stub_function(self.kernel, "config")
 
         # record
         os.chdir.expect_call(self.build_dir)
-        extraversion_sub = r's/^EXTRAVERSION =\s*\(.*\)/EXTRAVERSION = '
-        p = extraversion_sub + '\\1-%s/' % tag
-        utils.system.expect_call('mv Makefile Makefile.old')
-        utils.system.expect_call('sed "%s" < Makefile.old > Makefile' % p)
+        extraversion_sub = r's/^CONFIG_LOCALVERSION=\s*"\(.*\)"/CONFIG_LOCALVERSION='
+        cfg = self.build_dir + '/.config'
+        p = extraversion_sub + '"\\1-%s"/' % tag
+        utils.system.expect_call('mv %s %s.old' % (cfg, cfg))
+        utils.system.expect_call("sed '%s' < %s.old > %s" % (p, cfg, cfg))
+        self.kernel.config.expect_call(make='oldconfig')
 
         # run and check
         self.kernel.extraversion(tag)
diff --git a/client/bin/partition.py b/client/bin/partition.py
index 355d1b3..7381f75 100644
--- a/client/bin/partition.py
+++ b/client/bin/partition.py
@@ -185,7 +185,7 @@
     mount_info = set()
     for p in partition_list:
         try:
-            uuid = utils.system_output('blkid -s UUID -o value %s' % p.device)
+            uuid = utils.system_output('blkid -p -s UUID -o value %s' % p.device)
         except error.CmdError:
             # fall back to using the partition
             uuid = p.device
diff --git a/client/bin/setup_job_unittest.py b/client/bin/setup_job_unittest.py
index f901bac..f137ddf 100755
--- a/client/bin/setup_job_unittest.py
+++ b/client/bin/setup_job_unittest.py
@@ -79,6 +79,7 @@
             hostname = None
             user = None
             log = False
+            tap_report = None
 
         self.job.__init__(options)
 
@@ -169,6 +170,7 @@
         options.verbose = False
         options.hostname = 'localhost'
         options.user = 'my_user'
+        options.tap_report = None
         self.job.__init__(options)
 
         # check
diff --git a/client/common_lib/barrier_unittest.py b/client/common_lib/barrier_unittest.py
index 37de2bf..58abeb3 100755
--- a/client/common_lib/barrier_unittest.py
+++ b/client/common_lib/barrier_unittest.py
@@ -47,15 +47,13 @@
 
 
     def test_get_host_from_id(self):
-        b = barrier.barrier('127.0.0.1#', 'testgethost', 100)
-
-        hostname = b._get_host_from_id('my_host')
+        hostname = barrier.get_host_from_id('my_host')
         self.assertEqual(hostname, 'my_host')
 
-        hostname = b._get_host_from_id('my_host#')
+        hostname = barrier.get_host_from_id('my_host#')
         self.assertEqual(hostname, 'my_host')
 
-        self.assertRaises(error.BarrierError, b._get_host_from_id, '#my_host')
+        self.assertRaises(error.BarrierError, barrier.get_host_from_id, '#my_host')
 
 
     def test_update_timeout(self):
diff --git a/client/common_lib/base_barrier.py b/client/common_lib/base_barrier.py
index e4de635..e1063a9 100644
--- a/client/common_lib/base_barrier.py
+++ b/client/common_lib/base_barrier.py
@@ -5,6 +5,16 @@
 # default barrier port
 _DEFAULT_PORT = 11922
 
+def get_host_from_id(hostid):
+    # Remove any trailing local identifier following a #.
+    # This allows multiple members per host which is particularly
+    # helpful in testing.
+    if not hostid.startswith('#'):
+        return hostid.split('#')[0]
+    else:
+        raise error.BarrierError(
+            "Invalid Host id: Host Address should be specified")
+
 
 class BarrierAbortError(error.BarrierError):
     """Special BarrierError raised when an explicit abort is requested."""
@@ -159,17 +169,6 @@
         self._waiting = {}  # Maps from hostname -> (client, addr) tuples.
 
 
-    def _get_host_from_id(self, hostid):
-        # Remove any trailing local identifier following a #.
-        # This allows multiple members per host which is particularly
-        # helpful in testing.
-        if not hostid.startswith('#'):
-            return hostid.split('#')[0]
-        else:
-            raise error.BarrierError(
-                    "Invalid Host id: Host Address should be specified")
-
-
     def _update_timeout(self, timeout):
         if timeout is not None and self._start_time is not None:
             self._timeout_secs = (time() - self._start_time) + timeout
@@ -397,14 +396,14 @@
                 remote.settimeout(30)
                 if is_master:
                     # Connect to all slaves.
-                    host = self._get_host_from_id(self._members[self._seen])
+                    host = get_host_from_id(self._members[self._seen])
                     logging.info("calling slave: %s", host)
                     connection = (remote, (host, self._port))
                     remote.connect(connection[1])
                     self._master_welcome(connection)
                 else:
                     # Just connect to the master.
-                    host = self._get_host_from_id(self._masterid)
+                    host = get_host_from_id(self._masterid)
                     logging.info("calling master")
                     connection = (remote, (host, self._port))
                     remote.connect(connection[1])
diff --git a/client/common_lib/base_barrier_unittest.py b/client/common_lib/base_barrier_unittest.py
index 52d8e17..71ea538 100755
--- a/client/common_lib/base_barrier_unittest.py
+++ b/client/common_lib/base_barrier_unittest.py
@@ -5,7 +5,7 @@
 import os, sys, socket, errno, unittest, threading
 from time import time, sleep
 import common
-from autotest_lib.client.common_lib import error, barrier
+from autotest_lib.client.common_lib import error, barrier, base_barrier
 from autotest_lib.client.common_lib.test_utils import mock
 
 
@@ -46,15 +46,14 @@
 
 
     def test_get_host_from_id(self):
-        b = barrier.barrier('127.0.0.1#', 'testgethost', 100)
-
-        hostname = b._get_host_from_id('my_host')
+        hostname = base_barrier.get_host_from_id('my_host')
         self.assertEqual(hostname, 'my_host')
 
-        hostname = b._get_host_from_id('my_host#')
+        hostname = base_barrier.get_host_from_id('my_host#')
         self.assertEqual(hostname, 'my_host')
 
-        self.assertRaises(error.BarrierError, b._get_host_from_id, '#my_host')
+        self.assertRaises(error.BarrierError,
+                          base_barrier.get_host_from_id, '#my_host')
 
 
     def test_update_timeout(self):
diff --git a/client/common_lib/base_job.py b/client/common_lib/base_job.py
index 3c77d38..c5f55f8 100644
--- a/client/common_lib/base_job.py
+++ b/client/common_lib/base_job.py
@@ -1,6 +1,6 @@
 import os, copy, logging, errno, fcntl, time, re, weakref, traceback
+import tarfile
 import cPickle as pickle
-
 from autotest_lib.client.common_lib import autotemp, error, log
 
 
@@ -422,6 +422,9 @@
     TIMESTAMP_FIELD = 'timestamp'
     LOCALTIME_FIELD = 'localtime'
 
+    # non-space whitespace is forbidden in any fields
+    BAD_CHAR_REGEX = re.compile(r'[\t\n\r\v\f]')
+
     def __init__(self, status_code, subdir, operation, message, fields,
                  timestamp=None):
         """Construct a status.log entry.
@@ -439,18 +442,16 @@
 
         @raise ValueError: if any of the parameters are invalid
         """
-        # non-space whitespace is forbidden in any fields
-        bad_char_regex = r'[\t\n\r\v\f]'
 
         if not log.is_valid_status(status_code):
             raise ValueError('status code %r is not valid' % status_code)
         self.status_code = status_code
 
-        if subdir and re.search(bad_char_regex, subdir):
+        if subdir and self.BAD_CHAR_REGEX.search(subdir):
             raise ValueError('Invalid character in subdir string')
         self.subdir = subdir
 
-        if operation and re.search(bad_char_regex, operation):
+        if operation and self.BAD_CHAR_REGEX.search(operation):
             raise ValueError('Invalid character in operation string')
         self.operation = operation
 
@@ -460,7 +461,7 @@
         message_lines = message.split('\n')
         self.message = message_lines[0].replace('\t', ' ' * 8)
         self.extra_message_lines = message_lines[1:]
-        if re.search(bad_char_regex, self.message):
+        if self.BAD_CHAR_REGEX.search(self.message):
             raise ValueError('Invalid character in message %r' % self.message)
 
         if not fields:
@@ -468,7 +469,7 @@
         else:
             self.fields = fields.copy()
         for key, value in self.fields.iteritems():
-            if re.search(bad_char_regex, key + value):
+            if self.BAD_CHAR_REGEX.search(key + value):
                 raise ValueError('Invalid character in %r=%r field'
                                  % (key, value))
 
@@ -574,7 +575,8 @@
     @property subdir_filename: The filename to write subdir-level logs to.
     """
     def __init__(self, job, indenter, global_filename='status',
-                 subdir_filename='status', record_hook=None):
+                 subdir_filename='status', record_hook=None,
+                 tap_writer=None):
         """Construct a logger instance.
 
         @param job: A reference to the job object this is logging for. Only a
@@ -589,12 +591,18 @@
         @param record_hook: An optional function to be called before an entry
             is logged. The function should expect a single parameter, a
             copy of the status_log_entry object.
+        @param tap_writer: An instance of the class TAPReport for additionally
+            writing TAP files
         """
         self._jobref = weakref.ref(job)
         self._indenter = indenter
         self.global_filename = global_filename
         self.subdir_filename = subdir_filename
         self._record_hook = record_hook
+        if tap_writer is None:
+            self._tap_writer = TAPReport(None)
+        else:
+            self._tap_writer = tap_writer
 
 
     def render_entry(self, log_entry):
@@ -647,6 +655,10 @@
             finally:
                 fileobj.close()
 
+        # write to TAPRecord instance
+        if log_entry.is_end() and self._tap_writer.do_tap_report:
+            self._tap_writer.record(log_entry, self._indenter.indent, log_files)
+
         # adjust the indentation if this was a START or END entry
         if log_entry.is_start():
             self._indenter.increment()
@@ -654,6 +666,191 @@
             self._indenter.decrement()
 
 
+class TAPReport(object):
+    """
+    Deal with TAP reporting for the Autotest client.
+    """
+
+    job_statuses = {
+        "TEST_NA": False,
+        "ABORT": False,
+        "ERROR": False,
+        "FAIL": False,
+        "WARN": False,
+        "GOOD": True,
+        "START": True,
+        "END GOOD": True,
+        "ALERT": False,
+        "RUNNING": False,
+        "NOSTATUS": False
+    }
+
+
+    def __init__(self, enable, resultdir=None, global_filename='status'):
+        """
+        @param enable: Set self.do_tap_report to trigger TAP reporting.
+        @param resultdir: Path where the TAP report files will be written.
+        @param global_filename: File name of the status files; '.tap'
+                extensions will be appended.
+        """
+        self.do_tap_report = enable
+        if resultdir is not None:
+            self.resultdir = os.path.abspath(resultdir)
+        self._reports_container = {}
+        self._keyval_container = {} # {'path1': [entries],}
+        self.global_filename = global_filename
+
+
+    @classmethod
+    def tap_ok(self, success, counter, message):
+        """
+        return a TAP message string.
+
+        @param success: True for positive message string.
+        @param counter: number of TAP line in plan.
+        @param message: additional message to report in TAP line.
+        """
+        if success:
+            message = "ok %s - %s" % (counter, message)
+        else:
+            message = "not ok %s - %s" % (counter, message)
+        return message
+
+
+    def record(self, log_entry, indent, log_files):
+        """
+        Append a job-level status event to self._reports_container. All
+        events will be written to TAP log files at the end of the test run.
+        Otherwise, it's impossible to determine the TAP plan.
+
+        @param log_entry: A string status code describing the type of status
+                entry being recorded. It must pass log.is_valid_status to be
+                considered valid.
+        @param indent: Level of the log_entry to determine the operation if
+                log_entry.operation is not given.
+        @param log_files: List of full path of files the TAP report will be
+                written to at the end of the test.
+        """
+        for log_file in log_files:
+            log_file_path = os.path.dirname(log_file)
+            key = log_file_path.split(self.resultdir, 1)[1].strip(os.sep)
+            if not key:
+                key = 'root'
+
+            if not self._reports_container.has_key(key):
+                self._reports_container[key] = []
+
+            if log_entry.operation:
+                operation = log_entry.operation
+            elif indent == 1:
+                operation = "job"
+            else:
+                operation = "unknown"
+            entry = self.tap_ok(
+                self.job_statuses.get(log_entry.status_code, False),
+                len(self._reports_container[key]) + 1, operation + "\n"
+            )
+            self._reports_container[key].append(entry)
+
+
+    def record_keyval(self, path, dictionary, type_tag=None):
+        """
+        Append a key-value pairs of dictionary to self._keyval_container in
+        TAP format. Once finished write out the keyval.tap file to the file
+        system.
+
+        If type_tag is None, then the key must be composed of alphanumeric
+        characters (or dashes + underscores). However, if type-tag is not
+        null then the keys must also have "{type_tag}" as a suffix. At
+        the moment the only valid values of type_tag are "attr" and "perf".
+
+        @param path: The full path of the keyval.tap file to be created
+        @param dictionary: The keys and values.
+        @param type_tag: The type of the values
+        """
+        self._keyval_container.setdefault(path, [0, []])
+        self._keyval_container[path][0] += 1
+
+        if type_tag is None:
+            key_regex = re.compile(r'^[-\.\w]+$')
+        else:
+            if type_tag not in ('attr', 'perf'):
+                raise ValueError('Invalid type tag: %s' % type_tag)
+            escaped_tag = re.escape(type_tag)
+            key_regex = re.compile(r'^[-\.\w]+\{%s\}$' % escaped_tag)
+        self._keyval_container[path][1].extend([
+            self.tap_ok(True, self._keyval_container[path][0], "results"),
+            "\n  ---\n",
+        ])
+        try:
+            for key in sorted(dictionary.keys()):
+                if not key_regex.search(key):
+                    raise ValueError('Invalid key: %s' % key)
+                self._keyval_container[path][1].append(
+                    '  %s: %s\n' % (key.replace('{', '_').rstrip('}'),
+                                    dictionary[key])
+                )
+        finally:
+            self._keyval_container[path][1].append("  ...\n")
+        self._write_keyval()
+
+
+    def _write_reports(self):
+        """
+        Write TAP reports to file.
+        """
+        for key in self._reports_container.keys():
+            if key == 'root':
+                sub_dir = ''
+            else:
+                sub_dir = key
+            tap_fh = open(os.sep.join(
+                [self.resultdir, sub_dir, self.global_filename]
+            ) + ".tap", 'w')
+            tap_fh.write('1..' + str(len(self._reports_container[key])) + '\n')
+            tap_fh.writelines(self._reports_container[key])
+            tap_fh.close()
+
+
+    def _write_keyval(self):
+        """
+        Write the self._keyval_container key values to a file.
+        """
+        for path in self._keyval_container.keys():
+            tap_fh = open(path + ".tap", 'w')
+            tap_fh.write('1..' + str(self._keyval_container[path][0]) + '\n')
+            tap_fh.writelines(self._keyval_container[path][1])
+            tap_fh.close()
+
+
+    def write(self):
+        """
+        Write the TAP reports to files.
+        """
+        self._write_reports()
+
+
+    def _write_tap_archive(self):
+        """
+        Write a tar archive containing all the TAP files and
+        a meta.yml containing the file names.
+        """
+        os.chdir(self.resultdir)
+        tap_files = []
+        for rel_path, d, files in os.walk('.'):
+            tap_files.extend(["/".join(
+                [rel_path, f]) for f in files if f.endswith('.tap')])
+        meta_yaml = open('meta.yml', 'w')
+        meta_yaml.write('file_order:\n')
+        tap_tar = tarfile.open(self.resultdir + '/tap.tar.gz', 'w:gz')
+        for f in tap_files:
+            meta_yaml.write("  - " + f.lstrip('./') + "\n")
+            tap_tar.add(f)
+        meta_yaml.close()
+        tap_tar.add('meta.yml')
+        tap_tar.close()
+
+
 class base_job(object):
     """An abstract base class for the various autotest job classes.
 
@@ -799,6 +996,11 @@
         # initialize all the job state
         self._state = self._job_state()
 
+        # initialize tap reporting
+        if dargs.has_key('options'):
+            self._tap = self._tap_init(dargs['options'].tap_report)
+        else:
+            self._tap = self._tap_init(False)
 
     @classmethod
     def _find_base_directories(cls):
@@ -959,6 +1161,11 @@
                               subdir, e)
             raise error.TestError('%s directory creation failed' % subdir)
 
    def _tap_init(self, enable):
        """Initialize TAP reporting.

        @param enable: flag passed through to TAPReport to switch the
                reporting on or off.
        @return: a TAPReport instance bound to this job's result directory.
        """
        return TAPReport(enable, resultdir=self.resultdir)
 
     def record(self, status_code, subdir, operation, status='',
                optional_fields=None):
diff --git a/client/common_lib/base_utils.py b/client/common_lib/base_utils.py
new file mode 100644
index 0000000..c8f2f1a
--- /dev/null
+++ b/client/common_lib/base_utils.py
@@ -0,0 +1,1717 @@
+#
+# Copyright 2008 Google Inc. Released under the GPL v2
+
+import os, pickle, random, re, resource, select, shutil, signal, StringIO
+import socket, struct, subprocess, sys, time, textwrap, urlparse
+import warnings, smtplib, logging, urllib2
+from threading import Thread, Event
+try:
+    import hashlib
+except ImportError:
+    import md5, sha
+from autotest_lib.client.common_lib import error, logging_manager
+
def deprecated(func):
    """Decorator that marks a function as deprecated.

    Every call to the wrapped function emits a DeprecationWarning; the
    original name, docstring and attribute dict are copied onto the
    wrapper so introspection keeps working.
    """
    def wrapper(*args, **dargs):
        warnings.warn("Call to deprecated function %s." % func.__name__,
                      category=DeprecationWarning)
        return func(*args, **dargs)
    wrapper.__name__ = func.__name__
    wrapper.__doc__ = func.__doc__
    wrapper.__dict__.update(func.__dict__)
    return wrapper
+
+
+class _NullStream(object):
+    def write(self, data):
+        pass
+
+
+    def flush(self):
+        pass
+
+
# Sentinel value: pass as stdout_tee/stderr_tee to route command output
# into the logging system instead of a file-like object.
TEE_TO_LOGS = object()
# Shared do-nothing sink used when no tee stream is requested.
_the_null_stream = _NullStream()

# Default logging levels for captured command stdout/stderr.
DEFAULT_STDOUT_LEVEL = logging.DEBUG
DEFAULT_STDERR_LEVEL = logging.ERROR

# prefixes for logging stdout/stderr of commands
STDOUT_PREFIX = '[stdout] '
STDERR_PREFIX = '[stderr] '
+
+
def get_stream_tee_file(stream, level, prefix=''):
    """Map a caller-supplied tee target onto an actual stream object.

    @param stream: None (discard output), TEE_TO_LOGS (route to the
            logging system), or any file-like object (used as-is).
    @param level: logging level used when stream is TEE_TO_LOGS.
    @param prefix: per-line prefix used when routing to logging.
    """
    if stream is TEE_TO_LOGS:
        return logging_manager.LoggingFile(level=level, prefix=prefix)
    if stream is None:
        return _the_null_stream
    return stream
+
+
class BgJob(object):
    """A shell command running asynchronously as a child process.

    The command is launched from __init__ (shell=True via /bin/bash) and
    its outcome accumulates in self.result (a CmdResult).  Callers drive
    output collection through output_prepare()/process_output()/cleanup(),
    normally via join_bg_jobs()/_wait_for_commands().
    """
    def __init__(self, command, stdout_tee=None, stderr_tee=None, verbose=True,
                 stdin=None, stderr_level=DEFAULT_STDERR_LEVEL):
        self.command = command
        # Tee targets are normalized: None -> null sink, TEE_TO_LOGS ->
        # logging, anything else is used as a file-like object.
        self.stdout_tee = get_stream_tee_file(stdout_tee, DEFAULT_STDOUT_LEVEL,
                                              prefix=STDOUT_PREFIX)
        self.stderr_tee = get_stream_tee_file(stderr_tee, stderr_level,
                                              prefix=STDERR_PREFIX)
        self.result = CmdResult(command)

        # allow for easy stdin input by string, we'll let subprocess create
        # a pipe for stdin input and we'll write to it in the wait loop
        if isinstance(stdin, basestring):
            self.string_stdin = stdin
            stdin = subprocess.PIPE
        else:
            self.string_stdin = None

        if verbose:
            logging.debug("Running '%s'" % command)
        self.sp = subprocess.Popen(command, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   preexec_fn=self._reset_sigpipe, shell=True,
                                   executable="/bin/bash",
                                   stdin=stdin)


    def output_prepare(self, stdout_file=None, stderr_file=None):
        """Set the buffers process_output()/cleanup() will write into.

        @param stdout_file: file-like object collecting stdout data.
        @param stderr_file: file-like object collecting stderr data.
        """
        self.stdout_file = stdout_file
        self.stderr_file = stderr_file


    def process_output(self, stdout=True, final_read=False):
        """output_prepare must be called prior to calling this

        @param stdout: True to service the stdout pipe, False for stderr.
        @param final_read: drain everything currently readable instead of
                doing a single 1024-byte read.
        """
        if stdout:
            pipe, buf, tee = self.sp.stdout, self.stdout_file, self.stdout_tee
        else:
            pipe, buf, tee = self.sp.stderr, self.stderr_file, self.stderr_tee

        if final_read:
            # read in all the data we can from pipe and then stop
            data = []
            while select.select([pipe], [], [], 0)[0]:
                data.append(os.read(pipe.fileno(), 1024))
                if len(data[-1]) == 0:
                    # zero-length read == EOF, the process closed the pipe
                    break
            data = "".join(data)
        else:
            # perform a single read
            data = os.read(pipe.fileno(), 1024)
        buf.write(data)
        tee.write(data)


    def cleanup(self):
        """Flush the tees, close our pipe ends and capture buffered output.

        Assumes the buffers set by output_prepare() support getvalue()
        (StringIO objects, as supplied by join_bg_jobs()).
        """
        self.stdout_tee.flush()
        self.stderr_tee.flush()
        self.sp.stdout.close()
        self.sp.stderr.close()
        self.result.stdout = self.stdout_file.getvalue()
        self.result.stderr = self.stderr_file.getvalue()


    def _reset_sigpipe(self):
        # Runs in the child between fork and exec (see preexec_fn above):
        # restore the default SIGPIPE disposition for the spawned command.
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+
def ip_to_long(ip):
    """Convert a dotted-quad IPv4 address string to a 32-bit integer."""
    # '!L' == unsigned 32-bit value in network (big-endian) byte order.
    packed = socket.inet_aton(ip)
    return struct.unpack('!L', packed)[0]
+
+
def long_to_ip(number):
    """Convert a 32-bit integer back to dotted-quad IPv4 notation."""
    packed = struct.pack('!L', number)
    return socket.inet_ntoa(packed)
+
+
def create_subnet_mask(bits):
    """Return the integer netmask for a prefix length (e.g. 24 -> 0xffffff00)."""
    host_bits = 32 - bits
    # Keep the top `bits` bits set, clear the low `host_bits` bits.
    return ((1 << 32) - 1) & ~((1 << host_bits) - 1)
+
+
def format_ip_with_mask(ip, mask_bits):
    """Return 'a.b.c.d/len' with the host bits of *ip* zeroed out."""
    network = ip_to_long(ip) & create_subnet_mask(mask_bits)
    return "%s/%s" % (long_to_ip(network), mask_bits)
+
+
def normalize_hostname(alias):
    """Resolve *alias* to its canonical hostname (forward, then reverse DNS)."""
    address = socket.gethostbyname(alias)
    hostname, _, _ = socket.gethostbyaddr(address)
    return hostname
+
+
def get_ip_local_port_range():
    """Return (lower, upper) bounds of the kernel's ephemeral port range."""
    contents = read_one_line('/proc/sys/net/ipv4/ip_local_port_range')
    bounds = re.match(r'\s*(\d+)\s*(\d+)\s*$', contents)
    return (int(bounds.group(1)), int(bounds.group(2)))
+
+
def set_ip_local_port_range(lower, upper):
    """Set the kernel's ephemeral (local) port range.

    @param lower: lowest port number to allow.
    @param upper: highest port number to allow.
    """
    write_one_line('/proc/sys/net/ipv4/ip_local_port_range',
                   '%d %d\n' % (lower, upper))
+
+
+
def send_email(mail_from, mail_to, subject, body):
    """
    Sends an email via smtp

    mail_from: string with email address of sender
    mail_to: string or list with email address(es) of recipients
    subject: string with subject of email
    body: (multi-line) string with body of email

    Failures are reported on stdout but never raised: email delivery is
    deliberately best-effort.
    """
    if isinstance(mail_to, str):
        mail_to = [mail_to]
    # Minimal RFC-822-style message assembled by hand.
    msg = "From: %s\nTo: %s\nSubject: %s\n\n%s" % (mail_from, ','.join(mail_to),
                                                   subject, body)
    try:
        mailer = smtplib.SMTP('localhost')
        try:
            mailer.sendmail(mail_from, mail_to, msg)
        finally:
            # Always close the SMTP session, even if sendmail() raised.
            mailer.quit()
    except Exception, e:
        # Emails are non-critical, not errors, but don't raise them
        print "Sending email failed. Reason: %s" % repr(e)
+
+
def read_one_line(filename):
    """Return the first line of *filename*, minus its trailing newline.

    @param filename: path of the file to read.
    @return: the first line as a string, trailing newline stripped.
    """
    f = open(filename, 'r')
    try:
        return f.readline().rstrip('\n')
    finally:
        # Close explicitly instead of leaking the descriptor until GC.
        f.close()
+
+
def read_file(filename):
    """Return the entire contents of *filename* as a single string."""
    fileobj = open(filename)
    try:
        contents = fileobj.read()
    finally:
        fileobj.close()
    return contents
+
+
def get_field(data, param, linestart="", sep=" "):
    """
    Parse data from string.
    @param data: Data to parse.
        example:
          data:
             cpu   324 345 34  5 345
             cpu0  34  11  34 34  33
             ^^^^
             start of line
             params 0   1   2  3   4
    @param param: Position of parameter after linestart marker.
    @param linestart: String to which start line with parameters.
    @param sep: Separator between parameters regular expression.
    @return: the selected parameter as a string, or None when no line
            starts with @linestart.
    """
    search = re.compile(r"(?<=^%s)\s*(.*)" % linestart, re.MULTILINE)
    find = search.search(data)
    if find is not None:
        return re.split("%s" % sep, find.group(1))[param]
    # Report through the logging system, consistent with the rest of this
    # module, instead of a bare print statement.
    logging.warn("There is no line which starts with %s in data.", linestart)
    return None
+
+
def write_one_line(filename, line):
    """Write *line* to *filename*, truncating any existing contents.

    The line is normalized to end with exactly one newline (any trailing
    newlines are stripped first, then a single one appended).
    """
    open_write_close(filename, line.rstrip('\n') + '\n')
+
+
def open_write_close(filename, data):
    """Write *data* to *filename* (truncating it) and close the file."""
    output = open(filename, 'w')
    try:
        output.write(data)
    finally:
        output.close()
+
+
def matrix_to_string(matrix, header=None):
    """
    Return a pretty, aligned string representation of a nxm matrix.

    This representation can be used to print any tabular data, such as
    database results. It works by scanning the lengths of each element
    in each column, and determining the format string dynamically.

    @param matrix: Matrix representation (list with n rows of m elements).
    @param header: Optional tuple or list with header elements to be displayed.
    """
    if type(header) is list:
        header = tuple(header)

    # Compute the widest cell of every column (header included).
    widths = []
    if header:
        for col in header:
            widths.append(len(col))
    for row in matrix:
        for idx, cell in enumerate(row):
            width = len(str(cell))
            if idx < len(widths):
                if width > widths[idx]:
                    widths[idx] = width
            else:
                widths.append(width)

    fmt = "".join("%-" + str(w) + "s " for w in widths) + "\n"

    out = ""
    if header:
        out += fmt % header
    for row in matrix:
        out += fmt % tuple(row)
    return out
+
+
def read_keyval(path):
    """
    Read a key-value pair format file into a dictionary, and return it.
    Takes either a filename or directory name as input. If it's a
    directory name, we assume you want the file to be called keyval.

    @param path: keyval file, or directory containing one named 'keyval'.
    @return: dict mapping keys to int, float or string values.
    @raise ValueError: when a non-blank line is not of the form key=value.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    keyval = {}
    if not os.path.exists(path):
        return keyval
    keyval_file = open(path)
    try:
        for line in keyval_file:
            # Strip comments and trailing whitespace.
            line = re.sub('#.*', '', line).rstrip()
            # Skip blank (or comment-only) lines instead of raising.
            if not line:
                continue
            if not re.search(r'^[-\.\w]+=', line):
                raise ValueError('Invalid format line: %s' % line)
            key, value = line.split('=', 1)
            # Coerce to the most specific numeric type that matches.
            if re.search('^\d+$', value):
                value = int(value)
            elif re.search('^(\d+\.)?\d+$', value):
                value = float(value)
            keyval[key] = value
    finally:
        # Close explicitly; the original leaked the file handle.
        keyval_file.close()
    return keyval
+
+
def write_keyval(path, dictionary, type_tag=None, tap_report=None):
    """
    Write a key-value pair format file out to a file. This uses append
    mode to open the file, so existing text will not be overwritten or
    reparsed.

    If type_tag is None, then the key must be composed of alphanumeric
    characters (or dashes+underscores). However, if type-tag is not
    null then the keys must also have "{type_tag}" as a suffix. At
    the moment the only valid values of type_tag are "attr" and "perf".

    @param path: full path of the file to be written
    @param dictionary: the items to write
    @param type_tag: see text above
    @param tap_report: optional TAP report object; when its do_tap_report
            flag is set the key/values are recorded there as well.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    keyval = open(path, 'a')

    # Build the key validation pattern for the requested tag.
    if type_tag is None:
        key_regex = re.compile(r'^[-\.\w]+$')
    elif type_tag in ('attr', 'perf'):
        escaped_tag = re.escape(type_tag)
        key_regex = re.compile(r'^[-\.\w]+\{%s\}$' % escaped_tag)
    else:
        raise ValueError('Invalid type tag: %s' % type_tag)

    try:
        for key in sorted(dictionary.keys()):
            if not key_regex.search(key):
                raise ValueError('Invalid key: %s' % key)
            keyval.write('%s=%s\n' % (key, dictionary[key]))
    finally:
        keyval.close()

    # same for tap
    if tap_report is not None and tap_report.do_tap_report:
        tap_report.record_keyval(path, dictionary, type_tag=type_tag)
+
class FileFieldMonitor(object):
    """
    Monitors the information from the file and reports its values.

    It gathers the information at start and stop of the measurement or
    continuously during the measurement.
    """
    class Monitor(Thread):
        """
        Internal monitor class to ensure continuous monitor of monitored file.
        """
        def __init__(self, master):
            """
            @param master: Master class which control Monitor
            """
            Thread.__init__(self)
            self.master = master

        def run(self):
            """
            Start monitor in thread mode
            """
            # Poll until the master signals the end of the measurement.
            while not self.master.end_event.isSet():
                self.master._get_value(self.master.logging)
                time.sleep(self.master.time_step)


    def __init__(self, status_file, data_to_read, mode_diff, continuously=False,
                 contlogging=False, separator=" +", time_step=0.1):
        """
        Initialize variables.
        @param status_file: File contain status.
        @param mode_diff: If True make a difference of value, else average.
        @param data_to_read: List of tuples with data position.
            format: [(start_of_line,position in params)]
            example:
              data:
                 cpu   324 345 34  5 345
                 cpu0  34  11  34 34  33
                 ^^^^
                 start of line
                 params 0   1   2  3   4
        @param mode_diff: True to subtract old value from new value,
            False make average of the values.
        @param continuously: Start the monitoring thread using the time_step
            as the measurement period.
        @param contlogging: Log data in continuous run.
        @param separator: Regular expression of separator.
        @param time_step: Time period of the monitoring value.
        """
        self.end_event = Event()
        self.start_time = 0
        self.end_time = 0
        self.test_time = 0

        self.status_file = status_file
        self.separator = separator
        self.data_to_read = data_to_read
        self.num_of_params = len(self.data_to_read)
        self.mode_diff = mode_diff
        self.continuously = continuously
        self.time_step = time_step

        # Current and previous samples, one slot per monitored field.
        self.value = [0 for i in range(self.num_of_params)]
        self.old_value = [0 for i in range(self.num_of_params)]
        self.log = []
        self.logging = contlogging

        self.started = False
        self.num_of_get_value = 0
        self.monitor = None


    def _get_value(self, logging=True):
        """
        Return current values.
        @param logging: If true log value in memory. There can be problem
          with long run.
        """
        data = read_file(self.status_file)
        value = []
        for i in range(self.num_of_params):
            value.append(int(get_field(data,
                             self.data_to_read[i][1],
                             self.data_to_read[i][0],
                             self.separator)))

        if logging:
            self.log.append(value)
        if not self.mode_diff:
            # Averaging mode: keep a running sum; stop() divides it by the
            # number of samples taken.
            value = map(lambda x, y: x + y, value, self.old_value)

        self.old_value = value
        self.num_of_get_value += 1
        return value


    def start(self):
        """
        Start value monitor.
        """
        # Restarting implicitly stops any measurement already in progress.
        if self.started:
            self.stop()
        self.old_value = [0 for i in range(self.num_of_params)]
        self.num_of_get_value = 0
        self.log = []
        self.end_event.clear()
        self.start_time = time.time()
        self._get_value()
        self.started = True
        if (self.continuously):
            self.monitor = FileFieldMonitor.Monitor(self)
            self.monitor.start()


    def stop(self):
        """
        Stop value monitor.
        """
        if self.started:
            self.started = False
            self.end_time = time.time()
            self.test_time = self.end_time - self.start_time
            self.value = self._get_value()
            if (self.continuously):
                # Wake and join the background Monitor thread.
                self.end_event.set()
                self.monitor.join()
            if (self.mode_diff):
                # Difference mode: last sample minus first sample.
                self.value = map(lambda x, y: x - y, self.log[-1], self.log[0])
            else:
                # Averaging mode: divide the running sum by the sample count.
                self.value = map(lambda x: x / self.num_of_get_value,
                                 self.value)


    def get_status(self):
        """
        @return: Status of monitored process average value,
            time of test and array of monitored values and time step of
            continuous run.
        """
        if self.started:
            self.stop()
        if self.mode_diff:
            # Convert the absolute samples into per-interval deltas.
            for i in range(len(self.log) - 1):
                self.log[i] = (map(lambda x, y: x - y,
                                   self.log[i + 1], self.log[i]))
            self.log.pop()
        return (self.value, self.test_time, self.log, self.time_step)
+
+
def is_url(path):
    """Return true if path looks like a URL"""
    # for now, just handle http and ftp
    scheme = urlparse.urlparse(path)[0]
    return scheme in ('http', 'ftp')
+
+
def urlopen(url, data=None, timeout=5):
    """Wrapper to urllib2.urlopen with timeout addition."""
    # Temporarily swap the process-wide default socket timeout; this
    # urllib2 has no per-call timeout parameter.
    saved_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        return urllib2.urlopen(url, data=data)
    finally:
        socket.setdefaulttimeout(saved_timeout)
+
+
def urlretrieve(url, filename, data=None, timeout=300):
    """Retrieve a file from given url."""
    logging.debug('Fetching %s -> %s', url, filename)

    remote = urlopen(url, data=data, timeout=timeout)
    try:
        local = open(filename, 'wb')
        try:
            shutil.copyfileobj(remote, local)
        finally:
            local.close()
    finally:
        remote.close()
+
+
def hash(type, input=None):
    """
    Returns an hash object of type md5 or sha1. This function is implemented in
    order to encapsulate hash objects in a way that is compatible with python
    2.4 and python 2.6 without warnings.

    Note that even though python 2.6 hashlib supports hash types other than
    md5 and sha1, we are artificially limiting the input values in order to
    make the function to behave exactly the same among both python
    implementations.

    @param input: Optional input string that will be used to update the hash.
    """
    if type not in ('md5', 'sha1'):
        raise ValueError("Unsupported hash type: %s" % type)

    try:
        digest = hashlib.new(type)
    except NameError:
        # hashlib is unavailable (python 2.4): use the legacy modules.
        if type == 'md5':
            digest = md5.new()
        else:
            digest = sha.new()

    if input:
        digest.update(input)

    return digest
+
+
def get_file(src, dest, permissions=None):
    """Get a file from src, which can be local or a remote URL.

    @param src: local path or http/ftp URL of the source.
    @param dest: local destination path.
    @param permissions: optional mode bits applied to dest via os.chmod.
    @return: dest (previously this returned None when src == dest).
    """
    if src == dest:
        # Nothing to fetch; still return dest so every path through this
        # function yields the destination path.
        return dest

    if is_url(src):
        urlretrieve(src, dest)
    else:
        shutil.copyfile(src, dest)

    if permissions:
        os.chmod(dest, permissions)
    return dest
+
+
def unmap_url(srcdir, src, destdir='.'):
    """
    Receives either a path to a local file or a URL.
    returns either the path to the local file, or the fetched URL

    unmap_url('/usr/src', 'foo.tar', '/tmp')
                            = '/usr/src/foo.tar'
    unmap_url('/usr/src', 'http://site/file', '/tmp')
                            = '/tmp/file'
                            (after retrieving it)
    """
    if not is_url(src):
        return os.path.join(srcdir, src)
    # Fetch the URL into destdir, keeping the URL's basename.
    filename = os.path.basename(urlparse.urlparse(src)[2])
    return get_file(src, os.path.join(destdir, filename))
+
+
def update_version(srcdir, preserve_srcdir, new_version, install,
                   *args, **dargs):
    """
    Make sure srcdir is version new_version

    If not, delete it and install() the new version.

    In the preserve_srcdir case, we just check it's up to date,
    and if not, we rerun install, without removing srcdir

    @param srcdir: directory whose installed version is tracked by a
            pickled ".version" file inside it.
    @param preserve_srcdir: if True never delete srcdir, only reinstall.
    @param new_version: version identifier to compare against / record.
    @param install: callable invoked with *args/**dargs to (re)install.
    """
    versionfile = os.path.join(srcdir, '.version')
    install_needed = True

    if os.path.exists(versionfile):
        # Pickle streams are binary: open in 'rb' (the old code used the
        # default text mode) and close the handle explicitly.
        version_pickle = open(versionfile, 'rb')
        try:
            old_version = pickle.load(version_pickle)
        finally:
            version_pickle.close()
        if old_version == new_version:
            install_needed = False

    if install_needed:
        if not preserve_srcdir and os.path.exists(srcdir):
            shutil.rmtree(srcdir)
        install(*args, **dargs)
        if os.path.exists(srcdir):
            version_pickle = open(versionfile, 'wb')
            try:
                pickle.dump(new_version, version_pickle)
            finally:
                version_pickle.close()
+
+
def get_stderr_level(stderr_is_expected):
    """Pick the logging level for a command's stderr stream.

    Expected stderr is logged at the stdout level; unexpected stderr is
    logged at the (higher) stderr level.
    """
    if not stderr_is_expected:
        return DEFAULT_STDERR_LEVEL
    return DEFAULT_STDOUT_LEVEL
+
+
def run(command, timeout=None, ignore_status=False,
        stdout_tee=None, stderr_tee=None, verbose=True, stdin=None,
        stderr_is_expected=None, args=()):
    """
    Run a command on the host.

    @param command: the command line string.
    @param timeout: time limit in seconds before attempting to kill the
            running process. The run() function will take a few seconds
            longer than 'timeout' to complete if it has to kill the process.
    @param ignore_status: do not raise an exception, no matter what the exit
            code of the command is.
    @param stdout_tee: optional file-like object to which stdout data
            will be written as it is generated (data will still be stored
            in result.stdout).
    @param stderr_tee: likewise for stderr.
    @param verbose: if True, log the command being run.
    @param stdin: stdin to pass to the executed process (can be a file
            descriptor, a file object of a real file or a string).
    @param args: sequence of strings of arguments to be given to the command
            inside " quotes after they have been escaped for that; each
            element in the sequence will be given as a separate command
            argument

    @return a CmdResult object

    @raise CmdError: the exit code of the command execution was not 0
    """
    if isinstance(args, basestring):
        raise TypeError('Got a string for the "args" keyword argument, '
                        'need a sequence.')

    # Shell-escape each extra argument and append it, double-quoted, to
    # the command line.
    for arg in args:
        command += ' "%s"' % sh_escape(arg)
    # By default, stderr output is "expected" only when failures are being
    # ignored (it then gets logged at the stdout level).
    if stderr_is_expected is None:
        stderr_is_expected = ignore_status

    bg_job = join_bg_jobs(
        (BgJob(command, stdout_tee, stderr_tee, verbose, stdin=stdin,
               stderr_level=get_stderr_level(stderr_is_expected)),),
        timeout)[0]
    if not ignore_status and bg_job.result.exit_status:
        raise error.CmdError(command, bg_job.result,
                             "Command returned non-zero exit status")

    return bg_job.result
+
+
def run_parallel(commands, timeout=None, ignore_status=False,
                 stdout_tee=None, stderr_tee=None):
    """
    Behaves the same as run() with the following exceptions:

    - commands is a list of commands to run in parallel.
    - ignore_status toggles whether or not an exception should be raised
      on any error.

    @return: a list of CmdResult objects
    @raise error.CmdError: when ignore_status is False and any command
            exits with a non-zero status.
    """
    bg_jobs = []
    for command in commands:
        bg_jobs.append(BgJob(command, stdout_tee, stderr_tee,
                             stderr_level=get_stderr_level(ignore_status)))

    # Updates objects in bg_jobs list with their process information
    join_bg_jobs(bg_jobs, timeout)

    for bg_job in bg_jobs:
        if not ignore_status and bg_job.result.exit_status:
            # Report the command of the job that actually failed; the old
            # code raised with the stale `command` loop variable, i.e.
            # always the *last* command in the list.
            raise error.CmdError(bg_job.command, bg_job.result,
                                 "Command returned non-zero exit status")

    return [bg_job.result for bg_job in bg_jobs]
+
+
@deprecated
def run_bg(command):
    """Function deprecated. Please use BgJob class instead.

    @return: (subprocess.Popen, CmdResult) pair for the started job.
    """
    bg_job = BgJob(command)
    return bg_job.sp, bg_job.result
+
+
def join_bg_jobs(bg_jobs, timeout=None):
    """Joins the bg_jobs with the current thread.

    @param bg_jobs: sequence of BgJob objects to wait on.
    @param timeout: maximum number of seconds to wait; jobs still running
            after that are killed by _wait_for_commands().
    @return: the same list of bg_jobs objects that was passed in.
    @raise error.CmdError: when the timeout fired before all jobs finished.
    """
    timeout_error = False
    for bg_job in bg_jobs:
        bg_job.output_prepare(StringIO.StringIO(), StringIO.StringIO())

    try:
        # We are holding ends to stdin, stdout pipes
        # hence we need to be sure to close those fds no matter what
        start_time = time.time()
        timeout_error = _wait_for_commands(bg_jobs, start_time, timeout)

        for bg_job in bg_jobs:
            # Process stdout and stderr
            bg_job.process_output(stdout=True, final_read=True)
            bg_job.process_output(stdout=False, final_read=True)
    finally:
        # close our ends of the pipes to the sp no matter what
        for bg_job in bg_jobs:
            bg_job.cleanup()

    if timeout_error:
        # TODO: This needs to be fixed to better represent what happens when
        # running in parallel. However this is backwards compatible, so it
        # will do for the time being.
        raise error.CmdError(bg_jobs[0].command, bg_jobs[0].result,
                             "Command(s) did not complete within %d seconds"
                             % timeout)

    return bg_jobs
+
+
def _wait_for_commands(bg_jobs, start_time, timeout):
    """Multiplex I/O for all jobs until they finish or the timeout fires.

    Feeds string stdin, drains stdout/stderr via select(), and kills any
    job still running when the timeout expires.

    @param bg_jobs: sequence of BgJob objects being waited on.
    @param start_time: time.time() value when the jobs were started.
    @param timeout: overall limit in seconds, or None for no limit.
    @return: True when the timeout fired (processes were nuked),
            False when every job completed on its own.
    """
    # This returns True if it must return due to a timeout, otherwise False.

    # To check for processes which terminate without producing any output
    # a 1 second timeout is used in select.
    SELECT_TIMEOUT = 1

    read_list = []
    write_list = []
    # Maps each pipe back to its owning job (and, for reads, which stream).
    reverse_dict = {}

    for bg_job in bg_jobs:
        read_list.append(bg_job.sp.stdout)
        read_list.append(bg_job.sp.stderr)
        reverse_dict[bg_job.sp.stdout] = (bg_job, True)
        reverse_dict[bg_job.sp.stderr] = (bg_job, False)
        if bg_job.string_stdin is not None:
            write_list.append(bg_job.sp.stdin)
            reverse_dict[bg_job.sp.stdin] = bg_job

    if timeout:
        stop_time = start_time + timeout
        time_left = stop_time - time.time()
    else:
        time_left = None # so that select never times out

    while not timeout or time_left > 0:
        # select will return when we may write to stdin or when there is
        # stdout/stderr output we can read (including when it is
        # EOF, that is the process has terminated).
        read_ready, write_ready, _ = select.select(read_list, write_list, [],
                                                   SELECT_TIMEOUT)

        # os.read() has to be used instead of
        # subproc.stdout.read() which will otherwise block
        for file_obj in read_ready:
            bg_job, is_stdout = reverse_dict[file_obj]
            bg_job.process_output(is_stdout)

        for file_obj in write_ready:
            # we can write PIPE_BUF bytes without blocking
            # POSIX requires PIPE_BUF is >= 512
            bg_job = reverse_dict[file_obj]
            file_obj.write(bg_job.string_stdin[:512])
            bg_job.string_stdin = bg_job.string_stdin[512:]
            # no more input data, close stdin, remove it from the select set
            if not bg_job.string_stdin:
                file_obj.close()
                write_list.remove(file_obj)
                del reverse_dict[file_obj]

        all_jobs_finished = True
        for bg_job in bg_jobs:
            if bg_job.result.exit_status is not None:
                continue

            bg_job.result.exit_status = bg_job.sp.poll()
            if bg_job.result.exit_status is not None:
                # process exited, remove its stdout/stdin from the select set
                bg_job.result.duration = time.time() - start_time
                read_list.remove(bg_job.sp.stdout)
                read_list.remove(bg_job.sp.stderr)
                del reverse_dict[bg_job.sp.stdout]
                del reverse_dict[bg_job.sp.stderr]
            else:
                all_jobs_finished = False

        if all_jobs_finished:
            return False

        if timeout:
            time_left = stop_time - time.time()

    # Kill all processes which did not complete prior to timeout
    for bg_job in bg_jobs:
        if bg_job.result.exit_status is not None:
            continue

        logging.warn('run process timeout (%s) fired on: %s', timeout,
                     bg_job.command)
        nuke_subprocess(bg_job.sp)
        bg_job.result.exit_status = bg_job.sp.poll()
        bg_job.result.duration = time.time() - start_time

    return True
+
+
def pid_is_alive(pid):
    """
    True if process pid exists and is not yet stuck in Zombie state.
    Zombies are impossible to move between cgroups, etc.
    pid can be integer, or text of integer.
    """
    path = '/proc/%s/stat' % pid

    try:
        stat_line = read_one_line(path)
    except IOError:
        if os.path.exists(path):
            # Unexpected read failure: propagate it.
            raise
        # file went away: the process exited while we were looking
        return False

    # Field 3 of /proc/<pid>/stat is the state character; 'Z' == zombie.
    return stat_line.split()[2] != 'Z'
+
+
def signal_pid(pid, sig):
    """
    Sends a signal to a process id. Returns True if the process terminated
    successfully, False otherwise.
    """
    try:
        os.kill(pid, sig)
    except OSError:
        # The process may have died before we could kill it.
        pass

    # Poll for up to five seconds for the process to disappear.
    attempts = 5
    while attempts > 0:
        if not pid_is_alive(pid):
            return True
        time.sleep(1)
        attempts -= 1

    # Still alive after the grace period.
    return False
+
+
def nuke_subprocess(subproc):
    """Terminate *subproc*, escalating from SIGTERM to SIGKILL.

    @param subproc: a subprocess.Popen object.
    @return: the exit status once the process is gone, or None if it
            survived even SIGKILL.
    """
    # Already dead?  Nothing to do.
    status = subproc.poll()
    if status is not None:
        return status

    # Escalate through increasingly forceful signals.
    for sig in (signal.SIGTERM, signal.SIGKILL):
        signal_pid(subproc.pid, sig)
        status = subproc.poll()
        if status is not None:
            return status
+
+
def nuke_pid(pid, signal_queue=(signal.SIGTERM, signal.SIGKILL)):
    """Kill process *pid*, trying each signal in signal_queue in order.

    @param pid: process id to kill.
    @param signal_queue: escalating sequence of signals to send.
    @raise error.AutoservRunError: if the process survives every signal.
    """
    # the process has not terminated within timeout,
    # kill it via an escalating series of signals.
    for sig in signal_queue:
        if signal_pid(pid, sig):
            return

    # no signal successfully terminated the process
    raise error.AutoservRunError('Could not kill %d' % pid, None)
+
+
def system(command, timeout=None, ignore_status=False):
    """
    Run a command, teeing its output to the logging system.

    @param command: command line string to execute.
    @param timeout: timeout in seconds
    @param ignore_status: if ignore_status=False, throw an exception if the
            command's exit code is non-zero
            if ignore_status=True, return the exit code.

    @return exit status of command
            (note, this will always be zero unless ignore_status=True)
    """
    return run(command, timeout=timeout, ignore_status=ignore_status,
               stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS).exit_status
+
+
def system_parallel(commands, timeout=None, ignore_status=False):
    """This function returns a list of exit statuses for the respective
    list of commands."""
    results = run_parallel(commands, timeout=timeout,
                           ignore_status=ignore_status,
                           stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    return [result.exit_status for result in results]
+
+
def system_output(command, timeout=None, ignore_status=False,
                  retain_output=False, args=()):
    """
    Run a command and return the stdout output.

    @param command: command string to execute.
    @param timeout: time limit in seconds before attempting to kill the
            running process. The function will take a few seconds longer
            than 'timeout' to complete if it has to kill the process.
    @param ignore_status: do not raise an exception, no matter what the exit
            code of the command is.
    @param retain_output: set to True to make stdout/stderr of the command
            output to be also sent to the logging system
    @param args: sequence of strings of arguments to be given to the command
            inside " quotes after they have been escaped for that; each
            element in the sequence will be given as a separate command
            argument

    @return a string with the stdout output of the command.
    """
    if retain_output:
        tee = TEE_TO_LOGS
    else:
        tee = None
    result = run(command, timeout=timeout, ignore_status=ignore_status,
                 stdout_tee=tee, stderr_tee=tee, args=args)
    out = result.stdout
    # Trim a single trailing newline, if present.
    if out[-1:] == '\n':
        out = out[:-1]
    return out
+
+
+def system_output_parallel(commands, timeout=None, ignore_status=False,
+                           retain_output=False):
+    if retain_output:
+        out = [bg_job.stdout for bg_job
+               in run_parallel(commands, timeout=timeout,
+                               ignore_status=ignore_status,
+                               stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)]
+    else:
+        out = [bg_job.stdout for bg_job in run_parallel(commands,
+                                  timeout=timeout, ignore_status=ignore_status)]
+    for x in out:
+        if out[-1:] == '\n': out = out[:-1]
+    return out
+
+
+def strip_unicode(input):
+    """
+    Recursively convert unicode strings to byte strings (Python 2 only).
+
+    Lists and dicts are rebuilt with converted contents; any other type is
+    returned unchanged.  Comparisons use exact type() checks, so subclasses
+    of list/dict/unicode deliberately pass through untouched.
+
+    @param input: object to convert; may be arbitrarily nested.
+    @return a converted copy (or the original object for unhandled types).
+    """
+    if type(input) == list:
+        return [strip_unicode(i) for i in input]
+    elif type(input) == dict:
+        output = {}
+        for key in input.keys():
+            # str() on the key converts unicode dict keys as well.
+            # NOTE(review): str() raises UnicodeEncodeError for non-ASCII
+            # unicode — confirm callers only pass ASCII-safe data.
+            output[str(key)] = strip_unicode(input[key])
+        return output
+    elif type(input) == unicode:
+        return str(input)
+    else:
+        return input
+
+
+def get_cpu_percentage(function, *args, **dargs):
+    """Returns a tuple containing the CPU% and return value from function call.
+
+    This function calculates the usage time by taking the difference of
+    the user and system times both before and after the function call.
+
+    @param function: callable to time.
+    @param args: positional arguments passed through to function.
+    @param dargs: keyword arguments passed through to function.
+    @return tuple (cpu_percent, function_return_value).
+    """
+    child_pre = resource.getrusage(resource.RUSAGE_CHILDREN)
+    self_pre = resource.getrusage(resource.RUSAGE_SELF)
+    start = time.time()
+    to_return = function(*args, **dargs)
+    elapsed = time.time() - start
+    self_post = resource.getrusage(resource.RUSAGE_SELF)
+    child_post = resource.getrusage(resource.RUSAGE_CHILDREN)
+
+    # Calculate CPU Percentage. The first two fields of a rusage struct are
+    # ru_utime and ru_stime; zip() returns a list here (Python 2), so it can
+    # be sliced directly.
+    s_user, s_system = [a - b for a, b in zip(self_post, self_pre)[:2]]
+    c_user, c_system = [a - b for a, b in zip(child_post, child_pre)[:2]]
+    # NOTE(review): divides by wall-clock time; a call faster than the clock
+    # resolution would make elapsed == 0 and raise ZeroDivisionError —
+    # confirm callers only time non-trivial work.
+    cpu_percent = (s_user + c_user + s_system + c_system) / elapsed
+
+    return cpu_percent, to_return
+
+
+class SystemLoad(object):
+    """
+    Get system and/or process values and return average value of load.
+    """
+    def __init__(self, pids, advanced=False, time_step=0.1, cpu_cont=False,
+                 use_log=False):
+        """
+        @param pids: List of pids to be monitored. If pid = 0 whole system will
+          be monitored. pid == 0 means whole system.
+        @param advanced: monitor add value for system irq count and softirq
+          for process minor and maior page fault
+        @param time_step: Time step for continuous monitoring.
+        @param cpu_cont: If True monitor CPU load continuously.
+        @param use_log: If true every monitoring is logged for dump.
+        """
+        self.pids = []
+        self.stats = {}
+        for pid in pids:
+            if pid == 0:
+                cpu = FileFieldMonitor("/proc/stat",
+                                       [("cpu", 0), # User Time
+                                        ("cpu", 2), # System Time
+                                        ("intr", 0), # IRQ Count
+                                        ("softirq", 0)], # Soft IRQ Count
+                                       True,
+                                       cpu_cont,
+                                       use_log,
+                                       " +",
+                                       time_step)
+                mem = FileFieldMonitor("/proc/meminfo",
+                                       [("MemTotal:", 0), # Mem Total
+                                        ("MemFree:", 0), # Mem Free
+                                        ("Buffers:", 0), # Buffers
+                                        ("Cached:", 0)], # Cached
+                                       False,
+                                       True,
+                                       use_log,
+                                       " +",
+                                       time_step)
+                self.stats[pid] = ["TOTAL", cpu, mem]
+                self.pids.append(pid)
+            else:
+                name = ""
+                if (type(pid) is int):
+                    self.pids.append(pid)
+                    name = get_process_name(pid)
+                else:
+                    self.pids.append(pid[0])
+                    name = pid[1]
+
+                cpu = FileFieldMonitor("/proc/%d/stat" %
+                                       self.pids[-1],
+                                       [("", 13), # User Time
+                                        ("", 14), # System Time
+                                        ("", 9), # Minority Page Fault
+                                        ("", 11)], # Majority Page Fault
+                                       True,
+                                       cpu_cont,
+                                       use_log,
+                                       " +",
+                                       time_step)
+                mem = FileFieldMonitor("/proc/%d/status" %
+                                       self.pids[-1],
+                                       [("VmSize:", 0), # Virtual Memory Size
+                                        ("VmRSS:", 0), # Resident Set Size
+                                        ("VmPeak:", 0), # Peak VM Size
+                                        ("VmSwap:", 0)], # VM in Swap
+                                       False,
+                                       True,
+                                       use_log,
+                                       " +",
+                                       time_step)
+                self.stats[self.pids[-1]] = [name, cpu, mem]
+
+        self.advanced = advanced
+
+
+    def __str__(self):
+        """
+        Define format how to print
+        """
+        out = ""
+        for pid in self.pids:
+            for stat in self.stats[pid][1:]:
+                out += str(stat.get_status()) + "\n"
+        return out
+
+
+    def start(self, pids=[]):
+        """
+        Start monitoring of the process system usage.
+        @param pids: List of PIDs you intend to control. Use pids=[] to control
+            all defined PIDs.
+        """
+        if pids == []:
+            pids = self.pids
+
+        for pid in pids:
+            for stat in self.stats[pid][1:]:
+                stat.start()
+
+
+    def stop(self, pids=[]):
+        """
+        Stop monitoring of the process system usage.
+        @param pids: List of PIDs you intend to control. Use pids=[] to control
+            all defined PIDs.
+        """
+        if pids == []:
+            pids = self.pids
+
+        for pid in pids:
+            for stat in self.stats[pid][1:]:
+                stat.stop()
+
+
+    def dump(self, pids=[]):
+        """
+        Get the status of monitoring.
+        @param pids: List of PIDs you intend to control. Use pids=[] to control
+            all defined PIDs.
+         @return:
+            tuple([cpu load], [memory load]):
+                ([(PID1, (PID1_cpu_meas)), (PID2, (PID2_cpu_meas)), ...],
+                 [(PID1, (PID1_mem_meas)), (PID2, (PID2_mem_meas)), ...])
+
+            PID1_cpu_meas:
+                average_values[], test_time, cont_meas_values[[]], time_step
+            PID1_mem_meas:
+                average_values[], test_time, cont_meas_values[[]], time_step
+            where average_values[] are the measured values (mem_free,swap,...)
+            which are described in SystemLoad.__init__()-FileFieldMonitor.
+            cont_meas_values[[]] is a list of average_values in the sampling
+            times.
+        """
+        if pids == []:
+            pids = self.pids
+
+        cpus = []
+        memory = []
+        for pid in pids:
+            stat = (pid, self.stats[pid][1].get_status())
+            cpus.append(stat)
+        for pid in pids:
+            stat = (pid, self.stats[pid][2].get_status())
+            memory.append(stat)
+
+        return (cpus, memory)
+
+
+    def get_cpu_status_string(self, pids=[]):
+        """
+        Convert status to string array.
+        @param pids: List of PIDs you intend to control. Use pids=[] to control
+            all defined PIDs.
+        @return: String format to table.
+        """
+        if pids == []:
+            pids = self.pids
+
+        headers = ["NAME",
+                   ("%7s") % "PID",
+                   ("%5s") % "USER",
+                   ("%5s") % "SYS",
+                   ("%5s") % "SUM"]
+        if self.advanced:
+            headers.extend(["MINFLT/IRQC",
+                            "MAJFLT/SOFTIRQ"])
+        headers.append(("%11s") % "TIME")
+        textstatus = []
+        for pid in pids:
+            stat = self.stats[pid][1].get_status()
+            time = stat[1]
+            stat = stat[0]
+            textstatus.append(["%s" % self.stats[pid][0],
+                               "%7s" % pid,
+                               "%4.0f%%" % (stat[0] / time),
+                               "%4.0f%%" % (stat[1] / time),
+                               "%4.0f%%" % ((stat[0] + stat[1]) / time),
+                               "%10.3fs" % time])
+            if self.advanced:
+                textstatus[-1].insert(-1, "%11d" % stat[2])
+                textstatus[-1].insert(-1, "%14d" % stat[3])
+
+        return matrix_to_string(textstatus, tuple(headers))
+
+
+    def get_mem_status_string(self, pids=[]):
+        """
+        Convert status to string array.
+        @param pids: List of PIDs you intend to control. Use pids=[] to control
+            all defined PIDs.
+        @return: String format to table.
+        """
+        if pids == []:
+            pids = self.pids
+
+        headers = ["NAME",
+                   ("%7s") % "PID",
+                   ("%8s") % "TOTAL/VMSIZE",
+                   ("%8s") % "FREE/VMRSS",
+                   ("%8s") % "BUFFERS/VMPEAK",
+                   ("%8s") % "CACHED/VMSWAP",
+                   ("%11s") % "TIME"]
+        textstatus = []
+        for pid in pids:
+            stat = self.stats[pid][2].get_status()
+            time = stat[1]
+            stat = stat[0]
+            textstatus.append(["%s" % self.stats[pid][0],
+                               "%7s" % pid,
+                               "%10dMB" % (stat[0] / 1024),
+                               "%8dMB" % (stat[1] / 1024),
+                               "%12dMB" % (stat[2] / 1024),
+                               "%11dMB" % (stat[3] / 1024),
+                               "%10.3fs" % time])
+
+        return matrix_to_string(textstatus, tuple(headers))
+
+
+def get_arch(run_function=run):
+    """
+    Get the hardware architecture of the machine.
+    run_function is used to execute the commands. It defaults to
+    utils.run() but a custom method (if provided) should be of the
+    same schema as utils.run. It should return a CmdResult object and
+    throw a CmdError exception.
+    """
+    arch = run_function('/bin/uname -m').stdout.rstrip()
+    if re.match(r'i\d86$', arch):
+        arch = 'i386'
+    return arch
+
+
+def get_num_logical_cpus_per_socket(run_function=run):
+    """
+    Get the number of cores (including hyperthreading) per cpu.
+    run_function is used to execute the commands. It defaults to
+    utils.run() but a custom method (if provided) should be of the
+    same schema as utils.run. It should return a CmdResult object and
+    throw a CmdError exception.
+    """
+    siblings = run_function('grep "^siblings" /proc/cpuinfo').stdout.rstrip()
+    num_siblings = map(int,
+                       re.findall(r'^siblings\s*:\s*(\d+)\s*$',
+                                  siblings, re.M))
+    if len(num_siblings) == 0:
+        raise error.TestError('Unable to find siblings info in /proc/cpuinfo')
+    if min(num_siblings) != max(num_siblings):
+        raise error.TestError('Number of siblings differ %r' %
+                              num_siblings)
+    return num_siblings[0]
+
+
+def merge_trees(src, dest):
+    """
+    Merges a source directory tree at 'src' into a destination tree at
+    'dest'. If a path is a file in both trees than the file in the source
+    tree is APPENDED to the one in the destination tree. If a path is
+    a directory in both trees then the directories are recursively merged
+    with this function. In any other case, the function will skip the
+    paths that cannot be merged (instead of failing).
+    """
+    if not os.path.exists(src):
+        return # exists only in dest
+    elif not os.path.exists(dest):
+        if os.path.isfile(src):
+            shutil.copy2(src, dest) # file only in src
+        else:
+            shutil.copytree(src, dest, symlinks=True) # dir only in src
+        return
+    elif os.path.isfile(src) and os.path.isfile(dest):
+        # src & dest are files in both trees, append src to dest
+        destfile = open(dest, "a")
+        try:
+            srcfile = open(src)
+            try:
+                destfile.write(srcfile.read())
+            finally:
+                srcfile.close()
+        finally:
+            destfile.close()
+    elif os.path.isdir(src) and os.path.isdir(dest):
+        # src & dest are directories in both trees, so recursively merge
+        for name in os.listdir(src):
+            merge_trees(os.path.join(src, name), os.path.join(dest, name))
+    else:
+        # src & dest both exist, but are incompatible
+        return
+
+
+class CmdResult(object):
+    """
+    Command execution result.
+
+    command:     String containing the command line itself
+    exit_status: Integer exit code of the process
+    stdout:      String containing stdout of the process
+    stderr:      String containing stderr of the process
+    duration:    Elapsed wall clock time running the process
+    """
+
+
+    def __init__(self, command="", stdout="", stderr="",
+                 exit_status=None, duration=0):
+        self.command = command
+        self.exit_status = exit_status
+        self.stdout = stdout
+        self.stderr = stderr
+        self.duration = duration
+
+
+    def __repr__(self):
+        wrapper = textwrap.TextWrapper(width = 78,
+                                       initial_indent="\n    ",
+                                       subsequent_indent="    ")
+
+        stdout = self.stdout.rstrip()
+        if stdout:
+            stdout = "\nstdout:\n%s" % stdout
+
+        stderr = self.stderr.rstrip()
+        if stderr:
+            stderr = "\nstderr:\n%s" % stderr
+
+        return ("* Command: %s\n"
+                "Exit status: %s\n"
+                "Duration: %s\n"
+                "%s"
+                "%s"
+                % (wrapper.fill(self.command), self.exit_status,
+                self.duration, stdout, stderr))
+
+
+class run_randomly:
+    def __init__(self, run_sequentially=False):
+        # Run sequentially is for debugging control files
+        self.test_list = []
+        self.run_sequentially = run_sequentially
+
+
+    def add(self, *args, **dargs):
+        test = (args, dargs)
+        self.test_list.append(test)
+
+
+    def run(self, fn):
+        while self.test_list:
+            test_index = random.randint(0, len(self.test_list)-1)
+            if self.run_sequentially:
+                test_index = 0
+            (args, dargs) = self.test_list.pop(test_index)
+            fn(*args, **dargs)
+
+
+def import_site_module(path, module, dummy=None, modulefile=None):
+    """
+    Try to import the site specific module if it exists.
+
+    @param path full filename of the source file calling this (ie __file__)
+    @param module full module name
+    @param dummy dummy value to return in case there is no symbol to import
+    @param modulefile module filename
+
+    @return site specific module or dummy
+
+    @raises ImportError if the site file exists but imports fails
+    """
+    short_module = module[module.rfind(".") + 1:]
+
+    if not modulefile:
+        modulefile = short_module + ".py"
+
+    if os.path.exists(os.path.join(os.path.dirname(path), modulefile)):
+        return __import__(module, {}, {}, [short_module])
+    return dummy
+
+
+def import_site_symbol(path, module, name, dummy=None, modulefile=None):
+    """
+    Try to import site specific symbol from site specific file if it exists
+
+    @param path full filename of the source file calling this (ie __file__)
+    @param module full module name
+    @param name symbol name to be imported from the site file
+    @param dummy dummy value to return in case there is no symbol to import
+    @param modulefile module filename
+
+    @return site specific symbol or dummy
+
+    @raises ImportError if the site file exists but imports fails
+    """
+    module = import_site_module(path, module, modulefile=modulefile)
+    if not module:
+        return dummy
+
+    # special unique value to tell us if the symbol can't be imported
+    cant_import = object()
+
+    obj = getattr(module, name, cant_import)
+    if obj is cant_import:
+        logging.debug("unable to import site symbol '%s', using non-site "
+                      "implementation", name)
+        return dummy
+
+    return obj
+
+
+def import_site_class(path, module, classname, baseclass, modulefile=None):
+    """
+    Try to import site specific class from site specific file if it exists
+
+    Args:
+        path: full filename of the source file calling this (ie __file__)
+        module: full module name
+        classname: class name to be loaded from site file
+        baseclass: base class object to return when no site file present or
+            to mixin when site class exists but is not inherited from baseclass
+        modulefile: module filename
+
+    Returns: baseclass if site specific class does not exist, the site specific
+        class if it exists and is inherited from baseclass or a mixin of the
+        site specific class and baseclass when the site specific class exists
+        and is not inherited from baseclass
+
+    Raises: ImportError if the site file exists but imports fails
+    """
+
+    res = import_site_symbol(path, module, classname, None, modulefile)
+    if res:
+        if not issubclass(res, baseclass):
+            # if not a subclass of baseclass then mix in baseclass with the
+            # site specific class object and return the result
+            res = type(classname, (res, baseclass), {})
+    else:
+        res = baseclass
+
+    return res
+
+
+def import_site_function(path, module, funcname, dummy, modulefile=None):
+    """
+    Try to import site specific function from site specific file if it exists
+
+    Args:
+        path: full filename of the source file calling this (ie __file__)
+        module: full module name
+        funcname: function name to be imported from site file
+        dummy: dummy function to return in case there is no function to import
+        modulefile: module filename
+
+    Returns: site specific function object or dummy
+
+    Raises: ImportError if the site file exists but imports fails
+    """
+
+    return import_site_symbol(path, module, funcname, dummy, modulefile)
+
+
+def _get_pid_path(program_name):
+    my_path = os.path.dirname(__file__)
+    return os.path.abspath(os.path.join(my_path, "..", "..",
+                                        "%s.pid" % program_name))
+
+
+def write_pid(program_name):
+    """
+    Try to drop <program_name>.pid in the main autotest directory.
+
+    Args:
+      program_name: prefix for file name
+    """
+    pidfile = open(_get_pid_path(program_name), "w")
+    try:
+        pidfile.write("%s\n" % os.getpid())
+    finally:
+        pidfile.close()
+
+
+def delete_pid_file_if_exists(program_name):
+    """
+    Tries to remove <program_name>.pid from the main autotest directory.
+    """
+    pidfile_path = _get_pid_path(program_name)
+
+    try:
+        os.remove(pidfile_path)
+    except OSError:
+        if not os.path.exists(pidfile_path):
+            return
+        raise
+
+
+def get_pid_from_file(program_name):
+    """
+    Reads the pid from <program_name>.pid in the autotest directory.
+
+    @param program_name the name of the program
+    @return the pid if the file exists, None otherwise.
+    """
+    pidfile_path = _get_pid_path(program_name)
+    if not os.path.exists(pidfile_path):
+        return None
+
+    pidfile = open(_get_pid_path(program_name), 'r')
+
+    try:
+        try:
+            pid = int(pidfile.readline())
+        except IOError:
+            if not os.path.exists(pidfile_path):
+                return None
+            raise
+    finally:
+        pidfile.close()
+
+    return pid
+
+
+def get_process_name(pid):
+    """
+    Get process name from PID.
+
+    @param pid: PID of process (int, used in a %d format).
+    @return field 1 of /proc/<pid>/stat with its first and last characters
+            sliced off — presumably the parentheses around the comm value;
+            confirm against get_field()'s splitting rules.
+    """
+    return get_field(read_file("/proc/%d/stat" % pid), 1)[1:-1]
+
+
+def program_is_alive(program_name):
+    """
+    Checks if the process is alive and not in Zombie state.
+
+    @param program_name the name of the program
+    @return True if still alive, False otherwise
+    """
+    pid = get_pid_from_file(program_name)
+    if pid is None:
+        return False
+    return pid_is_alive(pid)
+
+
+def signal_program(program_name, sig=signal.SIGTERM):
+    """
+    Sends a signal to the process listed in <program_name>.pid
+
+    @param program_name the name of the program
+    @param sig signal to send
+    """
+    pid = get_pid_from_file(program_name)
+    if pid:
+        signal_pid(pid, sig)
+
+
+def get_relative_path(path, reference):
+    """Given 2 absolute paths "path" and "reference", compute the path of
+    "path" as relative to the directory "reference".
+
+    @param path the absolute path to convert to a relative path
+    @param reference an absolute directory path to which the relative
+        path will be computed
+    """
+    # normalize the paths (remove double slashes, etc)
+    assert(os.path.isabs(path))
+    assert(os.path.isabs(reference))
+
+    path = os.path.normpath(path)
+    reference = os.path.normpath(reference)
+
+    # we could use os.path.split() but it splits from the end
+    path_list = path.split(os.path.sep)[1:]
+    ref_list = reference.split(os.path.sep)[1:]
+
+    # find the longest leading common path; relies on the loop variable
+    # surviving the loop (both lists are non-empty for absolute paths, so
+    # the body runs at least once and 'i' is always bound afterwards)
+    for i in xrange(min(len(path_list), len(ref_list))):
+        if path_list[i] != ref_list[i]:
+            # decrement i so when exiting this loop either by no match or by
+            # end of range we are one step behind
+            i -= 1
+            break
+    i += 1
+    # drop the common part of the paths, not interested in that anymore
+    del path_list[:i]
+
+    # for each uncommon component in the reference prepend a ".."
+    path_list[:0] = ['..'] * (len(ref_list) - i)
+
+    # NOTE(review): if path == reference, path_list ends up empty and
+    # os.path.join(*[]) raises TypeError — confirm callers never pass
+    # identical paths.
+    return os.path.join(*path_list)
+
+
+def sh_escape(command):
+    """
+    Escape special characters from a command so that it can be passed
+    as a double quoted (" ") string in a (ba)sh command.
+
+    Args:
+            command: the command string to escape.
+
+    Returns:
+            The escaped command string. The required englobing double
+            quotes are NOT added and so should be added at some point by
+            the caller.
+
+    See also: http://www.tldp.org/LDP/abs/html/escapingsection.html
+    """
+    command = command.replace("\\", "\\\\")
+    command = command.replace("$", r'\$')
+    command = command.replace('"', r'\"')
+    command = command.replace('`', r'\`')
+    return command
+
+
+def configure(extra=None, configure='./configure'):
+    """
+    Run configure passing in the correct host, build, and target options.
+
+    @param extra: extra command line arguments to pass to configure
+    @param configure: which configure script to use
+    """
+    args = []
+    if 'CHOST' in os.environ:
+        args.append('--host=' + os.environ['CHOST'])
+    if 'CBUILD' in os.environ:
+        args.append('--build=' + os.environ['CBUILD'])
+    if 'CTARGET' in os.environ:
+        args.append('--target=' + os.environ['CTARGET'])
+    if extra:
+        args.append(extra)
+
+    system('%s %s' % (configure, ' '.join(args)))
+
+
+def make(extra='', make='make', timeout=None, ignore_status=False):
+    """
+    Run make, adding MAKEOPTS to the list of options.
+
+    @param extra: extra command line arguments to pass to make.
+    """
+    cmd = '%s %s %s' % (make, os.environ.get('MAKEOPTS', ''), extra)
+    return system(cmd, timeout=timeout, ignore_status=ignore_status)
+
+
+def compare_versions(ver1, ver2):
+    """Version number comparison between ver1 and ver2 strings.
+
+    Components are split on '.' and '-'; corresponding components are
+    zero-padded to equal length and compared lexically (Python 2 cmp()),
+    which orders embedded numbers numerically.
+
+    >>> compare_versions("1", "2")
+    -1
+    >>> compare_versions("foo-1.1", "foo-1.2")
+    -1
+    >>> compare_versions("1.2", "1.2a")
+    -1
+    >>> compare_versions("1.2b", "1.2a")
+    1
+    >>> compare_versions("1.3.5.3a", "1.3.5.3b")
+    -1
+
+    Args:
+        ver1: version string
+        ver2: version string
+
+    Returns:
+        int:  1 if ver1 >  ver2
+              0 if ver1 == ver2
+             -1 if ver1 <  ver2
+    """
+    ax = re.split('[.-]', ver1)
+    ay = re.split('[.-]', ver2)
+    while len(ax) > 0 and len(ay) > 0:
+        cx = ax.pop(0)
+        cy = ay.pop(0)
+        maxlen = max(len(cx), len(cy))
+        c = cmp(cx.zfill(maxlen), cy.zfill(maxlen))
+        if c != 0:
+            return c
+    # All shared components were equal: the version with more components
+    # compares greater.
+    return cmp(len(ax), len(ay))
+
+
+def args_to_dict(args):
+    """Convert autoserv extra arguments in the form of key=val or key:val to a
+    dictionary.  Each argument key is converted to lowercase dictionary key.
+
+    Args:
+        args - list of autoserv extra arguments.
+
+    Returns:
+        dictionary
+    """
+    arg_re = re.compile(r'(\w+)[:=](.*)$')
+    dict = {}
+    for arg in args:
+        match = arg_re.match(arg)
+        if match:
+            dict[match.group(1).lower()] = match.group(2)
+        else:
+            logging.warning("args_to_dict: argument '%s' doesn't match "
+                            "'%s' pattern. Ignored." % (arg, arg_re.pattern))
+    return dict
+
+
+def get_unused_port():
+    """
+    Finds a semi-random available port. A race condition is still
+    possible after the port number is returned, if another process
+    happens to bind it.
+
+    Returns:
+        A port number that is unused on both TCP and UDP.
+    """
+
+    def try_bind(port, socket_type, socket_proto):
+        s = socket.socket(socket.AF_INET, socket_type, socket_proto)
+        try:
+            try:
+                s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+                s.bind(('', port))
+                return s.getsockname()[1]
+            except socket.error:
+                return None
+        finally:
+            s.close()
+
+    # On the 2.6 kernel, calling try_bind() on UDP socket returns the
+    # same port over and over. So always try TCP first.
+    while True:
+        # Ask the OS for an unused port.
+        port = try_bind(0, socket.SOCK_STREAM, socket.IPPROTO_TCP)
+        # Check if this port is unused on the other protocol.
+        if port and try_bind(port, socket.SOCK_DGRAM, socket.IPPROTO_UDP):
+            return port
diff --git a/client/common_lib/utils_unittest.py b/client/common_lib/base_utils_unittest.py
similarity index 72%
rename from client/common_lib/utils_unittest.py
rename to client/common_lib/base_utils_unittest.py
index 8eef49c..39acab2 100755
--- a/client/common_lib/utils_unittest.py
+++ b/client/common_lib/base_utils_unittest.py
@@ -3,14 +3,14 @@
 import os, unittest, StringIO, socket, urllib2, shutil, subprocess, logging
 
 import common
-from autotest_lib.client.common_lib import utils, autotemp
+from autotest_lib.client.common_lib import base_utils, autotemp
 from autotest_lib.client.common_lib.test_utils import mock
 
 
 class test_read_one_line(unittest.TestCase):
     def setUp(self):
-        self.god = mock.mock_god()
-        self.god.stub_function(utils, "open")
+        self.god = mock.mock_god(ut=self)
+        self.god.stub_function(base_utils, "open")
 
 
     def tearDown(self):
@@ -18,82 +18,82 @@
 
 
     def test_ip_to_long(self):
-        self.assertEqual(utils.ip_to_long('0.0.0.0'), 0)
-        self.assertEqual(utils.ip_to_long('255.255.255.255'), 4294967295)
-        self.assertEqual(utils.ip_to_long('192.168.0.1'), 3232235521)
-        self.assertEqual(utils.ip_to_long('1.2.4.8'), 16909320)
+        self.assertEqual(base_utils.ip_to_long('0.0.0.0'), 0)
+        self.assertEqual(base_utils.ip_to_long('255.255.255.255'), 4294967295)
+        self.assertEqual(base_utils.ip_to_long('192.168.0.1'), 3232235521)
+        self.assertEqual(base_utils.ip_to_long('1.2.4.8'), 16909320)
 
 
     def test_long_to_ip(self):
-        self.assertEqual(utils.long_to_ip(0), '0.0.0.0')
-        self.assertEqual(utils.long_to_ip(4294967295), '255.255.255.255')
-        self.assertEqual(utils.long_to_ip(3232235521), '192.168.0.1')
-        self.assertEqual(utils.long_to_ip(16909320), '1.2.4.8')
+        self.assertEqual(base_utils.long_to_ip(0), '0.0.0.0')
+        self.assertEqual(base_utils.long_to_ip(4294967295), '255.255.255.255')
+        self.assertEqual(base_utils.long_to_ip(3232235521), '192.168.0.1')
+        self.assertEqual(base_utils.long_to_ip(16909320), '1.2.4.8')
 
 
     def test_create_subnet_mask(self):
-        self.assertEqual(utils.create_subnet_mask(0), 0)
-        self.assertEqual(utils.create_subnet_mask(32), 4294967295)
-        self.assertEqual(utils.create_subnet_mask(25), 4294967168)
+        self.assertEqual(base_utils.create_subnet_mask(0), 0)
+        self.assertEqual(base_utils.create_subnet_mask(32), 4294967295)
+        self.assertEqual(base_utils.create_subnet_mask(25), 4294967168)
 
 
     def test_format_ip_with_mask(self):
-        self.assertEqual(utils.format_ip_with_mask('192.168.0.1', 0),
+        self.assertEqual(base_utils.format_ip_with_mask('192.168.0.1', 0),
                          '0.0.0.0/0')
-        self.assertEqual(utils.format_ip_with_mask('192.168.0.1', 32),
+        self.assertEqual(base_utils.format_ip_with_mask('192.168.0.1', 32),
                          '192.168.0.1/32')
-        self.assertEqual(utils.format_ip_with_mask('192.168.0.1', 26),
+        self.assertEqual(base_utils.format_ip_with_mask('192.168.0.1', 26),
                          '192.168.0.0/26')
-        self.assertEqual(utils.format_ip_with_mask('192.168.0.255', 26),
+        self.assertEqual(base_utils.format_ip_with_mask('192.168.0.255', 26),
                          '192.168.0.192/26')
 
 
     def create_test_file(self, contents):
         test_file = StringIO.StringIO(contents)
-        utils.open.expect_call("filename", "r").and_return(test_file)
+        base_utils.open.expect_call("filename", "r").and_return(test_file)
 
 
     def test_reads_one_line_file(self):
         self.create_test_file("abc\n")
-        self.assertEqual("abc", utils.read_one_line("filename"))
+        self.assertEqual("abc", base_utils.read_one_line("filename"))
         self.god.check_playback()
 
 
     def test_strips_read_lines(self):
         self.create_test_file("abc   \n")
-        self.assertEqual("abc   ", utils.read_one_line("filename"))
+        self.assertEqual("abc   ", base_utils.read_one_line("filename"))
         self.god.check_playback()
 
 
     def test_drops_extra_lines(self):
         self.create_test_file("line 1\nline 2\nline 3\n")
-        self.assertEqual("line 1", utils.read_one_line("filename"))
+        self.assertEqual("line 1", base_utils.read_one_line("filename"))
         self.god.check_playback()
 
 
     def test_works_on_empty_file(self):
         self.create_test_file("")
-        self.assertEqual("", utils.read_one_line("filename"))
+        self.assertEqual("", base_utils.read_one_line("filename"))
         self.god.check_playback()
 
 
     def test_works_on_file_with_no_newlines(self):
         self.create_test_file("line but no newline")
         self.assertEqual("line but no newline",
-                         utils.read_one_line("filename"))
+                         base_utils.read_one_line("filename"))
         self.god.check_playback()
 
 
     def test_preserves_leading_whitespace(self):
         self.create_test_file("   has leading whitespace")
         self.assertEqual("   has leading whitespace",
-                         utils.read_one_line("filename"))
+                         base_utils.read_one_line("filename"))
 
 
 class test_write_one_line(unittest.TestCase):
     def setUp(self):
-        self.god = mock.mock_god()
-        self.god.stub_function(utils, "open")
+        self.god = mock.mock_god(ut=self)
+        self.god.stub_function(base_utils, "open")
 
 
     def tearDown(self):
@@ -102,8 +102,8 @@
 
     def get_write_one_line_output(self, content):
         test_file = mock.SaveDataAfterCloseStringIO()
-        utils.open.expect_call("filename", "w").and_return(test_file)
-        utils.write_one_line("filename", content)
+        base_utils.open.expect_call("filename", "w").and_return(test_file)
+        base_utils.write_one_line("filename", content)
         self.god.check_playback()
         return test_file.final_data
 
@@ -130,8 +130,8 @@
 
 class test_open_write_close(unittest.TestCase):
     def setUp(self):
-        self.god = mock.mock_god()
-        self.god.stub_function(utils, "open")
+        self.god = mock.mock_god(ut=self)
+        self.god.stub_function(base_utils, "open")
 
 
     def tearDown(self):
@@ -141,16 +141,16 @@
     def test_simple_functionality(self):
         data = "\n\nwhee\n"
         test_file = mock.SaveDataAfterCloseStringIO()
-        utils.open.expect_call("filename", "w").and_return(test_file)
-        utils.open_write_close("filename", data)
+        base_utils.open.expect_call("filename", "w").and_return(test_file)
+        base_utils.open_write_close("filename", data)
         self.god.check_playback()
         self.assertEqual(data, test_file.final_data)
 
 
 class test_read_keyval(unittest.TestCase):
     def setUp(self):
-        self.god = mock.mock_god()
-        self.god.stub_function(utils, "open")
+        self.god = mock.mock_god(ut=self)
+        self.god.stub_function(base_utils, "open")
         self.god.stub_function(os.path, "isdir")
         self.god.stub_function(os.path, "exists")
 
@@ -162,13 +162,13 @@
     def create_test_file(self, filename, contents):
         test_file = StringIO.StringIO(contents)
         os.path.exists.expect_call(filename).and_return(True)
-        utils.open.expect_call(filename).and_return(test_file)
+        base_utils.open.expect_call(filename).and_return(test_file)
 
 
     def read_keyval(self, contents):
         os.path.isdir.expect_call("file").and_return(False)
         self.create_test_file("file", contents)
-        keyval = utils.read_keyval("file")
+        keyval = base_utils.read_keyval("file")
         self.god.check_playback()
         return keyval
 
@@ -176,21 +176,21 @@
     def test_returns_empty_when_file_doesnt_exist(self):
         os.path.isdir.expect_call("file").and_return(False)
         os.path.exists.expect_call("file").and_return(False)
-        self.assertEqual({}, utils.read_keyval("file"))
+        self.assertEqual({}, base_utils.read_keyval("file"))
         self.god.check_playback()
 
 
     def test_accesses_files_directly(self):
         os.path.isdir.expect_call("file").and_return(False)
         self.create_test_file("file", "")
-        utils.read_keyval("file")
+        base_utils.read_keyval("file")
         self.god.check_playback()
 
 
     def test_accesses_directories_through_keyval_file(self):
         os.path.isdir.expect_call("dir").and_return(True)
         self.create_test_file("dir/keyval", "")
-        utils.read_keyval("dir")
+        base_utils.read_keyval("dir")
         self.god.check_playback()
 
 
@@ -247,8 +247,8 @@
 
 class test_write_keyval(unittest.TestCase):
     def setUp(self):
-        self.god = mock.mock_god()
-        self.god.stub_function(utils, "open")
+        self.god = mock.mock_god(ut=self)
+        self.god.stub_function(base_utils, "open")
         self.god.stub_function(os.path, "isdir")
 
 
@@ -268,12 +268,12 @@
             expected_filename = filename
         test_file = StringIO.StringIO()
         self.god.stub_function(test_file, "close")
-        utils.open.expect_call(expected_filename, "a").and_return(test_file)
+        base_utils.open.expect_call(expected_filename, "a").and_return(test_file)
         test_file.close.expect_call()
         if type_tag is None:
-            utils.write_keyval(filename, dictionary)
+            base_utils.write_keyval(filename, dictionary)
         else:
-            utils.write_keyval(filename, dictionary, type_tag)
+            base_utils.write_keyval(filename, dictionary, type_tag)
         return test_file.getvalue()
 
 
@@ -330,32 +330,32 @@
 
 class test_is_url(unittest.TestCase):
     def test_accepts_http(self):
-        self.assertTrue(utils.is_url("http://example.com"))
+        self.assertTrue(base_utils.is_url("http://example.com"))
 
 
     def test_accepts_ftp(self):
-        self.assertTrue(utils.is_url("ftp://ftp.example.com"))
+        self.assertTrue(base_utils.is_url("ftp://ftp.example.com"))
 
 
     def test_rejects_local_path(self):
-        self.assertFalse(utils.is_url("/home/username/file"))
+        self.assertFalse(base_utils.is_url("/home/username/file"))
 
 
     def test_rejects_local_filename(self):
-        self.assertFalse(utils.is_url("filename"))
+        self.assertFalse(base_utils.is_url("filename"))
 
 
     def test_rejects_relative_local_path(self):
-        self.assertFalse(utils.is_url("somedir/somesubdir/file"))
+        self.assertFalse(base_utils.is_url("somedir/somesubdir/file"))
 
 
     def test_rejects_local_path_containing_url(self):
-        self.assertFalse(utils.is_url("somedir/http://path/file"))
+        self.assertFalse(base_utils.is_url("somedir/http://path/file"))
 
 
 class test_urlopen(unittest.TestCase):
     def setUp(self):
-        self.god = mock.mock_god()
+        self.god = mock.mock_god(ut=self)
 
 
     def tearDown(self):
@@ -382,7 +382,7 @@
 
     def test_timeout_set_during_call(self):
         self.stub_urlopen_with_timeout_check(30, "retval", "url")
-        retval = utils.urlopen("url", timeout=30)
+        retval = base_utils.urlopen("url", timeout=30)
         self.assertEquals(retval, "retval")
 
 
@@ -391,7 +391,7 @@
         self.stub_urlopen_with_timeout_check(30, None, "url")
         try:
             socket.setdefaulttimeout(1234)
-            utils.urlopen("url", timeout=30)
+            base_utils.urlopen("url", timeout=30)
             self.assertEquals(1234, socket.getdefaulttimeout())
         finally:
             socket.setdefaulttimeout(old_timeout)
@@ -401,18 +401,18 @@
         def test_func(timeout):
             self.assertTrue(timeout is not None)
         self.stub_urlopen_with_timeout_comparison(test_func, None, "url")
-        utils.urlopen("url")
+        base_utils.urlopen("url")
 
 
     def test_args_are_untouched(self):
         self.stub_urlopen_with_timeout_check(30, None, "http://url",
                                              "POST data")
-        utils.urlopen("http://url", timeout=30, data="POST data")
+        base_utils.urlopen("http://url", timeout=30, data="POST data")
 
 
 class test_urlretrieve(unittest.TestCase):
     def setUp(self):
-        self.god = mock.mock_god()
+        self.god = mock.mock_god(ut=self)
 
 
     def tearDown(self):
@@ -420,9 +420,9 @@
 
 
     def test_urlopen_passed_arguments(self):
-        self.god.stub_function(utils, "urlopen")
-        self.god.stub_function(utils.shutil, "copyfileobj")
-        self.god.stub_function(utils, "open")
+        self.god.stub_function(base_utils, "urlopen")
+        self.god.stub_function(base_utils.shutil, "copyfileobj")
+        self.god.stub_function(base_utils, "open")
 
         url = "url"
         dest = "somefile"
@@ -432,14 +432,14 @@
         src_file = self.god.create_mock_class(file, "file")
         dest_file = self.god.create_mock_class(file, "file")
 
-        (utils.urlopen.expect_call(url, data=data, timeout=timeout)
+        (base_utils.urlopen.expect_call(url, data=data, timeout=timeout)
                 .and_return(src_file))
-        utils.open.expect_call(dest, "wb").and_return(dest_file)
-        utils.shutil.copyfileobj.expect_call(src_file, dest_file)
+        base_utils.open.expect_call(dest, "wb").and_return(dest_file)
+        base_utils.shutil.copyfileobj.expect_call(src_file, dest_file)
         dest_file.close.expect_call()
         src_file.close.expect_call()
 
-        utils.urlretrieve(url, dest, data=data, timeout=timeout)
+        base_utils.urlretrieve(url, dest, data=data, timeout=timeout)
         self.god.check_playback()
 
 
@@ -487,18 +487,18 @@
 
 
     def test_both_dont_exist(self):
-        utils.merge_trees(*self.paths("empty"))
+        base_utils.merge_trees(*self.paths("empty"))
 
 
     def test_file_only_at_src(self):
         print >> open(self.src("src_only"), "w"), "line 1"
-        utils.merge_trees(*self.paths("src_only"))
+        base_utils.merge_trees(*self.paths("src_only"))
         self.assertFileEqual("src_only")
 
 
     def test_file_only_at_dest(self):
         print >> open(self.dest("dest_only"), "w"), "line 1"
-        utils.merge_trees(*self.paths("dest_only"))
+        base_utils.merge_trees(*self.paths("dest_only"))
         self.assertEqual(False, os.path.exists(self.src("dest_only")))
         self.assertFileContents("line 1\n", "dest_only")
 
@@ -506,21 +506,21 @@
     def test_file_at_both(self):
         print >> open(self.dest("in_both"), "w"), "line 1"
         print >> open(self.src("in_both"), "w"), "line 2"
-        utils.merge_trees(*self.paths("in_both"))
+        base_utils.merge_trees(*self.paths("in_both"))
         self.assertFileContents("line 1\nline 2\n", "in_both")
 
 
     def test_directory_with_files_in_both(self):
         print >> open(self.dest("in_both"), "w"), "line 1"
         print >> open(self.src("in_both"), "w"), "line 3"
-        utils.merge_trees(*self.paths())
+        base_utils.merge_trees(*self.paths())
         self.assertFileContents("line 1\nline 3\n", "in_both")
 
 
     def test_directory_with_mix_of_files(self):
         print >> open(self.dest("in_dest"), "w"), "dest line"
         print >> open(self.src("in_src"), "w"), "src line"
-        utils.merge_trees(*self.paths())
+        base_utils.merge_trees(*self.paths())
         self.assertFileContents("dest line\n", "in_dest")
         self.assertFileContents("src line\n", "in_src")
 
@@ -532,7 +532,7 @@
         os.mkdir(self.dest("both_subdir"))
         print >> open(self.src("both_subdir", "subfile"), "w"), "src line"
         print >> open(self.dest("both_subdir", "subfile"), "w"), "dest line"
-        utils.merge_trees(*self.paths())
+        base_utils.merge_trees(*self.paths())
         self.assertFileContents("subdir line\n", "src_subdir", "subfile")
         self.assertFileContents("dest line\nsrc line\n", "both_subdir",
                                 "subfile")
@@ -540,25 +540,25 @@
 
 class test_get_relative_path(unittest.TestCase):
     def test_not_absolute(self):
-        self.assertRaises(AssertionError, utils.get_relative_path, "a", "b")
+        self.assertRaises(AssertionError, base_utils.get_relative_path, "a", "b")
 
     def test_same_dir(self):
-        self.assertEqual(utils.get_relative_path("/a/b/c", "/a/b"), "c")
+        self.assertEqual(base_utils.get_relative_path("/a/b/c", "/a/b"), "c")
 
     def test_forward_dir(self):
-        self.assertEqual(utils.get_relative_path("/a/b/c/d", "/a/b"), "c/d")
+        self.assertEqual(base_utils.get_relative_path("/a/b/c/d", "/a/b"), "c/d")
 
     def test_previous_dir(self):
-        self.assertEqual(utils.get_relative_path("/a/b", "/a/b/c/d"), "../..")
+        self.assertEqual(base_utils.get_relative_path("/a/b", "/a/b/c/d"), "../..")
 
     def test_parallel_dir(self):
-        self.assertEqual(utils.get_relative_path("/a/c/d", "/a/b/c/d"),
+        self.assertEqual(base_utils.get_relative_path("/a/c/d", "/a/b/c/d"),
                          "../../../c/d")
 
 
 class test_sh_escape(unittest.TestCase):
     def _test_in_shell(self, text):
-        escaped_text = utils.sh_escape(text)
+        escaped_text = base_utils.sh_escape(text)
         proc = subprocess.Popen('echo "%s"' % escaped_text, shell=True,
                                 stdin=open(os.devnull, 'r'),
                                 stdout=subprocess.PIPE,
@@ -627,15 +627,15 @@
 
 class test_run(unittest.TestCase):
     """
-    Test the utils.run() function.
+    Test the base_utils.run() function.
 
-    Note: This test runs simple external commands to test the utils.run()
+    Note: This test runs simple external commands to test the base_utils.run()
     API without assuming implementation details.
     """
     def setUp(self):
-        self.god = mock.mock_god()
-        self.god.stub_function(utils.logging, 'warn')
-        self.god.stub_function(utils.logging, 'debug')
+        self.god = mock.mock_god(ut=self)
+        self.god.stub_function(base_utils.logging, 'warn')
+        self.god.stub_function(base_utils.logging, 'debug')
 
 
     def tearDown(self):
@@ -653,30 +653,30 @@
     def test_default_simple(self):
         cmd = 'echo "hello world"'
         # expect some king of logging.debug() call but don't care about args
-        utils.logging.debug.expect_any_call()
-        self.__check_result(utils.run(cmd), cmd, stdout='hello world\n')
+        base_utils.logging.debug.expect_any_call()
+        self.__check_result(base_utils.run(cmd), cmd, stdout='hello world\n')
 
 
     def test_default_failure(self):
         cmd = 'exit 11'
         try:
-            utils.run(cmd, verbose=False)
-        except utils.error.CmdError, err:
+            base_utils.run(cmd, verbose=False)
+        except base_utils.error.CmdError, err:
             self.__check_result(err.result_obj, cmd, exit_status=11)
 
 
     def test_ignore_status(self):
         cmd = 'echo error >&2 && exit 11'
-        self.__check_result(utils.run(cmd, ignore_status=True, verbose=False),
+        self.__check_result(base_utils.run(cmd, ignore_status=True, verbose=False),
                             cmd, exit_status=11, stderr='error\n')
 
 
     def test_timeout(self):
         # we expect a logging.warn() message, don't care about the contents
-        utils.logging.warn.expect_any_call()
+        base_utils.logging.warn.expect_any_call()
         try:
-            utils.run('echo -n output && sleep 10', timeout=1, verbose=False)
-        except utils.error.CmdError, err:
+            base_utils.run('echo -n output && sleep 10', timeout=1, verbose=False)
+        except base_utils.error.CmdError, err:
             self.assertEquals(err.result_obj.stdout, 'output')
 
 
@@ -685,7 +685,7 @@
         stdout_tee = StringIO.StringIO()
         stderr_tee = StringIO.StringIO()
 
-        self.__check_result(utils.run(
+        self.__check_result(base_utils.run(
                 cmd, stdout_tee=stdout_tee, stderr_tee=stderr_tee,
                 verbose=False), cmd, stdout='output\n', stderr='error\n')
         self.assertEqual(stdout_tee.getvalue(), 'output\n')
@@ -694,64 +694,64 @@
 
     def test_stdin_string(self):
         cmd = 'cat'
-        self.__check_result(utils.run(cmd, verbose=False, stdin='hi!\n'),
+        self.__check_result(base_utils.run(cmd, verbose=False, stdin='hi!\n'),
                             cmd, stdout='hi!\n')
 
 
     def test_safe_args(self):
         cmd = 'echo "hello \\"world" "again"'
-        self.__check_result(utils.run(
+        self.__check_result(base_utils.run(
                 'echo', verbose=False, args=('hello "world', 'again')), cmd,
                 stdout='hello "world again\n')
 
 
     def test_safe_args_given_string(self):
         cmd = 'echo "hello \\"world" "again"'
-        self.assertRaises(TypeError, utils.run, 'echo', args='hello')
+        self.assertRaises(TypeError, base_utils.run, 'echo', args='hello')
 
 
 class test_compare_versions(unittest.TestCase):
     def test_zerofill(self):
-        self.assertEqual(utils.compare_versions('1.7', '1.10'), -1)
-        self.assertEqual(utils.compare_versions('1.222', '1.3'), 1)
-        self.assertEqual(utils.compare_versions('1.03', '1.3'), 0)
+        self.assertEqual(base_utils.compare_versions('1.7', '1.10'), -1)
+        self.assertEqual(base_utils.compare_versions('1.222', '1.3'), 1)
+        self.assertEqual(base_utils.compare_versions('1.03', '1.3'), 0)
 
 
     def test_unequal_len(self):
-        self.assertEqual(utils.compare_versions('1.3', '1.3.4'), -1)
-        self.assertEqual(utils.compare_versions('1.3.1', '1.3'), 1)
+        self.assertEqual(base_utils.compare_versions('1.3', '1.3.4'), -1)
+        self.assertEqual(base_utils.compare_versions('1.3.1', '1.3'), 1)
 
 
     def test_dash_delimited(self):
-        self.assertEqual(utils.compare_versions('1-2-3', '1-5-1'), -1)
-        self.assertEqual(utils.compare_versions('1-2-1', '1-1-1'), 1)
-        self.assertEqual(utils.compare_versions('1-2-4', '1-2-4'), 0)
+        self.assertEqual(base_utils.compare_versions('1-2-3', '1-5-1'), -1)
+        self.assertEqual(base_utils.compare_versions('1-2-1', '1-1-1'), 1)
+        self.assertEqual(base_utils.compare_versions('1-2-4', '1-2-4'), 0)
 
 
     def test_alphabets(self):
-        self.assertEqual(utils.compare_versions('m.l.b', 'n.b.a'), -1)
-        self.assertEqual(utils.compare_versions('n.b.a', 'm.l.b'), 1)
-        self.assertEqual(utils.compare_versions('abc.e', 'abc.e'), 0)
+        self.assertEqual(base_utils.compare_versions('m.l.b', 'n.b.a'), -1)
+        self.assertEqual(base_utils.compare_versions('n.b.a', 'm.l.b'), 1)
+        self.assertEqual(base_utils.compare_versions('abc.e', 'abc.e'), 0)
 
 
     def test_mix_symbols(self):
-        self.assertEqual(utils.compare_versions('k-320.1', 'k-320.3'), -1)
-        self.assertEqual(utils.compare_versions('k-231.5', 'k-231.1'), 1)
-        self.assertEqual(utils.compare_versions('k-231.1', 'k-231.1'), 0)
+        self.assertEqual(base_utils.compare_versions('k-320.1', 'k-320.3'), -1)
+        self.assertEqual(base_utils.compare_versions('k-231.5', 'k-231.1'), 1)
+        self.assertEqual(base_utils.compare_versions('k-231.1', 'k-231.1'), 0)
 
-        self.assertEqual(utils.compare_versions('k.320-1', 'k.320-3'), -1)
-        self.assertEqual(utils.compare_versions('k.231-5', 'k.231-1'), 1)
-        self.assertEqual(utils.compare_versions('k.231-1', 'k.231-1'), 0)
+        self.assertEqual(base_utils.compare_versions('k.320-1', 'k.320-3'), -1)
+        self.assertEqual(base_utils.compare_versions('k.231-5', 'k.231-1'), 1)
+        self.assertEqual(base_utils.compare_versions('k.231-1', 'k.231-1'), 0)
 
 
 class test_args_to_dict(unittest.TestCase):
     def test_no_args(self):
-        result = utils.args_to_dict([])
+        result = base_utils.args_to_dict([])
         self.assertEqual({}, result)
 
 
     def test_matches(self):
-        result = utils.args_to_dict(['aBc:DeF', 'SyS=DEf', 'XY_Z:',
+        result = base_utils.args_to_dict(['aBc:DeF', 'SyS=DEf', 'XY_Z:',
                                      'F__o0O=', 'B8r:=:=', '_bAZ_=:=:'])
         self.assertEqual(result, {'abc':'DeF', 'sys':'DEf', 'xy_z':'',
                                   'f__o0o':'', 'b8r':'=:=', '_baz_':':=:'})
@@ -765,7 +765,7 @@
         logger.setLevel(logging.ERROR)
 
         try:
-            result = utils.args_to_dict(['ab-c:DeF', '--SyS=DEf', 'a*=b', 'a*b',
+            result = base_utils.args_to_dict(['ab-c:DeF', '--SyS=DEf', 'a*=b', 'a*b',
                                          ':VAL', '=VVV', 'WORD'])
             self.assertEqual({}, result)
         finally:
@@ -783,7 +783,7 @@
 
     def test_get_port(self):
         for _ in xrange(100):
-            p = utils.get_unused_port()
+            p = base_utils.get_unused_port()
             s = self.do_bind(p, socket.SOCK_STREAM, socket.IPPROTO_TCP)
             self.assert_(s.getsockname())
             s = self.do_bind(p, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
diff --git a/client/common_lib/control_data.py b/client/common_lib/control_data.py
index 5b8caf2..dcc49cd 100644
--- a/client/common_lib/control_data.py
+++ b/client/common_lib/control_data.py
@@ -19,6 +19,7 @@
         self.experimental = False
         self.run_verify = True
         self.sync_count = 1
+        self.test_parameters = set()
 
         diff = REQUIRED_VARS - set(vars)
         if len(diff) > 0:
@@ -134,6 +135,7 @@
     def set_test_type(self, val):
         self._set_option('test_type', val, ['client', 'server'])
 
+
     def set_test_parameters(self, val):
         self._set_set('test_parameters', val)
 
diff --git a/client/common_lib/error.py b/client/common_lib/error.py
index 42dfe2b..0c5641c 100644
--- a/client/common_lib/error.py
+++ b/client/common_lib/error.py
@@ -2,13 +2,14 @@
 Internal global error types
 """
 
-import sys, traceback
+import sys, traceback, threading, logging
 from traceback import format_exception
 
 # Add names you want to be imported by 'from errors import *' to this list.
 # This must be list not a tuple as we modify it to include all of our
 # the Exception classes we define below at the end of this file.
-__all__ = ['format_error']
+__all__ = ['format_error', 'context_aware', 'context', 'get_context',
+           'exception_context']
 
 
 def format_error():
@@ -21,6 +22,141 @@
     return ''.join(trace)
 
 
+# Exception context information:
+# ------------------------------
+# Every function can have some context string associated with it.
+# The context string can be changed by calling context(str) and cleared by
+# calling context() with no parameters.
+# get_context() joins the current context strings of all functions in the
+# provided traceback.  The result is a brief description of what the test was
+# doing in the provided traceback (which should be the traceback of a caught
+# exception).
+#
+# For example: assume a() calls b() and b() calls c().
+#
+# @error.context_aware
+# def a():
+#     error.context("hello")
+#     b()
+#     error.context("world")
+#     error.get_context() ----> 'world'
+#
+# @error.context_aware
+# def b():
+#     error.context("foo")
+#     c()
+#
+# @error.context_aware
+# def c():
+#     error.context("bar")
+#     error.get_context() ----> 'hello --> foo --> bar'
+#
+# The current context is automatically inserted into exceptions raised in
+# context_aware functions, so usually test code doesn't need to call
+# error.get_context().
+
+ctx = threading.local()
+
+
+def _new_context(s=""):
+    if not hasattr(ctx, "contexts"):
+        ctx.contexts = []
+    ctx.contexts.append(s)
+
+
+def _pop_context():
+    ctx.contexts.pop()
+
+
+def context(s="", log=None):
+    """
+    Set the context for the currently executing function and optionally log it.
+
+    @param s: A string.  If not provided, the context for the current function
+            will be cleared.
+    @param log: A logging function to pass the context message to.  If None, no
+            function will be called.
+    """
+    ctx.contexts[-1] = s
+    if s and log:
+        log("Context: %s" % get_context())
+
+
+def base_context(s="", log=None):
+    """
+    Set the base context for the currently executing function and optionally
+    log it.  The base context is just another context level that is hidden by
+    default.  Functions that require a single context level should not use
+    base_context().
+
+    @param s: A string.  If not provided, the base context for the current
+            function will be cleared.
+    @param log: A logging function to pass the context message to.  If None, no
+            function will be called.
+    """
+    ctx.contexts[-1] = ""
+    ctx.contexts[-2] = s
+    if s and log:
+        log("Context: %s" % get_context())
+
+
+def get_context():
+    """Return the current context (or None if none is defined)."""
+    if hasattr(ctx, "contexts"):
+        return " --> ".join([s for s in ctx.contexts if s])
+
+
+def exception_context(e):
+    """Return the context of a given exception (or None if none is defined)."""
+    if hasattr(e, "_context"):
+        return e._context
+
+
+def set_exception_context(e, s):
+    """Set the context of a given exception."""
+    e._context = s
+
+
+def join_contexts(s1, s2):
+    """Join two context strings."""
+    if s1:
+        if s2:
+            return "%s --> %s" % (s1, s2)
+        else:
+            return s1
+    else:
+        return s2
+
+
+def context_aware(fn):
+    """A decorator that must be applied to functions that call context()."""
+    def new_fn(*args, **kwargs):
+        _new_context()
+        _new_context("(%s)" % fn.__name__)
+        try:
+            try:
+                return fn(*args, **kwargs)
+            except Exception, e:
+                if not exception_context(e):
+                    set_exception_context(e, get_context())
+                raise
+        finally:
+            _pop_context()
+            _pop_context()
+    new_fn.__name__ = fn.__name__
+    new_fn.__doc__ = fn.__doc__
+    new_fn.__dict__.update(fn.__dict__)
+    return new_fn
+
+
+def _context_message(e):
+    s = exception_context(e)
+    if s:
+        return "    [context: %s]" % s
+    else:
+        return ""
+
+
 class JobContinue(SystemExit):
     """Allow us to bail out requesting continuance."""
     pass
@@ -33,7 +169,8 @@
 
 class AutotestError(Exception):
     """The parent of all errors deliberatly thrown within the client code."""
-    pass
+    def __str__(self):
+        return Exception.__str__(self) + _context_message(self)
 
 
 class JobError(AutotestError):
@@ -46,10 +183,14 @@
     def __init__(self, unhandled_exception):
         if isinstance(unhandled_exception, JobError):
             JobError.__init__(self, *unhandled_exception.args)
+        elif isinstance(unhandled_exception, str):
+            JobError.__init__(self, unhandled_exception)
         else:
             msg = "Unhandled %s: %s"
             msg %= (unhandled_exception.__class__.__name__,
                     unhandled_exception)
+            if not isinstance(unhandled_exception, AutotestError):
+                msg += _context_message(unhandled_exception)
             msg += "\n" + traceback.format_exc()
             JobError.__init__(self, msg)
 
@@ -87,10 +228,14 @@
     def __init__(self, unhandled_exception):
         if isinstance(unhandled_exception, TestError):
             TestError.__init__(self, *unhandled_exception.args)
+        elif isinstance(unhandled_exception, str):
+            TestError.__init__(self, unhandled_exception)
         else:
             msg = "Unhandled %s: %s"
             msg %= (unhandled_exception.__class__.__name__,
                     unhandled_exception)
+            if not isinstance(unhandled_exception, AutotestError):
+                msg += _context_message(unhandled_exception)
             msg += "\n" + traceback.format_exc()
             TestError.__init__(self, msg)
 
@@ -100,10 +245,14 @@
     def __init__(self, unhandled_exception):
         if isinstance(unhandled_exception, TestFail):
             TestFail.__init__(self, *unhandled_exception.args)
+        elif isinstance(unhandled_exception, str):
+            TestFail.__init__(self, unhandled_exception)
         else:
             msg = "Unhandled %s: %s"
             msg %= (unhandled_exception.__class__.__name__,
                     unhandled_exception)
+            if not isinstance(unhandled_exception, AutotestError):
+                msg += _context_message(unhandled_exception)
             msg += "\n" + traceback.format_exc()
             TestFail.__init__(self, msg)
 
@@ -118,7 +267,6 @@
         self.result_obj = result_obj
         self.additional_text = additional_text
 
-
     def __str__(self):
         if self.result_obj.exit_status is None:
             msg = "Command <%s> failed and is not responding to signals"
@@ -129,6 +277,7 @@
 
         if self.additional_text:
             msg += ", " + self.additional_text
+        msg += _context_message(self)
         msg += '\n' + repr(self.result_obj)
         return msg
 
diff --git a/client/common_lib/logging_manager.py b/client/common_lib/logging_manager.py
index e34e9c9..96f718a 100644
--- a/client/common_lib/logging_manager.py
+++ b/client/common_lib/logging_manager.py
@@ -79,7 +79,7 @@
     return rv
 
 
-if sys.version_info[:2] > (2, 6):
+if sys.version_info[:2] > (2, 7):
     warnings.warn('This module has not been reviewed for Python %s' %
                   sys.version)
 
diff --git a/client/common_lib/magic.py b/client/common_lib/magic.py
old mode 100644
new mode 100755
diff --git a/client/common_lib/software_manager.py b/client/common_lib/software_manager.py
new file mode 100755
index 0000000..f67f667
--- /dev/null
+++ b/client/common_lib/software_manager.py
@@ -0,0 +1,788 @@
+#!/usr/bin/python
+"""
+Software package management library.
+
+This is an abstraction layer on top of the existing distributions high level
+package managers. It supports package operations useful for testing purposes,
+and multiple high level package managers (here called backends). If you want
+to make this lib to support your particular package manager/distro, please
+implement the given backend class.
+
+@author: Higor Vieira Alves (halves@br.ibm.com)
+@author: Lucas Meneghel Rodrigues (lmr@redhat.com)
+@author: Ramon de Carvalho Valle (rcvalle@br.ibm.com)
+
+@copyright: IBM 2008-2009
+@copyright: Red Hat 2009-2010
+"""
+import os, re, logging, ConfigParser, optparse, random, string
+try:
+    import yum
+except:
+    pass
+import common
+from autotest_lib.client.bin import os_dep, utils
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.common_lib import logging_config, logging_manager
+
+
+def generate_random_string(length):
+    """
+    Return a random string using alphanumeric characters.
+
+    @param length: Length of the string that will be generated.
+    @return: Random alphanumeric string of the requested length.
+    """
+    r = random.SystemRandom()
+    chars = string.letters + string.digits
+    # Build in one pass; the previous version shadowed the 'str' builtin
+    # and grew the string one character at a time.
+    return "".join(r.choice(chars) for _ in xrange(length))
+
+
+class SoftwareManagerLoggingConfig(logging_config.LoggingConfig):
+    """
+    Convenient console logging setup for this module's command line
+    interface (see the __main__ section at the bottom of the file).
+    """
+    def configure_logging(self, results_dir=None, verbose=False):
+        # results_dir is accepted for interface compatibility but unused;
+        # output goes to the console only.
+        super(SoftwareManagerLoggingConfig, self).configure_logging(
+                                                            use_console=True,
+                                                            verbose=verbose)
+
+
+class SystemInspector(object):
+    """
+    System inspector class.
+
+    This may grow up to include more complete reports of operating system and
+    machine properties.
+    """
+    def __init__(self):
+        """
+        Probe system, and save information for future reference.
+        """
+        self.distro = utils.get_os_vendor()
+        self.high_level_pms = ['apt-get', 'yum', 'zypper']
+
+
+    def get_package_management(self):
+        """
+        Determine the supported package management systems present on the
+        system. If more than one package management system installed, try
+        to find the best supported system.
+
+        @return: Name of the chosen package manager, or None when none of
+                the supported ones is installed.
+        """
+        list_supported = []
+        for high_level_pm in self.high_level_pms:
+            try:
+                os_dep.command(high_level_pm)
+                list_supported.append(high_level_pm)
+            except ValueError:
+                # os_dep.command raises ValueError when the binary is
+                # missing from PATH; any other exception is a real error
+                # and should propagate.
+                pass
+
+        pm_supported = None
+        if len(list_supported) == 1:
+            pm_supported = list_supported[0]
+        elif len(list_supported) > 1:
+            # More than one manager available: prefer the distro's native one
+            if ('apt-get' in list_supported and
+                self.distro in ['Debian', 'Ubuntu']):
+                pm_supported = 'apt-get'
+            elif 'yum' in list_supported and self.distro == 'Fedora':
+                pm_supported = 'yum'
+            else:
+                pm_supported = list_supported[0]
+
+        logging.debug('Package Manager backend: %s', pm_supported)
+        return pm_supported
+
+
+class SoftwareManager(object):
+    """
+    Package management abstraction layer.
+
+    It supports a set of common package operations for testing purposes, and it
+    uses the concept of a backend, a helper class that implements the set of
+    operations of a given package management tool.
+    """
+    def __init__(self):
+        """
+        Class constructor.
+
+        Determines the best supported package management system for the given
+        operating system running and initializes the appropriate backend.
+
+        @raise NotImplementedError: If no supported package management
+                system (yum, zypper, apt-get) is found on this machine.
+        """
+        inspector = SystemInspector()
+        backend_type = inspector.get_package_management()
+        if backend_type == 'yum':
+            self.backend = YumBackend()
+        elif backend_type == 'zypper':
+            self.backend = ZypperBackend()
+        elif backend_type == 'apt-get':
+            self.backend = AptBackend()
+        else:
+            raise NotImplementedError('Unimplemented package management '
+                                      'system: %s.' % backend_type)
+
+
+    def check_installed(self, name, version=None, arch=None):
+        """
+        Check whether a package is installed on this system.
+
+        @param name: Package name.
+        @param version: Package version.
+        @param arch: Package architecture.
+        """
+        return self.backend.check_installed(name, version, arch)
+
+
+    def list_all(self):
+        """
+        List all installed packages.
+        """
+        return self.backend.list_all()
+
+
+    def list_files(self, name):
+        """
+        Get a list of all files installed by package [name].
+
+        @param name: Package name.
+        """
+        return self.backend.list_files(name)
+
+
+    def install(self, name):
+        """
+        Install package [name].
+
+        @param name: Package name.
+        """
+        return self.backend.install(name)
+
+
+    def remove(self, name):
+        """
+        Remove package [name].
+
+        @param name: Package name.
+        """
+        return self.backend.remove(name)
+
+
+    def add_repo(self, url):
+        """
+        Add package repo described by [url].
+
+        @param url: URL of the package repo.
+        """
+        return self.backend.add_repo(url)
+
+
+    def remove_repo(self, url):
+        """
+        Remove package repo described by [url].
+
+        @param url: URL of the package repo.
+        """
+        return self.backend.remove_repo(url)
+
+
+    def upgrade(self):
+        """
+        Upgrade all packages available.
+        """
+        return self.backend.upgrade()
+
+
+    def provides(self, file):
+        """
+        Returns a list of packages that provides a given capability to the
+        system (be it a binary, a library).
+
+        @param file: Path to the file.
+        """
+        return self.backend.provides(file)
+
+
+    def install_what_provides(self, file):
+        """
+        Installs package that provides [file].
+
+        @param file: Path to file.
+        """
+        provides = self.provides(file)
+        if provides is not None:
+            self.install(provides)
+        else:
+            logging.warning('No package seems to provide %s', file)
+
+
+class RpmBackend(object):
+    """
+    This class implements operations executed with the rpm package manager.
+
+    rpm is a lower level package manager, used by higher level managers such
+    as yum and zypper.
+    """
+    def __init__(self):
+        self.lowlevel_base_cmd = os_dep.command('rpm')
+
+
+    def _check_installed_version(self, name, version):
+        """
+        Helper for the check_installed public method.
+
+        @param name: Package name.
+        @param version: Package version.
+        @return: True if the installed version compares >= [version].
+        """
+        cmd = (self.lowlevel_base_cmd + ' -q --qf %{VERSION} ' + name +
+               ' 2> /dev/null')
+        inst_version = utils.system_output(cmd)
+
+        # NOTE(review): this is a plain lexicographic string comparison,
+        # which only approximates rpm version ordering (eg '10.0' < '9.0'
+        # here) -- consider rpm's own version compare if this matters.
+        return inst_version >= version
+
+
+    def check_installed(self, name, version=None, arch=None):
+        """
+        Check if package [name] is installed.
+
+        @param name: Package name.
+        @param version: Package version.
+        @param arch: Package architecture.
+        """
+        if arch:
+            cmd = (self.lowlevel_base_cmd + ' -q --qf %{ARCH} ' + name +
+                   ' 2> /dev/null')
+            inst_archs = utils.system_output(cmd)
+            inst_archs = inst_archs.split('\n')
+
+            for inst_arch in inst_archs:
+                if inst_arch == arch:
+                    return self._check_installed_version(name, version)
+            return False
+
+        elif version:
+            return self._check_installed_version(name, version)
+        else:
+            # Installation check only: rpm -q exits 0 when installed
+            cmd = 'rpm -q ' + name + ' 2> /dev/null'
+            return (os.system(cmd) == 0)
+
+
+    def list_all(self):
+        """
+        List all installed packages.
+        """
+        installed_packages = utils.system_output('rpm -qa').splitlines()
+        return installed_packages
+
+
+    def list_files(self, name):
+        """
+        List files installed on the system by package [name].
+
+        @param name: Package name, or path to a local rpm file.
+        @return: List of paths, or an empty list when the query fails.
+        """
+        path = os.path.abspath(name)
+        if os.path.isfile(path):
+            option = '-qlp'
+            name = path
+        else:
+            option = '-ql'
+
+        l_cmd = 'rpm' + ' ' + option + ' ' + name + ' 2> /dev/null'
+
+        try:
+            result = utils.system_output(l_cmd)
+            list_files = result.split('\n')
+            return list_files
+        except error.CmdError:
+            return []
+
+
+class DpkgBackend(object):
+    """
+    This class implements operations executed with the dpkg package manager.
+
+    dpkg is a lower level package manager, used by higher level managers such
+    as apt and aptitude.
+    """
+    def __init__(self):
+        self.lowlevel_base_cmd = os_dep.command('dpkg')
+
+
+    def check_installed(self, name, version=None, arch=None):
+        """
+        Check if package [name] is installed.
+
+        @param name: Package name, or path to a .deb file.
+        @param version: Package version. Currently ignored by this backend;
+                accepted so SoftwareManager.check_installed(name, version,
+                arch) does not raise TypeError for apt-based systems.
+        @param arch: Package architecture. Currently ignored, see above.
+        """
+        if os.path.isfile(name):
+            n_cmd = (self.lowlevel_base_cmd + ' -f ' + name +
+                     ' Package 2>/dev/null')
+            name = utils.system_output(n_cmd)
+        i_cmd = self.lowlevel_base_cmd + ' -s ' + name + ' 2>/dev/null'
+        # dpkg -s reports status; treat anything but 'not-installed' as
+        # installed
+        package_status = utils.system_output(i_cmd, ignore_status=True)
+        not_inst_pattern = re.compile('not-installed', re.IGNORECASE)
+        dpkg_not_installed = re.search(not_inst_pattern, package_status)
+        if dpkg_not_installed:
+            return False
+        return True
+
+
+    def list_all(self):
+        """
+        List all packages installed on the system.
+
+        @return: List of 'name-version' strings, one per installed package.
+        """
+        installed_packages = []
+        raw_list = utils.system_output('dpkg -l').splitlines()[5:]
+        for line in raw_list:
+            parts = line.split()
+            if parts[0] == "ii":  # only grab "installed" packages
+                installed_packages.append("%s-%s" % (parts[1], parts[2]))
+        # Bug fix: the list was built but never returned
+        return installed_packages
+
+
+    def list_files(self, package):
+        """
+        List files installed by package [package].
+
+        @param package: Package name, or path to a .deb file.
+        @return: List of paths installed by package.
+        """
+        if os.path.isfile(package):
+            l_cmd = self.lowlevel_base_cmd + ' -c ' + package
+        else:
+            l_cmd = self.lowlevel_base_cmd + ' -l ' + package
+        return utils.system_output(l_cmd).split('\n')
+
+
+class YumBackend(RpmBackend):
+    """
+    Implements the yum backend for software manager.
+
+    Set of operations for the yum package manager, commonly found on Yellow Dog
+    Linux and Red Hat based distributions, such as Fedora and Red Hat
+    Enterprise Linux.
+    """
+    def __init__(self):
+        """
+        Initializes the base command and the yum package repository.
+        """
+        super(YumBackend, self).__init__()
+        executable = os_dep.command('yum')
+        base_arguments = '-y'
+        self.base_command = executable + ' ' + base_arguments
+        self.repo_file_path = '/etc/yum.repos.d/autotest.repo'
+        self.cfgparser = ConfigParser.ConfigParser()
+        self.cfgparser.read(self.repo_file_path)
+        y_cmd = executable + ' --version | head -1'
+        self.yum_version = utils.system_output(y_cmd, ignore_status=True)
+        logging.debug('Yum backend initialized')
+        logging.debug('Yum version: %s', self.yum_version)
+        # NOTE(review): 'yum' is imported at module level inside try/except,
+        # so this raises NameError when the python yum bindings are missing
+        self.yum_base = yum.YumBase()
+
+
+    def _cleanup(self):
+        """
+        Clean up the yum cache so new package information can be downloaded.
+        """
+        utils.system("yum clean all")
+
+
+    def _write_repo_file(self):
+        """
+        Persist the in-memory repo configuration to the autotest repo file.
+
+        Bug fix: ConfigParser.write() expects a file object; the previous
+        code passed the file *path*, which raises AttributeError.
+        """
+        repo_file = open(self.repo_file_path, 'w')
+        try:
+            self.cfgparser.write(repo_file)
+        finally:
+            repo_file.close()
+
+
+    def install(self, name):
+        """
+        Installs package [name]. Handles local installs.
+
+        @param name: Package name, or path to a local rpm file.
+        @return: True on success, False on failure.
+        """
+        if os.path.isfile(name):
+            name = os.path.abspath(name)
+            command = 'localinstall'
+        else:
+            command = 'install'
+
+        i_cmd = self.base_command + ' ' + command + ' ' + name
+
+        try:
+            utils.system(i_cmd)
+            return True
+        except error.CmdError:
+            return False
+
+
+    def remove(self, name):
+        """
+        Removes package [name].
+
+        @param name: Package name (eg. 'ipython').
+        @return: True on success, False on failure.
+        """
+        r_cmd = self.base_command + ' ' + 'erase' + ' ' + name
+        try:
+            utils.system(r_cmd)
+            return True
+        except error.CmdError:
+            return False
+
+
+    def add_repo(self, url):
+        """
+        Adds package repository located on [url].
+
+        @param url: Universal Resource Locator of the repository.
+        @return: True when the repo is present (already there or added).
+        """
+        # Check if the URL is already configured in some section
+        for section in self.cfgparser.sections():
+            for option, value in self.cfgparser.items(section):
+                if option == 'url' and value == url:
+                    return True
+
+        # Didn't find it, set it up under a unique section name
+        while True:
+            section_name = 'software_manager' + '_' + generate_random_string(4)
+            if not self.cfgparser.has_section(section_name):
+                break
+        self.cfgparser.add_section(section_name)
+        self.cfgparser.set(section_name, 'name',
+                           'Repository added by the autotest software manager.')
+        self.cfgparser.set(section_name, 'url', url)
+        self.cfgparser.set(section_name, 'enabled', 1)
+        self.cfgparser.set(section_name, 'gpgcheck', 0)
+        self._write_repo_file()
+        return True
+
+
+    def remove_repo(self, url):
+        """
+        Removes package repository located on [url].
+
+        @param url: Universal Resource Locator of the repository.
+        """
+        for section in self.cfgparser.sections():
+            for option, value in self.cfgparser.items(section):
+                if option == 'url' and value == url:
+                    self.cfgparser.remove_section(section)
+                    self._write_repo_file()
+
+
+    def upgrade(self):
+        """
+        Upgrade all available packages.
+
+        @return: True on success, False on failure.
+        """
+        r_cmd = self.base_command + ' ' + 'update'
+        try:
+            utils.system(r_cmd)
+            return True
+        except error.CmdError:
+            return False
+
+
+    def provides(self, name):
+        """
+        Returns a list of packages that provides a given capability.
+
+        @param name: Capability name (eg, 'foo').
+        @return: Name of the first providing package, or None.
+        """
+        d_provides = self.yum_base.searchPackageProvides(args=[name])
+        provides_list = [key for key in d_provides]
+        if provides_list:
+            logging.info("Package %s provides %s", provides_list[0], name)
+            return str(provides_list[0])
+        else:
+            return None
+
+
+class ZypperBackend(RpmBackend):
+    """
+    Implements the zypper backend for software manager.
+
+    Set of operations for the zypper package manager, found on SUSE Linux.
+    """
+    def __init__(self):
+        """
+        Initializes the base command and the zypper package repository.
+        """
+        super(ZypperBackend, self).__init__()
+        self.base_command = os_dep.command('zypper') + ' -n'
+        z_cmd = self.base_command + ' --version'
+        self.zypper_version = utils.system_output(z_cmd, ignore_status=True)
+        logging.debug('Zypper backend initialized')
+        logging.debug('Zypper version: %s', self.zypper_version)
+
+
+    def install(self, name):
+        """
+        Installs package [name]. Handles local installs.
+
+        @param name: Package Name.
+        @return: True on success, False on failure.
+        """
+        # Removed dead code: os.path.abspath(name) was computed but unused
+        i_cmd = self.base_command + ' install -l ' + name
+        try:
+            utils.system(i_cmd)
+            return True
+        except error.CmdError:
+            return False
+
+
+    def add_repo(self, url):
+        """
+        Adds repository [url].
+
+        @param url: URL for the package repository.
+        @return: True on success, False on failure.
+        """
+        ar_cmd = self.base_command + ' addrepo ' + url
+        try:
+            utils.system(ar_cmd)
+            return True
+        except error.CmdError:
+            return False
+
+
+    def remove_repo(self, url):
+        """
+        Removes repository [url].
+
+        @param url: URL for the package repository.
+        @return: True on success, False on failure.
+        """
+        rr_cmd = self.base_command + ' removerepo ' + url
+        try:
+            utils.system(rr_cmd)
+            return True
+        except error.CmdError:
+            return False
+
+
+    def remove(self, name):
+        """
+        Removes package [name].
+
+        @param name: Package name.
+        @return: True on success, False on failure.
+        """
+        r_cmd = self.base_command + ' ' + 'erase' + ' ' + name
+
+        try:
+            utils.system(r_cmd)
+            return True
+        except error.CmdError:
+            return False
+
+
+    def upgrade(self):
+        """
+        Upgrades all packages of the system.
+
+        @return: True on success, False on failure.
+        """
+        u_cmd = self.base_command + ' update -l'
+
+        try:
+            utils.system(u_cmd)
+            return True
+        except error.CmdError:
+            return False
+
+
+    def provides(self, name):
+        """
+        Searches for what provides a given file.
+
+        @param name: File path.
+        @return: Name of the first providing package, or None.
+        """
+        p_cmd = self.base_command + ' what-provides ' + name
+        list_provides = []
+        try:
+            # Skip the 4 header lines of zypper's table output
+            p_output = utils.system_output(p_cmd).split('\n')[4:]
+            for line in p_output:
+                line = [a.strip() for a in line.split('|')]
+                try:
+                    state, pname, ptype, version, arch, repository = line
+                    if pname not in list_provides:
+                        list_provides.append(pname)
+                except ValueError:
+                    # Bug fix: unpacking a row with the wrong number of
+                    # columns raises ValueError, not IndexError
+                    pass
+            if len(list_provides) > 1:
+                logging.warning('More than one package found, '
+                                'opting by the first queue result')
+            if list_provides:
+                logging.info("Package %s provides %s", list_provides[0], name)
+                return list_provides[0]
+            return None
+        except error.CmdError:
+            return None
+
+
+class AptBackend(DpkgBackend):
+    """
+    Implements the apt backend for software manager.
+
+    Set of operations for the apt package manager, commonly found on Debian and
+    Debian based distributions, such as Ubuntu Linux.
+    """
+    def __init__(self):
+        """
+        Initializes the base command and the debian package repository.
+        """
+        super(AptBackend, self).__init__()
+        executable = os_dep.command('apt-get')
+        self.base_command = executable + ' -y'
+        self.repo_file_path = '/etc/apt/sources.list.d/autotest'
+        self.apt_version = utils.system_output('apt-get -v | head -1',
+                                               ignore_status=True)
+        logging.debug('Apt backend initialized')
+        logging.debug('apt version: %s', self.apt_version)
+
+
+    def install(self, name):
+        """
+        Installs package [name].
+
+        @param name: Package name.
+        @return: True on success, False on failure.
+        """
+        command = 'install'
+        i_cmd = self.base_command + ' ' + command + ' ' + name
+
+        try:
+            utils.system(i_cmd)
+            return True
+        except error.CmdError:
+            return False
+
+
+    def remove(self, name):
+        """
+        Remove package [name].
+
+        @param name: Package name.
+        @return: True on success, False on failure.
+        """
+        command = 'remove'
+        flag = '--purge'
+        r_cmd = self.base_command + ' ' + command + ' ' + flag + ' ' + name
+
+        try:
+            utils.system(r_cmd)
+            return True
+        except error.CmdError:
+            return False
+
+
+    def add_repo(self, repo):
+        """
+        Add an apt repository.
+
+        @param repo: Repository string. Example:
+                'deb http://archive.ubuntu.com/ubuntu/ maverick universe'
+        """
+        # Bug fix: the previous code opened the file in append mode and then
+        # tried to read() it, which fails. Read first, then append if needed.
+        repo_file_contents = ''
+        if os.path.isfile(self.repo_file_path):
+            repo_file = open(self.repo_file_path, 'r')
+            try:
+                repo_file_contents = repo_file.read()
+            finally:
+                repo_file.close()
+        if repo not in repo_file_contents:
+            repo_file = open(self.repo_file_path, 'a')
+            try:
+                repo_file.write(repo + '\n')
+            finally:
+                repo_file.close()
+
+
+    def remove_repo(self, repo):
+        """
+        Remove an apt repository.
+
+        @param repo: Repository string. Example:
+                'deb http://archive.ubuntu.com/ubuntu/ maverick universe'
+        """
+        # Bug fix: readlines was referenced without calling it, and the
+        # reopen used the nonexistent file method 'open'. Lines keep their
+        # trailing newline, so compare against the stripped line.
+        repo_file = open(self.repo_file_path, 'r')
+        try:
+            new_file_contents = [line for line in repo_file.readlines()
+                                 if line.strip() != repo]
+        finally:
+            repo_file.close()
+        repo_file = open(self.repo_file_path, 'w')
+        try:
+            repo_file.writelines(new_file_contents)
+        finally:
+            repo_file.close()
+
+
+    def upgrade(self):
+        """
+        Upgrade all packages of the system with eventual new versions.
+
+        @return: True on success, False on failure.
+        """
+        ud_command = 'update'
+        ud_cmd = self.base_command + ' ' + ud_command
+        try:
+            utils.system(ud_cmd)
+        except error.CmdError:
+            # Best effort: a failed cache update should not abort the upgrade
+            logging.error("Apt package update failed")
+        up_command = 'upgrade'
+        up_cmd = self.base_command + ' ' + up_command
+        try:
+            utils.system(up_cmd)
+            return True
+        except error.CmdError:
+            return False
+
+
+    def provides(self, file):
+        """
+        Return a list of packages that provide [file].
+
+        @param file: File path.
+        @return: Name of the first providing package, or None.
+        """
+        if not self.check_installed('apt-file'):
+            self.install('apt-file')
+        command = os_dep.command('apt-file')
+        cache_update_cmd = command + ' update'
+        try:
+            utils.system(cache_update_cmd, ignore_status=True)
+        except error.CmdError:
+            logging.error("Apt file cache update failed")
+        fu_cmd = command + ' search ' + file
+        try:
+            provides = utils.system_output(fu_cmd).split('\n')
+            list_provides = []
+            for line in provides:
+                if line:
+                    try:
+                        line = line.split(':')
+                        package = line[0].strip()
+                        path = line[1].strip()
+                        if path == file and package not in list_provides:
+                            list_provides.append(package)
+                    except IndexError:
+                        pass
+            if len(list_provides) > 1:
+                logging.warning('More than one package found, '
+                                'opting by the first queue result')
+            if list_provides:
+                logging.info("Package %s provides %s", list_provides[0], file)
+                return list_provides[0]
+            return None
+        except error.CmdError:
+            return None
+
+
+if __name__ == '__main__':
+    parser = optparse.OptionParser(
+    "usage: %prog [install|remove|list-all|list-files|add-repo|remove-repo|"
+    "upgrade|what-provides|install-what-provides] arguments")
+    parser.add_option('--verbose', dest="debug", action='store_true',
+                      help='include debug messages in console output')
+
+    options, args = parser.parse_args()
+    debug = options.debug
+    logging_manager.configure_logging(SoftwareManagerLoggingConfig(),
+                                      verbose=debug)
+    software_manager = SoftwareManager()
+    if args:
+        action = args[0]
+        args = " ".join(args[1:])
+    else:
+        action = 'show-help'
+
+    # Bug fix: 'list-all' previously started a second 'if' chain, splitting
+    # the dispatch in two; keep a single if/elif chain for all actions.
+    if action == 'install':
+        software_manager.install(args)
+    elif action == 'remove':
+        software_manager.remove(args)
+    elif action == 'list-all':
+        software_manager.list_all()
+    elif action == 'list-files':
+        software_manager.list_files(args)
+    elif action == 'add-repo':
+        software_manager.add_repo(args)
+    elif action == 'remove-repo':
+        software_manager.remove_repo(args)
+    elif action == 'upgrade':
+        software_manager.upgrade()
+    elif action == 'what-provides':
+        software_manager.provides(args)
+    elif action == 'install-what-provides':
+        software_manager.install_what_provides(args)
+    elif action == 'show-help':
+        parser.print_help()
diff --git a/client/common_lib/test.py b/client/common_lib/test.py
index 766766b..310b45d 100644
--- a/client/common_lib/test.py
+++ b/client/common_lib/test.py
@@ -71,8 +71,8 @@
 
 
     def write_test_keyval(self, attr_dict):
-        utils.write_keyval(self.outputdir, attr_dict)
-
+        utils.write_keyval(self.outputdir, attr_dict,
+                           tap_report=self.job._tap)
 
     @staticmethod
     def _append_type_to_keys(dictionary, typename):
@@ -84,25 +84,29 @@
 
 
     def write_perf_keyval(self, perf_dict):
-        self.write_iteration_keyval({}, perf_dict)
+        self.write_iteration_keyval({}, perf_dict,
+                                    tap_report=self.job._tap)
 
 
     def write_attr_keyval(self, attr_dict):
-        self.write_iteration_keyval(attr_dict, {})
+        self.write_iteration_keyval(attr_dict, {},
+                                    tap_report=self.job._tap)
 
 
-    def write_iteration_keyval(self, attr_dict, perf_dict):
+    def write_iteration_keyval(self, attr_dict, perf_dict, tap_report=None):
         # append the dictionaries before they have the {perf} and {attr} added
         self._keyvals.append({'attr':attr_dict, 'perf':perf_dict})
         self._new_keyval = True
 
         if attr_dict:
             attr_dict = self._append_type_to_keys(attr_dict, "attr")
-            utils.write_keyval(self.resultsdir, attr_dict, type_tag="attr")
+            utils.write_keyval(self.resultsdir, attr_dict, type_tag="attr",
+                               tap_report=tap_report)
 
         if perf_dict:
             perf_dict = self._append_type_to_keys(perf_dict, "perf")
-            utils.write_keyval(self.resultsdir, perf_dict, type_tag="perf")
+            utils.write_keyval(self.resultsdir, perf_dict, type_tag="perf",
+                               tap_report=tap_report)
 
         keyval_path = os.path.join(self.resultsdir, "keyval")
         print >> open(keyval_path, "a"), ""
diff --git a/client/common_lib/utils.py b/client/common_lib/utils.py
index 47f3cb4..382f79d 100644
--- a/client/common_lib/utils.py
+++ b/client/common_lib/utils.py
@@ -1,1710 +1,13 @@
-#
-# Copyright 2008 Google Inc. Released under the GPL v2
+"""
+Convenience functions for use by tests or whomever.
 
-import os, pickle, random, re, resource, select, shutil, signal, StringIO
-import socket, struct, subprocess, sys, time, textwrap, urlparse
-import warnings, smtplib, logging, urllib2
-from threading import Thread, Event
-try:
-    import hashlib
-except ImportError:
-    import md5, sha
-from autotest_lib.client.common_lib import error, logging_manager
+NOTE: this is a mixin library that pulls in functions from several places
+Note carefully what the precedence order is
 
-def deprecated(func):
-    """This is a decorator which can be used to mark functions as deprecated.
-    It will result in a warning being emmitted when the function is used."""
-    def new_func(*args, **dargs):
-        warnings.warn("Call to deprecated function %s." % func.__name__,
-                      category=DeprecationWarning)
-        return func(*args, **dargs)
-    new_func.__name__ = func.__name__
-    new_func.__doc__ = func.__doc__
-    new_func.__dict__.update(func.__dict__)
-    return new_func
+There's no really good way to do this, as this isn't a class we can do
+inheritance with, just a collection of static methods.
+"""
 
-
-class _NullStream(object):
-    def write(self, data):
-        pass
-
-
-    def flush(self):
-        pass
-
-
-TEE_TO_LOGS = object()
-_the_null_stream = _NullStream()
-
-DEFAULT_STDOUT_LEVEL = logging.DEBUG
-DEFAULT_STDERR_LEVEL = logging.ERROR
-
-# prefixes for logging stdout/stderr of commands
-STDOUT_PREFIX = '[stdout] '
-STDERR_PREFIX = '[stderr] '
-
-
-def get_stream_tee_file(stream, level, prefix=''):
-    if stream is None:
-        return _the_null_stream
-    if stream is TEE_TO_LOGS:
-        return logging_manager.LoggingFile(level=level, prefix=prefix)
-    return stream
-
-
-class BgJob(object):
-    def __init__(self, command, stdout_tee=None, stderr_tee=None, verbose=True,
-                 stdin=None, stderr_level=DEFAULT_STDERR_LEVEL):
-        self.command = command
-        self.stdout_tee = get_stream_tee_file(stdout_tee, DEFAULT_STDOUT_LEVEL,
-                                              prefix=STDOUT_PREFIX)
-        self.stderr_tee = get_stream_tee_file(stderr_tee, stderr_level,
-                                              prefix=STDERR_PREFIX)
-        self.result = CmdResult(command)
-
-        # allow for easy stdin input by string, we'll let subprocess create
-        # a pipe for stdin input and we'll write to it in the wait loop
-        if isinstance(stdin, basestring):
-            self.string_stdin = stdin
-            stdin = subprocess.PIPE
-        else:
-            self.string_stdin = None
-
-        if verbose:
-            logging.debug("Running '%s'" % command)
-        self.sp = subprocess.Popen(command, stdout=subprocess.PIPE,
-                                   stderr=subprocess.PIPE,
-                                   preexec_fn=self._reset_sigpipe, shell=True,
-                                   executable="/bin/bash",
-                                   stdin=stdin)
-
-
-    def output_prepare(self, stdout_file=None, stderr_file=None):
-        self.stdout_file = stdout_file
-        self.stderr_file = stderr_file
-
-
-    def process_output(self, stdout=True, final_read=False):
-        """output_prepare must be called prior to calling this"""
-        if stdout:
-            pipe, buf, tee = self.sp.stdout, self.stdout_file, self.stdout_tee
-        else:
-            pipe, buf, tee = self.sp.stderr, self.stderr_file, self.stderr_tee
-
-        if final_read:
-            # read in all the data we can from pipe and then stop
-            data = []
-            while select.select([pipe], [], [], 0)[0]:
-                data.append(os.read(pipe.fileno(), 1024))
-                if len(data[-1]) == 0:
-                    break
-            data = "".join(data)
-        else:
-            # perform a single read
-            data = os.read(pipe.fileno(), 1024)
-        buf.write(data)
-        tee.write(data)
-
-
-    def cleanup(self):
-        self.stdout_tee.flush()
-        self.stderr_tee.flush()
-        self.sp.stdout.close()
-        self.sp.stderr.close()
-        self.result.stdout = self.stdout_file.getvalue()
-        self.result.stderr = self.stderr_file.getvalue()
-
-
-    def _reset_sigpipe(self):
-        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
-
-
-def ip_to_long(ip):
-    # !L is a long in network byte order
-    return struct.unpack('!L', socket.inet_aton(ip))[0]
-
-
-def long_to_ip(number):
-    # See above comment.
-    return socket.inet_ntoa(struct.pack('!L', number))
-
-
-def create_subnet_mask(bits):
-    return (1 << 32) - (1 << 32-bits)
-
-
-def format_ip_with_mask(ip, mask_bits):
-    masked_ip = ip_to_long(ip) & create_subnet_mask(mask_bits)
-    return "%s/%s" % (long_to_ip(masked_ip), mask_bits)
-
-
-def normalize_hostname(alias):
-    ip = socket.gethostbyname(alias)
-    return socket.gethostbyaddr(ip)[0]
-
-
-def get_ip_local_port_range():
-    match = re.match(r'\s*(\d+)\s*(\d+)\s*$',
-                     read_one_line('/proc/sys/net/ipv4/ip_local_port_range'))
-    return (int(match.group(1)), int(match.group(2)))
-
-
-def set_ip_local_port_range(lower, upper):
-    write_one_line('/proc/sys/net/ipv4/ip_local_port_range',
-                   '%d %d\n' % (lower, upper))
-
-
-
-def send_email(mail_from, mail_to, subject, body):
-    """
-    Sends an email via smtp
-
-    mail_from: string with email address of sender
-    mail_to: string or list with email address(es) of recipients
-    subject: string with subject of email
-    body: (multi-line) string with body of email
-    """
-    if isinstance(mail_to, str):
-        mail_to = [mail_to]
-    msg = "From: %s\nTo: %s\nSubject: %s\n\n%s" % (mail_from, ','.join(mail_to),
-                                                   subject, body)
-    try:
-        mailer = smtplib.SMTP('localhost')
-        try:
-            mailer.sendmail(mail_from, mail_to, msg)
-        finally:
-            mailer.quit()
-    except Exception, e:
-        # Emails are non-critical, not errors, but don't raise them
-        print "Sending email failed. Reason: %s" % repr(e)
-
-
-def read_one_line(filename):
-    return open(filename, 'r').readline().rstrip('\n')
-
-
-def read_file(filename):
-    f = open(filename)
-    try:
-        return f.read()
-    finally:
-        f.close()
-
-
-def get_field(data, param, linestart="", sep=" "):
-    """
-    Parse data from string.
-    @param data: Data to parse.
-        example:
-          data:
-             cpu   324 345 34  5 345
-             cpu0  34  11  34 34  33
-             ^^^^
-             start of line
-             params 0   1   2  3   4
-    @param param: Position of parameter after linestart marker.
-    @param linestart: String to which start line with parameters.
-    @param sep: Separator between parameters regular expression.
-    """
-    search = re.compile(r"(?<=^%s)\s*(.*)" % linestart, re.MULTILINE)
-    find = search.search(data)
-    if find != None:
-        return re.split("%s" % sep, find.group(1))[param]
-    else:
-        print "There is no line which starts with %s in data." % linestart
-        return None
-
-
-def write_one_line(filename, line):
-    open_write_close(filename, line.rstrip('\n') + '\n')
-
-
-def open_write_close(filename, data):
-    f = open(filename, 'w')
-    try:
-        f.write(data)
-    finally:
-        f.close()
-
-
-def matrix_to_string(matrix, header=None):
-    """
-    Return a pretty, aligned string representation of a nxm matrix.
-
-    This representation can be used to print any tabular data, such as
-    database results. It works by scanning the lengths of each element
-    in each column, and determining the format string dynamically.
-
-    @param matrix: Matrix representation (list with n rows of m elements).
-    @param header: Optional tuple or list with header elements to be displayed.
-    """
-    if type(header) is list:
-        header = tuple(header)
-    lengths = []
-    if header:
-        for column in header:
-            lengths.append(len(column))
-    for row in matrix:
-        for column in row:
-            i = row.index(column)
-            cl = len(column)
-            try:
-                ml = lengths[i]
-                if cl > ml:
-                    lengths[i] = cl
-            except IndexError:
-                lengths.append(cl)
-
-    lengths = tuple(lengths)
-    format_string = ""
-    for length in lengths:
-        format_string += "%-" + str(length) + "s "
-    format_string += "\n"
-
-    matrix_str = ""
-    if header:
-        matrix_str += format_string % header
-    for row in matrix:
-        matrix_str += format_string % tuple(row)
-
-    return matrix_str
-
-
-def read_keyval(path):
-    """
-    Read a key-value pair format file into a dictionary, and return it.
-    Takes either a filename or directory name as input. If it's a
-    directory name, we assume you want the file to be called keyval.
-    """
-    if os.path.isdir(path):
-        path = os.path.join(path, 'keyval')
-    keyval = {}
-    if os.path.exists(path):
-        for line in open(path):
-            line = re.sub('#.*', '', line).rstrip()
-            if not re.search(r'^[-\.\w]+=', line):
-                raise ValueError('Invalid format line: %s' % line)
-            key, value = line.split('=', 1)
-            if re.search('^\d+$', value):
-                value = int(value)
-            elif re.search('^(\d+\.)?\d+$', value):
-                value = float(value)
-            keyval[key] = value
-    return keyval
-
-
-def write_keyval(path, dictionary, type_tag=None):
-    """
-    Write a key-value pair format file out to a file. This uses append
-    mode to open the file, so existing text will not be overwritten or
-    reparsed.
-
-    If type_tag is None, then the key must be composed of alphanumeric
-    characters (or dashes+underscores). However, if type-tag is not
-    null then the keys must also have "{type_tag}" as a suffix. At
-    the moment the only valid values of type_tag are "attr" and "perf".
-    """
-    if os.path.isdir(path):
-        path = os.path.join(path, 'keyval')
-    keyval = open(path, 'a')
-
-    if type_tag is None:
-        key_regex = re.compile(r'^[-\.\w]+$')
-    else:
-        if type_tag not in ('attr', 'perf'):
-            raise ValueError('Invalid type tag: %s' % type_tag)
-        escaped_tag = re.escape(type_tag)
-        key_regex = re.compile(r'^[-\.\w]+\{%s\}$' % escaped_tag)
-    try:
-        for key in sorted(dictionary.keys()):
-            if not key_regex.search(key):
-                raise ValueError('Invalid key: %s' % key)
-            keyval.write('%s=%s\n' % (key, dictionary[key]))
-    finally:
-        keyval.close()
-
-
-class FileFieldMonitor(object):
-    """
-    Monitors the information from the file and reports it's values.
-
-    It gather the information at start and stop of the measurement or
-    continuously during the measurement.
-    """
-    class Monitor(Thread):
-        """
-        Internal monitor class to ensure continuous monitor of monitored file.
-        """
-        def __init__(self, master):
-            """
-            @param master: Master class which control Monitor
-            """
-            Thread.__init__(self)
-            self.master = master
-
-        def run(self):
-            """
-            Start monitor in thread mode
-            """
-            while not self.master.end_event.isSet():
-                self.master._get_value(self.master.logging)
-                time.sleep(self.master.time_step)
-
-
-    def __init__(self, status_file, data_to_read, mode_diff, continuously=False,
-                 contlogging=False, separator=" +", time_step=0.1):
-        """
-        Initialize variables.
-        @param status_file: File contain status.
-        @param mode_diff: If True make a difference of value, else average.
-        @param data_to_read: List of tuples with data position.
-            format: [(start_of_line,position in params)]
-            example:
-              data:
-                 cpu   324 345 34  5 345
-                 cpu0  34  11  34 34  33
-                 ^^^^
-                 start of line
-                 params 0   1   2  3   4
-        @param mode_diff: True to subtract old value from new value,
-            False make average of the values.
-        @parma continuously: Start the monitoring thread using the time_step
-            as the measurement period.
-        @param contlogging: Log data in continuous run.
-        @param separator: Regular expression of separator.
-        @param time_step: Time period of the monitoring value.
-        """
-        self.end_event = Event()
-        self.start_time = 0
-        self.end_time = 0
-        self.test_time = 0
-
-        self.status_file = status_file
-        self.separator = separator
-        self.data_to_read = data_to_read
-        self.num_of_params = len(self.data_to_read)
-        self.mode_diff = mode_diff
-        self.continuously = continuously
-        self.time_step = time_step
-
-        self.value = [0 for i in range(self.num_of_params)]
-        self.old_value = [0 for i in range(self.num_of_params)]
-        self.log = []
-        self.logging = contlogging
-
-        self.started = False
-        self.num_of_get_value = 0
-        self.monitor = None
-
-
-    def _get_value(self, logging=True):
-        """
-        Return current values.
-        @param logging: If true log value in memory. There can be problem
-          with long run.
-        """
-        data = read_file(self.status_file)
-        value = []
-        for i in range(self.num_of_params):
-            value.append(int(get_field(data,
-                             self.data_to_read[i][1],
-                             self.data_to_read[i][0],
-                             self.separator)))
-
-        if logging:
-            self.log.append(value)
-        if not self.mode_diff:
-            value = map(lambda x, y: x + y, value, self.old_value)
-
-        self.old_value = value
-        self.num_of_get_value += 1
-        return value
-
-
-    def start(self):
-        """
-        Start value monitor.
-        """
-        if self.started:
-            self.stop()
-        self.old_value = [0 for i in range(self.num_of_params)]
-        self.num_of_get_value = 0
-        self.log = []
-        self.end_event.clear()
-        self.start_time = time.time()
-        self._get_value()
-        self.started = True
-        if (self.continuously):
-            self.monitor = FileFieldMonitor.Monitor(self)
-            self.monitor.start()
-
-
-    def stop(self):
-        """
-        Stop value monitor.
-        """
-        if self.started:
-            self.started = False
-            self.end_time = time.time()
-            self.test_time = self.end_time - self.start_time
-            self.value = self._get_value()
-            if (self.continuously):
-                self.end_event.set()
-                self.monitor.join()
-            if (self.mode_diff):
-                self.value = map(lambda x, y: x - y, self.log[-1], self.log[0])
-            else:
-                self.value = map(lambda x: x / self.num_of_get_value,
-                                 self.value)
-
-
-    def get_status(self):
-        """
-        @return: Status of monitored process average value,
-            time of test and array of monitored values and time step of
-            continuous run.
-        """
-        if self.started:
-            self.stop()
-        if self.mode_diff:
-            for i in range(len(self.log) - 1):
-                self.log[i] = (map(lambda x, y: x - y,
-                                   self.log[i + 1], self.log[i]))
-            self.log.pop()
-        return (self.value, self.test_time, self.log, self.time_step)
-
-
-def is_url(path):
-    """Return true if path looks like a URL"""
-    # for now, just handle http and ftp
-    url_parts = urlparse.urlparse(path)
-    return (url_parts[0] in ('http', 'ftp'))
-
-
-def urlopen(url, data=None, timeout=5):
-    """Wrapper to urllib2.urlopen with timeout addition."""
-
-    # Save old timeout
-    old_timeout = socket.getdefaulttimeout()
-    socket.setdefaulttimeout(timeout)
-    try:
-        return urllib2.urlopen(url, data=data)
-    finally:
-        socket.setdefaulttimeout(old_timeout)
-
-
-def urlretrieve(url, filename, data=None, timeout=300):
-    """Retrieve a file from given url."""
-    logging.debug('Fetching %s -> %s', url, filename)
-
-    src_file = urlopen(url, data=data, timeout=timeout)
-    try:
-        dest_file = open(filename, 'wb')
-        try:
-            shutil.copyfileobj(src_file, dest_file)
-        finally:
-            dest_file.close()
-    finally:
-        src_file.close()
-
-
-def hash(type, input=None):
-    """
-    Returns an hash object of type md5 or sha1. This function is implemented in
-    order to encapsulate hash objects in a way that is compatible with python
-    2.4 and python 2.6 without warnings.
-
-    Note that even though python 2.6 hashlib supports hash types other than
-    md5 and sha1, we are artificially limiting the input values in order to
-    make the function to behave exactly the same among both python
-    implementations.
-
-    @param input: Optional input string that will be used to update the hash.
-    """
-    if type not in ['md5', 'sha1']:
-        raise ValueError("Unsupported hash type: %s" % type)
-
-    try:
-        hash = hashlib.new(type)
-    except NameError:
-        if type == 'md5':
-            hash = md5.new()
-        elif type == 'sha1':
-            hash = sha.new()
-
-    if input:
-        hash.update(input)
-
-    return hash
-
-
-def get_file(src, dest, permissions=None):
-    """Get a file from src, which can be local or a remote URL"""
-    if src == dest:
-        return
-
-    if is_url(src):
-        urlretrieve(src, dest)
-    else:
-        shutil.copyfile(src, dest)
-
-    if permissions:
-        os.chmod(dest, permissions)
-    return dest
-
-
-def unmap_url(srcdir, src, destdir='.'):
-    """
-    Receives either a path to a local file or a URL.
-    returns either the path to the local file, or the fetched URL
-
-    unmap_url('/usr/src', 'foo.tar', '/tmp')
-                            = '/usr/src/foo.tar'
-    unmap_url('/usr/src', 'http://site/file', '/tmp')
-                            = '/tmp/file'
-                            (after retrieving it)
-    """
-    if is_url(src):
-        url_parts = urlparse.urlparse(src)
-        filename = os.path.basename(url_parts[2])
-        dest = os.path.join(destdir, filename)
-        return get_file(src, dest)
-    else:
-        return os.path.join(srcdir, src)
-
-
-def update_version(srcdir, preserve_srcdir, new_version, install,
-                   *args, **dargs):
-    """
-    Make sure srcdir is version new_version
-
-    If not, delete it and install() the new version.
-
-    In the preserve_srcdir case, we just check it's up to date,
-    and if not, we rerun install, without removing srcdir
-    """
-    versionfile = os.path.join(srcdir, '.version')
-    install_needed = True
-
-    if os.path.exists(versionfile):
-        old_version = pickle.load(open(versionfile))
-        if old_version == new_version:
-            install_needed = False
-
-    if install_needed:
-        if not preserve_srcdir and os.path.exists(srcdir):
-            shutil.rmtree(srcdir)
-        install(*args, **dargs)
-        if os.path.exists(srcdir):
-            pickle.dump(new_version, open(versionfile, 'w'))
-
-
-def get_stderr_level(stderr_is_expected):
-    if stderr_is_expected:
-        return DEFAULT_STDOUT_LEVEL
-    return DEFAULT_STDERR_LEVEL
-
-
-def run(command, timeout=None, ignore_status=False,
-        stdout_tee=None, stderr_tee=None, verbose=True, stdin=None,
-        stderr_is_expected=None, args=()):
-    """
-    Run a command on the host.
-
-    @param command: the command line string.
-    @param timeout: time limit in seconds before attempting to kill the
-            running process. The run() function will take a few seconds
-            longer than 'timeout' to complete if it has to kill the process.
-    @param ignore_status: do not raise an exception, no matter what the exit
-            code of the command is.
-    @param stdout_tee: optional file-like object to which stdout data
-            will be written as it is generated (data will still be stored
-            in result.stdout).
-    @param stderr_tee: likewise for stderr.
-    @param verbose: if True, log the command being run.
-    @param stdin: stdin to pass to the executed process (can be a file
-            descriptor, a file object of a real file or a string).
-    @param args: sequence of strings of arguments to be given to the command
-            inside " quotes after they have been escaped for that; each
-            element in the sequence will be given as a separate command
-            argument
-
-    @return a CmdResult object
-
-    @raise CmdError: the exit code of the command execution was not 0
-    """
-    if isinstance(args, basestring):
-        raise TypeError('Got a string for the "args" keyword argument, '
-                        'need a sequence.')
-
-    for arg in args:
-        command += ' "%s"' % sh_escape(arg)
-    if stderr_is_expected is None:
-        stderr_is_expected = ignore_status
-
-    bg_job = join_bg_jobs(
-        (BgJob(command, stdout_tee, stderr_tee, verbose, stdin=stdin,
-               stderr_level=get_stderr_level(stderr_is_expected)),),
-        timeout)[0]
-    if not ignore_status and bg_job.result.exit_status:
-        raise error.CmdError(command, bg_job.result,
-                             "Command returned non-zero exit status")
-
-    return bg_job.result
-
-
-def run_parallel(commands, timeout=None, ignore_status=False,
-                 stdout_tee=None, stderr_tee=None):
-    """
-    Behaves the same as run() with the following exceptions:
-
-    - commands is a list of commands to run in parallel.
-    - ignore_status toggles whether or not an exception should be raised
-      on any error.
-
-    @return: a list of CmdResult objects
-    """
-    bg_jobs = []
-    for command in commands:
-        bg_jobs.append(BgJob(command, stdout_tee, stderr_tee,
-                             stderr_level=get_stderr_level(ignore_status)))
-
-    # Updates objects in bg_jobs list with their process information
-    join_bg_jobs(bg_jobs, timeout)
-
-    for bg_job in bg_jobs:
-        if not ignore_status and bg_job.result.exit_status:
-            raise error.CmdError(command, bg_job.result,
-                                 "Command returned non-zero exit status")
-
-    return [bg_job.result for bg_job in bg_jobs]
-
-
-@deprecated
-def run_bg(command):
-    """Function deprecated. Please use BgJob class instead."""
-    bg_job = BgJob(command)
-    return bg_job.sp, bg_job.result
-
-
-def join_bg_jobs(bg_jobs, timeout=None):
-    """Joins the bg_jobs with the current thread.
-
-    Returns the same list of bg_jobs objects that was passed in.
-    """
-    ret, timeout_error = 0, False
-    for bg_job in bg_jobs:
-        bg_job.output_prepare(StringIO.StringIO(), StringIO.StringIO())
-
-    try:
-        # We are holding ends to stdin, stdout pipes
-        # hence we need to be sure to close those fds no mater what
-        start_time = time.time()
-        timeout_error = _wait_for_commands(bg_jobs, start_time, timeout)
-
-        for bg_job in bg_jobs:
-            # Process stdout and stderr
-            bg_job.process_output(stdout=True,final_read=True)
-            bg_job.process_output(stdout=False,final_read=True)
-    finally:
-        # close our ends of the pipes to the sp no matter what
-        for bg_job in bg_jobs:
-            bg_job.cleanup()
-
-    if timeout_error:
-        # TODO: This needs to be fixed to better represent what happens when
-        # running in parallel. However this is backwards compatable, so it will
-        # do for the time being.
-        raise error.CmdError(bg_jobs[0].command, bg_jobs[0].result,
-                             "Command(s) did not complete within %d seconds"
-                             % timeout)
-
-
-    return bg_jobs
-
-
-def _wait_for_commands(bg_jobs, start_time, timeout):
-    # This returns True if it must return due to a timeout, otherwise False.
-
-    # To check for processes which terminate without producing any output
-    # a 1 second timeout is used in select.
-    SELECT_TIMEOUT = 1
-
-    read_list = []
-    write_list = []
-    reverse_dict = {}
-
-    for bg_job in bg_jobs:
-        read_list.append(bg_job.sp.stdout)
-        read_list.append(bg_job.sp.stderr)
-        reverse_dict[bg_job.sp.stdout] = (bg_job, True)
-        reverse_dict[bg_job.sp.stderr] = (bg_job, False)
-        if bg_job.string_stdin is not None:
-            write_list.append(bg_job.sp.stdin)
-            reverse_dict[bg_job.sp.stdin] = bg_job
-
-    if timeout:
-        stop_time = start_time + timeout
-        time_left = stop_time - time.time()
-    else:
-        time_left = None # so that select never times out
-
-    while not timeout or time_left > 0:
-        # select will return when we may write to stdin or when there is
-        # stdout/stderr output we can read (including when it is
-        # EOF, that is the process has terminated).
-        read_ready, write_ready, _ = select.select(read_list, write_list, [],
-                                                   SELECT_TIMEOUT)
-
-        # os.read() has to be used instead of
-        # subproc.stdout.read() which will otherwise block
-        for file_obj in read_ready:
-            bg_job, is_stdout = reverse_dict[file_obj]
-            bg_job.process_output(is_stdout)
-
-        for file_obj in write_ready:
-            # we can write PIPE_BUF bytes without blocking
-            # POSIX requires PIPE_BUF is >= 512
-            bg_job = reverse_dict[file_obj]
-            file_obj.write(bg_job.string_stdin[:512])
-            bg_job.string_stdin = bg_job.string_stdin[512:]
-            # no more input data, close stdin, remove it from the select set
-            if not bg_job.string_stdin:
-                file_obj.close()
-                write_list.remove(file_obj)
-                del reverse_dict[file_obj]
-
-        all_jobs_finished = True
-        for bg_job in bg_jobs:
-            if bg_job.result.exit_status is not None:
-                continue
-
-            bg_job.result.exit_status = bg_job.sp.poll()
-            if bg_job.result.exit_status is not None:
-                # process exited, remove its stdout/stdin from the select set
-                bg_job.result.duration = time.time() - start_time
-                read_list.remove(bg_job.sp.stdout)
-                read_list.remove(bg_job.sp.stderr)
-                del reverse_dict[bg_job.sp.stdout]
-                del reverse_dict[bg_job.sp.stderr]
-            else:
-                all_jobs_finished = False
-
-        if all_jobs_finished:
-            return False
-
-        if timeout:
-            time_left = stop_time - time.time()
-
-    # Kill all processes which did not complete prior to timeout
-    for bg_job in bg_jobs:
-        if bg_job.result.exit_status is not None:
-            continue
-
-        logging.warn('run process timeout (%s) fired on: %s', timeout,
-                     bg_job.command)
-        nuke_subprocess(bg_job.sp)
-        bg_job.result.exit_status = bg_job.sp.poll()
-        bg_job.result.duration = time.time() - start_time
-
-    return True
-
-
-def pid_is_alive(pid):
-    """
-    True if process pid exists and is not yet stuck in Zombie state.
-    Zombies are impossible to move between cgroups, etc.
-    pid can be integer, or text of integer.
-    """
-    path = '/proc/%s/stat' % pid
-
-    try:
-        stat = read_one_line(path)
-    except IOError:
-        if not os.path.exists(path):
-            # file went away
-            return False
-        raise
-
-    return stat.split()[2] != 'Z'
-
-
-def signal_pid(pid, sig):
-    """
-    Sends a signal to a process id. Returns True if the process terminated
-    successfully, False otherwise.
-    """
-    try:
-        os.kill(pid, sig)
-    except OSError:
-        # The process may have died before we could kill it.
-        pass
-
-    for i in range(5):
-        if not pid_is_alive(pid):
-            return True
-        time.sleep(1)
-
-    # The process is still alive
-    return False
-
-
-def nuke_subprocess(subproc):
-    # check if the subprocess is still alive, first
-    if subproc.poll() is not None:
-        return subproc.poll()
-
-    # the process has not terminated within timeout,
-    # kill it via an escalating series of signals.
-    signal_queue = [signal.SIGTERM, signal.SIGKILL]
-    for sig in signal_queue:
-        signal_pid(subproc.pid, sig)
-        if subproc.poll() is not None:
-            return subproc.poll()
-
-
-def nuke_pid(pid, signal_queue=(signal.SIGTERM, signal.SIGKILL)):
-    # the process has not terminated within timeout,
-    # kill it via an escalating series of signals.
-    for sig in signal_queue:
-        if signal_pid(pid, sig):
-            return
-
-    # no signal successfully terminated the process
-    raise error.AutoservRunError('Could not kill %d' % pid, None)
-
-
-def system(command, timeout=None, ignore_status=False):
-    """
-    Run a command
-
-    @param timeout: timeout in seconds
-    @param ignore_status: if ignore_status=False, throw an exception if the
-            command's exit code is non-zero
-            if ignore_stauts=True, return the exit code.
-
-    @return exit status of command
-            (note, this will always be zero unless ignore_status=True)
-    """
-    return run(command, timeout=timeout, ignore_status=ignore_status,
-               stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS).exit_status
-
-
-def system_parallel(commands, timeout=None, ignore_status=False):
-    """This function returns a list of exit statuses for the respective
-    list of commands."""
-    return [bg_jobs.exit_status for bg_jobs in
-            run_parallel(commands, timeout=timeout, ignore_status=ignore_status,
-                         stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)]
-
-
-def system_output(command, timeout=None, ignore_status=False,
-                  retain_output=False, args=()):
-    """
-    Run a command and return the stdout output.
-
-    @param command: command string to execute.
-    @param timeout: time limit in seconds before attempting to kill the
-            running process. The function will take a few seconds longer
-            than 'timeout' to complete if it has to kill the process.
-    @param ignore_status: do not raise an exception, no matter what the exit
-            code of the command is.
-    @param retain_output: set to True to make stdout/stderr of the command
-            output to be also sent to the logging system
-    @param args: sequence of strings of arguments to be given to the command
-            inside " quotes after they have been escaped for that; each
-            element in the sequence will be given as a separate command
-            argument
-
-    @return a string with the stdout output of the command.
-    """
-    if retain_output:
-        out = run(command, timeout=timeout, ignore_status=ignore_status,
-                  stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS,
-                  args=args).stdout
-    else:
-        out = run(command, timeout=timeout, ignore_status=ignore_status,
-                  args=args).stdout
-    if out[-1:] == '\n':
-        out = out[:-1]
-    return out
-
-
-def system_output_parallel(commands, timeout=None, ignore_status=False,
-                           retain_output=False):
-    if retain_output:
-        out = [bg_job.stdout for bg_job
-               in run_parallel(commands, timeout=timeout,
-                               ignore_status=ignore_status,
-                               stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)]
-    else:
-        out = [bg_job.stdout for bg_job in run_parallel(commands,
-                                  timeout=timeout, ignore_status=ignore_status)]
-    for x in out:
-        if out[-1:] == '\n': out = out[:-1]
-    return out
-
-
-def strip_unicode(input):
-    if type(input) == list:
-        return [strip_unicode(i) for i in input]
-    elif type(input) == dict:
-        output = {}
-        for key in input.keys():
-            output[str(key)] = strip_unicode(input[key])
-        return output
-    elif type(input) == unicode:
-        return str(input)
-    else:
-        return input
-
-
-def get_cpu_percentage(function, *args, **dargs):
-    """Returns a tuple containing the CPU% and return value from function call.
-
-    This function calculates the usage time by taking the difference of
-    the user and system times both before and after the function call.
-    """
-    child_pre = resource.getrusage(resource.RUSAGE_CHILDREN)
-    self_pre = resource.getrusage(resource.RUSAGE_SELF)
-    start = time.time()
-    to_return = function(*args, **dargs)
-    elapsed = time.time() - start
-    self_post = resource.getrusage(resource.RUSAGE_SELF)
-    child_post = resource.getrusage(resource.RUSAGE_CHILDREN)
-
-    # Calculate CPU Percentage
-    s_user, s_system = [a - b for a, b in zip(self_post, self_pre)[:2]]
-    c_user, c_system = [a - b for a, b in zip(child_post, child_pre)[:2]]
-    cpu_percent = (s_user + c_user + s_system + c_system) / elapsed
-
-    return cpu_percent, to_return
-
-
-class SystemLoad(object):
-    """
-    Get system and/or process values and return average value of load.
-    """
-    def __init__(self, pids, advanced=False, time_step=0.1, cpu_cont=False,
-                 use_log=False):
-        """
-        @param pids: List of pids to be monitored. If pid = 0 whole system will
-          be monitored. pid == 0 means whole system.
-        @param advanced: monitor add value for system irq count and softirq
-          for process minor and maior page fault
-        @param time_step: Time step for continuous monitoring.
-        @param cpu_cont: If True monitor CPU load continuously.
-        @param use_log: If true every monitoring is logged for dump.
-        """
-        self.pids = []
-        self.stats = {}
-        for pid in pids:
-            if pid == 0:
-                cpu = FileFieldMonitor("/proc/stat",
-                                       [("cpu", 0), # User Time
-                                        ("cpu", 2), # System Time
-                                        ("intr", 0), # IRQ Count
-                                        ("softirq", 0)], # Soft IRQ Count
-                                       True,
-                                       cpu_cont,
-                                       use_log,
-                                       " +",
-                                       time_step)
-                mem = FileFieldMonitor("/proc/meminfo",
-                                       [("MemTotal:", 0), # Mem Total
-                                        ("MemFree:", 0), # Mem Free
-                                        ("Buffers:", 0), # Buffers
-                                        ("Cached:", 0)], # Cached
-                                       False,
-                                       True,
-                                       use_log,
-                                       " +",
-                                       time_step)
-                self.stats[pid] = ["TOTAL", cpu, mem]
-                self.pids.append(pid)
-            else:
-                name = ""
-                if (type(pid) is int):
-                    self.pids.append(pid)
-                    name = get_process_name(pid)
-                else:
-                    self.pids.append(pid[0])
-                    name = pid[1]
-
-                cpu = FileFieldMonitor("/proc/%d/stat" %
-                                       self.pids[-1],
-                                       [("", 13), # User Time
-                                        ("", 14), # System Time
-                                        ("", 9), # Minority Page Fault
-                                        ("", 11)], # Majority Page Fault
-                                       True,
-                                       cpu_cont,
-                                       use_log,
-                                       " +",
-                                       time_step)
-                mem = FileFieldMonitor("/proc/%d/status" %
-                                       self.pids[-1],
-                                       [("VmSize:", 0), # Virtual Memory Size
-                                        ("VmRSS:", 0), # Resident Set Size
-                                        ("VmPeak:", 0), # Peak VM Size
-                                        ("VmSwap:", 0)], # VM in Swap
-                                       False,
-                                       True,
-                                       use_log,
-                                       " +",
-                                       time_step)
-                self.stats[self.pids[-1]] = [name, cpu, mem]
-
-        self.advanced = advanced
-
-
-    def __str__(self):
-        """
-        Define format how to print
-        """
-        out = ""
-        for pid in self.pids:
-            for stat in self.stats[pid][1:]:
-                out += str(stat.get_status()) + "\n"
-        return out
-
-
-    def start(self, pids=[]):
-        """
-        Start monitoring of the process system usage.
-        @param pids: List of PIDs you intend to control. Use pids=[] to control
-            all defined PIDs.
-        """
-        if pids == []:
-            pids = self.pids
-
-        for pid in pids:
-            for stat in self.stats[pid][1:]:
-                stat.start()
-
-
-    def stop(self, pids=[]):
-        """
-        Stop monitoring of the process system usage.
-        @param pids: List of PIDs you intend to control. Use pids=[] to control
-            all defined PIDs.
-        """
-        if pids == []:
-            pids = self.pids
-
-        for pid in pids:
-            for stat in self.stats[pid][1:]:
-                stat.stop()
-
-
-    def dump(self, pids=[]):
-        """
-        Get the status of monitoring.
-        @param pids: List of PIDs you intend to control. Use pids=[] to control
-            all defined PIDs.
-         @return:
-            tuple([cpu load], [memory load]):
-                ([(PID1, (PID1_cpu_meas)), (PID2, (PID2_cpu_meas)), ...],
-                 [(PID1, (PID1_mem_meas)), (PID2, (PID2_mem_meas)), ...])
-
-            PID1_cpu_meas:
-                average_values[], test_time, cont_meas_values[[]], time_step
-            PID1_mem_meas:
-                average_values[], test_time, cont_meas_values[[]], time_step
-            where average_values[] are the measured values (mem_free,swap,...)
-            which are described in SystemLoad.__init__()-FileFieldMonitor.
-            cont_meas_values[[]] is a list of average_values in the sampling
-            times.
-        """
-        if pids == []:
-            pids = self.pids
-
-        cpus = []
-        memory = []
-        for pid in pids:
-            stat = (pid, self.stats[pid][1].get_status())
-            cpus.append(stat)
-        for pid in pids:
-            stat = (pid, self.stats[pid][2].get_status())
-            memory.append(stat)
-
-        return (cpus, memory)
-
-
-    def get_cpu_status_string(self, pids=[]):
-        """
-        Convert status to string array.
-        @param pids: List of PIDs you intend to control. Use pids=[] to control
-            all defined PIDs.
-        @return: String format to table.
-        """
-        if pids == []:
-            pids = self.pids
-
-        headers = ["NAME",
-                   ("%7s") % "PID",
-                   ("%5s") % "USER",
-                   ("%5s") % "SYS",
-                   ("%5s") % "SUM"]
-        if self.advanced:
-            headers.extend(["MINFLT/IRQC",
-                            "MAJFLT/SOFTIRQ"])
-        headers.append(("%11s") % "TIME")
-        textstatus = []
-        for pid in pids:
-            stat = self.stats[pid][1].get_status()
-            time = stat[1]
-            stat = stat[0]
-            textstatus.append(["%s" % self.stats[pid][0],
-                               "%7s" % pid,
-                               "%4.0f%%" % (stat[0] / time),
-                               "%4.0f%%" % (stat[1] / time),
-                               "%4.0f%%" % ((stat[0] + stat[1]) / time),
-                               "%10.3fs" % time])
-            if self.advanced:
-                textstatus[-1].insert(-1, "%11d" % stat[2])
-                textstatus[-1].insert(-1, "%14d" % stat[3])
-
-        return matrix_to_string(textstatus, tuple(headers))
-
-
-    def get_mem_status_string(self, pids=[]):
-        """
-        Convert status to string array.
-        @param pids: List of PIDs you intend to control. Use pids=[] to control
-            all defined PIDs.
-        @return: String format to table.
-        """
-        if pids == []:
-            pids = self.pids
-
-        headers = ["NAME",
-                   ("%7s") % "PID",
-                   ("%8s") % "TOTAL/VMSIZE",
-                   ("%8s") % "FREE/VMRSS",
-                   ("%8s") % "BUFFERS/VMPEAK",
-                   ("%8s") % "CACHED/VMSWAP",
-                   ("%11s") % "TIME"]
-        textstatus = []
-        for pid in pids:
-            stat = self.stats[pid][2].get_status()
-            time = stat[1]
-            stat = stat[0]
-            textstatus.append(["%s" % self.stats[pid][0],
-                               "%7s" % pid,
-                               "%10dMB" % (stat[0] / 1024),
-                               "%8dMB" % (stat[1] / 1024),
-                               "%12dMB" % (stat[2] / 1024),
-                               "%11dMB" % (stat[3] / 1024),
-                               "%10.3fs" % time])
-
-        return matrix_to_string(textstatus, tuple(headers))
-
-
-def get_arch(run_function=run):
-    """
-    Get the hardware architecture of the machine.
-    run_function is used to execute the commands. It defaults to
-    utils.run() but a custom method (if provided) should be of the
-    same schema as utils.run. It should return a CmdResult object and
-    throw a CmdError exception.
-    """
-    arch = run_function('/bin/uname -m').stdout.rstrip()
-    if re.match(r'i\d86$', arch):
-        arch = 'i386'
-    return arch
-
-
-def get_num_logical_cpus_per_socket(run_function=run):
-    """
-    Get the number of cores (including hyperthreading) per cpu.
-    run_function is used to execute the commands. It defaults to
-    utils.run() but a custom method (if provided) should be of the
-    same schema as utils.run. It should return a CmdResult object and
-    throw a CmdError exception.
-    """
-    siblings = run_function('grep "^siblings" /proc/cpuinfo').stdout.rstrip()
-    num_siblings = map(int,
-                       re.findall(r'^siblings\s*:\s*(\d+)\s*$',
-                                  siblings, re.M))
-    if len(num_siblings) == 0:
-        raise error.TestError('Unable to find siblings info in /proc/cpuinfo')
-    if min(num_siblings) != max(num_siblings):
-        raise error.TestError('Number of siblings differ %r' %
-                              num_siblings)
-    return num_siblings[0]
-
-
-def merge_trees(src, dest):
-    """
-    Merges a source directory tree at 'src' into a destination tree at
-    'dest'. If a path is a file in both trees than the file in the source
-    tree is APPENDED to the one in the destination tree. If a path is
-    a directory in both trees then the directories are recursively merged
-    with this function. In any other case, the function will skip the
-    paths that cannot be merged (instead of failing).
-    """
-    if not os.path.exists(src):
-        return # exists only in dest
-    elif not os.path.exists(dest):
-        if os.path.isfile(src):
-            shutil.copy2(src, dest) # file only in src
-        else:
-            shutil.copytree(src, dest, symlinks=True) # dir only in src
-        return
-    elif os.path.isfile(src) and os.path.isfile(dest):
-        # src & dest are files in both trees, append src to dest
-        destfile = open(dest, "a")
-        try:
-            srcfile = open(src)
-            try:
-                destfile.write(srcfile.read())
-            finally:
-                srcfile.close()
-        finally:
-            destfile.close()
-    elif os.path.isdir(src) and os.path.isdir(dest):
-        # src & dest are directories in both trees, so recursively merge
-        for name in os.listdir(src):
-            merge_trees(os.path.join(src, name), os.path.join(dest, name))
-    else:
-        # src & dest both exist, but are incompatible
-        return
-
-
-class CmdResult(object):
-    """
-    Command execution result.
-
-    command:     String containing the command line itself
-    exit_status: Integer exit code of the process
-    stdout:      String containing stdout of the process
-    stderr:      String containing stderr of the process
-    duration:    Elapsed wall clock time running the process
-    """
-
-
-    def __init__(self, command="", stdout="", stderr="",
-                 exit_status=None, duration=0):
-        self.command = command
-        self.exit_status = exit_status
-        self.stdout = stdout
-        self.stderr = stderr
-        self.duration = duration
-
-
-    def __repr__(self):
-        wrapper = textwrap.TextWrapper(width = 78,
-                                       initial_indent="\n    ",
-                                       subsequent_indent="    ")
-
-        stdout = self.stdout.rstrip()
-        if stdout:
-            stdout = "\nstdout:\n%s" % stdout
-
-        stderr = self.stderr.rstrip()
-        if stderr:
-            stderr = "\nstderr:\n%s" % stderr
-
-        return ("* Command: %s\n"
-                "Exit status: %s\n"
-                "Duration: %s\n"
-                "%s"
-                "%s"
-                % (wrapper.fill(self.command), self.exit_status,
-                self.duration, stdout, stderr))
-
-
-class run_randomly:
-    def __init__(self, run_sequentially=False):
-        # Run sequentially is for debugging control files
-        self.test_list = []
-        self.run_sequentially = run_sequentially
-
-
-    def add(self, *args, **dargs):
-        test = (args, dargs)
-        self.test_list.append(test)
-
-
-    def run(self, fn):
-        while self.test_list:
-            test_index = random.randint(0, len(self.test_list)-1)
-            if self.run_sequentially:
-                test_index = 0
-            (args, dargs) = self.test_list.pop(test_index)
-            fn(*args, **dargs)
-
-
-def import_site_module(path, module, dummy=None, modulefile=None):
-    """
-    Try to import the site specific module if it exists.
-
-    @param path full filename of the source file calling this (ie __file__)
-    @param module full module name
-    @param dummy dummy value to return in case there is no symbol to import
-    @param modulefile module filename
-
-    @return site specific module or dummy
-
-    @raises ImportError if the site file exists but imports fails
-    """
-    short_module = module[module.rfind(".") + 1:]
-
-    if not modulefile:
-        modulefile = short_module + ".py"
-
-    if os.path.exists(os.path.join(os.path.dirname(path), modulefile)):
-        return __import__(module, {}, {}, [short_module])
-    return dummy
-
-
-def import_site_symbol(path, module, name, dummy=None, modulefile=None):
-    """
-    Try to import site specific symbol from site specific file if it exists
-
-    @param path full filename of the source file calling this (ie __file__)
-    @param module full module name
-    @param name symbol name to be imported from the site file
-    @param dummy dummy value to return in case there is no symbol to import
-    @param modulefile module filename
-
-    @return site specific symbol or dummy
-
-    @raises ImportError if the site file exists but imports fails
-    """
-    module = import_site_module(path, module, modulefile=modulefile)
-    if not module:
-        return dummy
-
-    # special unique value to tell us if the symbol can't be imported
-    cant_import = object()
-
-    obj = getattr(module, name, cant_import)
-    if obj is cant_import:
-        logging.debug("unable to import site symbol '%s', using non-site "
-                      "implementation", name)
-        return dummy
-
-    return obj
-
-
-def import_site_class(path, module, classname, baseclass, modulefile=None):
-    """
-    Try to import site specific class from site specific file if it exists
-
-    Args:
-        path: full filename of the source file calling this (ie __file__)
-        module: full module name
-        classname: class name to be loaded from site file
-        baseclass: base class object to return when no site file present or
-            to mixin when site class exists but is not inherited from baseclass
-        modulefile: module filename
-
-    Returns: baseclass if site specific class does not exist, the site specific
-        class if it exists and is inherited from baseclass or a mixin of the
-        site specific class and baseclass when the site specific class exists
-        and is not inherited from baseclass
-
-    Raises: ImportError if the site file exists but imports fails
-    """
-
-    res = import_site_symbol(path, module, classname, None, modulefile)
-    if res:
-        if not issubclass(res, baseclass):
-            # if not a subclass of baseclass then mix in baseclass with the
-            # site specific class object and return the result
-            res = type(classname, (res, baseclass), {})
-    else:
-        res = baseclass
-
-    return res
-
-
-def import_site_function(path, module, funcname, dummy, modulefile=None):
-    """
-    Try to import site specific function from site specific file if it exists
-
-    Args:
-        path: full filename of the source file calling this (ie __file__)
-        module: full module name
-        funcname: function name to be imported from site file
-        dummy: dummy function to return in case there is no function to import
-        modulefile: module filename
-
-    Returns: site specific function object or dummy
-
-    Raises: ImportError if the site file exists but imports fails
-    """
-
-    return import_site_symbol(path, module, funcname, dummy, modulefile)
-
-
-def _get_pid_path(program_name):
-    my_path = os.path.dirname(__file__)
-    return os.path.abspath(os.path.join(my_path, "..", "..",
-                                        "%s.pid" % program_name))
-
-
-def write_pid(program_name):
-    """
-    Try to drop <program_name>.pid in the main autotest directory.
-
-    Args:
-      program_name: prefix for file name
-    """
-    pidfile = open(_get_pid_path(program_name), "w")
-    try:
-        pidfile.write("%s\n" % os.getpid())
-    finally:
-        pidfile.close()
-
-
-def delete_pid_file_if_exists(program_name):
-    """
-    Tries to remove <program_name>.pid from the main autotest directory.
-    """
-    pidfile_path = _get_pid_path(program_name)
-
-    try:
-        os.remove(pidfile_path)
-    except OSError:
-        if not os.path.exists(pidfile_path):
-            return
-        raise
-
-
-def get_pid_from_file(program_name):
-    """
-    Reads the pid from <program_name>.pid in the autotest directory.
-
-    @param program_name the name of the program
-    @return the pid if the file exists, None otherwise.
-    """
-    pidfile_path = _get_pid_path(program_name)
-    if not os.path.exists(pidfile_path):
-        return None
-
-    pidfile = open(_get_pid_path(program_name), 'r')
-
-    try:
-        try:
-            pid = int(pidfile.readline())
-        except IOError:
-            if not os.path.exists(pidfile_path):
-                return None
-            raise
-    finally:
-        pidfile.close()
-
-    return pid
-
-
-def get_process_name(pid):
-    """
-    Get process name from PID.
-    @param pid: PID of process.
-    """
-    return get_field(read_file("/proc/%d/stat" % pid), 1)[1:-1]
-
-
-def program_is_alive(program_name):
-    """
-    Checks if the process is alive and not in Zombie state.
-
-    @param program_name the name of the program
-    @return True if still alive, False otherwise
-    """
-    pid = get_pid_from_file(program_name)
-    if pid is None:
-        return False
-    return pid_is_alive(pid)
-
-
-def signal_program(program_name, sig=signal.SIGTERM):
-    """
-    Sends a signal to the process listed in <program_name>.pid
-
-    @param program_name the name of the program
-    @param sig signal to send
-    """
-    pid = get_pid_from_file(program_name)
-    if pid:
-        signal_pid(pid, sig)
-
-
-def get_relative_path(path, reference):
-    """Given 2 absolute paths "path" and "reference", compute the path of
-    "path" as relative to the directory "reference".
-
-    @param path the absolute path to convert to a relative path
-    @param reference an absolute directory path to which the relative
-        path will be computed
-    """
-    # normalize the paths (remove double slashes, etc)
-    assert(os.path.isabs(path))
-    assert(os.path.isabs(reference))
-
-    path = os.path.normpath(path)
-    reference = os.path.normpath(reference)
-
-    # we could use os.path.split() but it splits from the end
-    path_list = path.split(os.path.sep)[1:]
-    ref_list = reference.split(os.path.sep)[1:]
-
-    # find the longest leading common path
-    for i in xrange(min(len(path_list), len(ref_list))):
-        if path_list[i] != ref_list[i]:
-            # decrement i so when exiting this loop either by no match or by
-            # end of range we are one step behind
-            i -= 1
-            break
-    i += 1
-    # drop the common part of the paths, not interested in that anymore
-    del path_list[:i]
-
-    # for each uncommon component in the reference prepend a ".."
-    path_list[:0] = ['..'] * (len(ref_list) - i)
-
-    return os.path.join(*path_list)
-
-
-def sh_escape(command):
-    """
-    Escape special characters from a command so that it can be passed
-    as a double quoted (" ") string in a (ba)sh command.
-
-    Args:
-            command: the command string to escape.
-
-    Returns:
-            The escaped command string. The required englobing double
-            quotes are NOT added and so should be added at some point by
-            the caller.
-
-    See also: http://www.tldp.org/LDP/abs/html/escapingsection.html
-    """
-    command = command.replace("\\", "\\\\")
-    command = command.replace("$", r'\$')
-    command = command.replace('"', r'\"')
-    command = command.replace('`', r'\`')
-    return command
-
-
-def configure(extra=None, configure='./configure'):
-    """
-    Run configure passing in the correct host, build, and target options.
-
-    @param extra: extra command line arguments to pass to configure
-    @param configure: which configure script to use
-    """
-    args = []
-    if 'CHOST' in os.environ:
-        args.append('--host=' + os.environ['CHOST'])
-    if 'CBUILD' in os.environ:
-        args.append('--build=' + os.environ['CBUILD'])
-    if 'CTARGET' in os.environ:
-        args.append('--target=' + os.environ['CTARGET'])
-    if extra:
-        args.append(extra)
-
-    system('%s %s' % (configure, ' '.join(args)))
-
-
-def make(extra='', make='make', timeout=None, ignore_status=False):
-    """
-    Run make, adding MAKEOPTS to the list of options.
-
-    @param extra: extra command line arguments to pass to make.
-    """
-    cmd = '%s %s %s' % (make, os.environ.get('MAKEOPTS', ''), extra)
-    return system(cmd, timeout=timeout, ignore_status=ignore_status)
-
-
-def compare_versions(ver1, ver2):
-    """Version number comparison between ver1 and ver2 strings.
-
-    >>> compare_tuple("1", "2")
-    -1
-    >>> compare_tuple("foo-1.1", "foo-1.2")
-    -1
-    >>> compare_tuple("1.2", "1.2a")
-    -1
-    >>> compare_tuple("1.2b", "1.2a")
-    1
-    >>> compare_tuple("1.3.5.3a", "1.3.5.3b")
-    -1
-
-    Args:
-        ver1: version string
-        ver2: version string
-
-    Returns:
-        int:  1 if ver1 >  ver2
-              0 if ver1 == ver2
-             -1 if ver1 <  ver2
-    """
-    ax = re.split('[.-]', ver1)
-    ay = re.split('[.-]', ver2)
-    while len(ax) > 0 and len(ay) > 0:
-        cx = ax.pop(0)
-        cy = ay.pop(0)
-        maxlen = max(len(cx), len(cy))
-        c = cmp(cx.zfill(maxlen), cy.zfill(maxlen))
-        if c != 0:
-            return c
-    return cmp(len(ax), len(ay))
-
-
-def args_to_dict(args):
-    """Convert autoserv extra arguments in the form of key=val or key:val to a
-    dictionary.  Each argument key is converted to lowercase dictionary key.
-
-    Args:
-        args - list of autoserv extra arguments.
-
-    Returns:
-        dictionary
-    """
-    arg_re = re.compile(r'(\w+)[:=](.*)$')
-    dict = {}
-    for arg in args:
-        match = arg_re.match(arg)
-        if match:
-            dict[match.group(1).lower()] = match.group(2)
-        else:
-            logging.warning("args_to_dict: argument '%s' doesn't match "
-                            "'%s' pattern. Ignored." % (arg, arg_re.pattern))
-    return dict
-
-
-def get_unused_port():
-    """
-    Finds a semi-random available port. A race condition is still
-    possible after the port number is returned, if another process
-    happens to bind it.
-
-    Returns:
-        A port number that is unused on both TCP and UDP.
-    """
-
-    def try_bind(port, socket_type, socket_proto):
-        s = socket.socket(socket.AF_INET, socket_type, socket_proto)
-        try:
-            try:
-                s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
-                s.bind(('', port))
-                return s.getsockname()[1]
-            except socket.error:
-                return None
-        finally:
-            s.close()
-
-    # On the 2.6 kernel, calling try_bind() on UDP socket returns the
-    # same port over and over. So always try TCP first.
-    while True:
-        # Ask the OS for an unused port.
-        port = try_bind(0, socket.SOCK_STREAM, socket.IPPROTO_TCP)
-        # Check if this port is unused on the other protocol.
-        if port and try_bind(port, socket.SOCK_DGRAM, socket.IPPROTO_UDP):
-            return port
+from autotest_lib.client.common_lib.base_utils import *
+if os.path.exists(os.path.join(os.path.dirname(__file__), 'site_utils.py')):
+    from autotest_lib.client.common_lib.site_utils import *
diff --git a/client/profilers/cpistat/cpistat.py b/client/profilers/cpistat/cpistat.py
index 01524fb..1a4328d 100644
--- a/client/profilers/cpistat/cpistat.py
+++ b/client/profilers/cpistat/cpistat.py
@@ -17,7 +17,7 @@
     def start(self, test):
         cmd = os.path.join(self.bindir, 'site_cpistat')
         if not os.path.exists(cmd):
-           cmd = os.path.join(self.bindir, 'cpistat')
+            cmd = os.path.join(self.bindir, 'cpistat')
         logfile = open(os.path.join(test.profdir, "cpistat"), 'w')
         p = subprocess.Popen(cmd, stdout=logfile,
                              stderr=subprocess.STDOUT)
diff --git a/client/tests/hackbench/hackbench.py b/client/tests/hackbench/hackbench.py
index 5861888..15e93d7 100644
--- a/client/tests/hackbench/hackbench.py
+++ b/client/tests/hackbench/hackbench.py
@@ -18,9 +18,9 @@
     def setup(self):
         os.chdir(self.srcdir)
         if 'CC' in os.environ:
-          cc = '$CC'
+            cc = '$CC'
         else:
-          cc = 'cc'
+            cc = 'cc'
         utils.system('%s -lpthread hackbench.c -o hackbench' % cc)
 
 
diff --git a/client/tests/iozone/iozone.py b/client/tests/iozone/iozone.py
old mode 100755
new mode 100644
diff --git a/client/tests/kernbench/kernbench.py b/client/tests/kernbench/kernbench.py
index a22a671..ccde09f 100644
--- a/client/tests/kernbench/kernbench.py
+++ b/client/tests/kernbench/kernbench.py
@@ -49,10 +49,13 @@
             self.threads = self.job.cpu_count()*2
 
         self.kernel = self.__init_tree(version)
-        logfile = os.path.join(self.debugdir, 'build_log')
-
         logging.info("Warmup run ...")
-        self.kernel.build_timed(self.threads, output=logfile)      # warmup run
+        logfile = os.path.join(self.debugdir, 'build_log')
+        try:
+            self.kernel.build_timed(self.threads, output=logfile)  # warmup run
+        finally:
+            if os.path.exists(logfile):
+                utils.system("gzip -9 '%s'" % logfile, ignore_status=True)
 
 
     def run_once(self):
diff --git a/client/tests/kvm/__init__.py b/client/tests/kvm/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/client/tests/kvm/__init__.py
diff --git a/client/tests/kvm/autotest_control/iozone.control b/client/tests/kvm/autotest_control/iozone.control
new file mode 100644
index 0000000..17d9be2
--- /dev/null
+++ b/client/tests/kvm/autotest_control/iozone.control
@@ -0,0 +1,18 @@
+AUTHOR = "Ying Tao <yingtao@cn.ibm.com>"
+TIME = "MEDIUM"
+NAME = "IOzone"
+TEST_TYPE = "client"
+TEST_CLASS = "Kernel"
+TEST_CATEGORY = "Benchmark"
+
+DOC = """
+Iozone is useful for performing a broad filesystem analysis of a vendors
+computer platform. The benchmark tests file I/O performance for the following
+operations:
+      Read, write, re-read, re-write, read backwards, read strided, fread,
+      fwrite, random read, pread ,mmap, aio_read, aio_write
+
+For more information see http://www.iozone.org
+"""
+
+job.run_test('iozone')
diff --git a/client/tests/kvm/build.cfg.sample b/client/tests/kvm/build.cfg.sample
index 860192b..a689ed4 100644
--- a/client/tests/kvm/build.cfg.sample
+++ b/client/tests/kvm/build.cfg.sample
@@ -16,30 +16,42 @@
         save_results = no
         variants:
             - release:
-                mode = release
+                install_mode = release
                 ## Install from a kvm release. You can optionally specify
                 ## a release tag. If you omit it, the build test will get
                 ## the latest release tag available at that moment.
                 # release_tag = 84
                 release_dir = http://downloads.sourceforge.net/project/kvm/
                 release_listing = http://sourceforge.net/projects/kvm/files/
+                # In some cases, you might want to provide a ROM dir, so ROM
+                # files can be copied from there to your source based install
+                # path_to_rom_images = /usr/share/kvm
             - snapshot:
-                mode = snapshot
+                install_mode = snapshot
                 ## Install from a kvm snapshot location. You can optionally
                 ## specify a snapshot date. If you omit it, the test will get
                 ## yesterday's snapshot.
                 # snapshot_date = 20090712
                 snapshot_dir = http://foo.org/kvm-snapshots/
+                # In some cases, you might want to provide a ROM dir, so ROM
+                # files can be copied from there to your source based install
+                # path_to_rom_images = /usr/share/kvm
             - localtar:
-                mode = localtar
+                install_mode = localtar
                 ## Install from tarball located on the host's filesystem.
                 tarball = /tmp/kvm-84.tar.gz
+                # In some cases, you might want to provide a ROM dir, so ROM
+                # files can be copied from there to your source based install
+                # path_to_rom_images = /usr/share/kvm
             - localsrc:
-                mode = localsrc
+                install_mode = localsrc
                 ## Install from tarball located on the host's filesystem.
                 srcdir = /tmp/kvm-84
+                # In some cases, you might want to provide a ROM dir, so ROM
+                # files can be copied from there to your source based install
+                # path_to_rom_images = /usr/share/kvm
             - git:
-                mode = git
+                install_mode = git
                 ## Install KVM from git repositories.
                 ## If you provide only "git_repo" and "user_git_repo", the
                 ## build test will assume it will perform all build from the
@@ -64,8 +76,11 @@
                 # kmod_lbranch = kmod_lbranch_name
                 # kmod_commit = kmod_commit_name
                 # kmod_patches = ['http://foo.com/patch1', 'http://foo.com/patch2']
+                # In some cases, you might want to provide a ROM dir, so ROM
+                # files can be copied from there to your source based install
+                # path_to_rom_images = /usr/share/kvm
             - yum:
-                mode = yum
+                install_mode = yum
                 src_pkg = qemu
                 ## Name of the rpms we need installed
                 pkg_list = ['qemu-kvm', 'qemu-kvm-tools', 'qemu-system-x86', 'qemu-common', 'qemu-img']
@@ -74,7 +89,7 @@
                 ## List of RPMs that will be installed
                 pkg_path_list = ['http://foo.com/rpm1', 'http://foo.com/rpm2']
             - koji:
-                mode = koji
+                install_mode = koji
                 ## Install KVM from koji (Fedora build server)
                 ## It is possible to install packages right from Koji if you
                 ## provide a release tag or a build.
diff --git a/client/tests/kvm/cd_hash.py b/client/tests/kvm/cd_hash.py
index bcd14dc..04f8cbe 100755
--- a/client/tests/kvm/cd_hash.py
+++ b/client/tests/kvm/cd_hash.py
@@ -8,7 +8,7 @@
 import os, sys, optparse, logging
 import common
 import kvm_utils
-from autotest_lib.client.common_lib import logging_config, logging_manager
+from autotest_lib.client.common_lib import logging_manager
 from autotest_lib.client.bin import utils
 
 
diff --git a/client/tests/kvm/control b/client/tests/kvm/control
index 63bbe5d..d226adf 100644
--- a/client/tests/kvm/control
+++ b/client/tests/kvm/control
@@ -53,6 +53,20 @@
 """
 tests_cfg = kvm_config.config()
 tests_cfg_path = os.path.join(kvm_test_dir, "tests.cfg")
+
+if args:
+    # We get test parameters from command line
+    for arg in args:
+        try:
+            (key, value) = re.findall("(.*)=(.*)", arg)[0]
+            if key == "only":
+                str += "only %s\n" % value
+            elif key == "no":
+                str += "no %s\n" % value
+            else:
+                str += "%s = %s\n" % (key, value)
+        except IndexError:
+            pass
 tests_cfg.fork_and_parse(tests_cfg_path, str)
 
 # Run the tests
diff --git a/client/tests/kvm/deps/finish.exe b/client/tests/kvm/deps/finish.exe
old mode 100755
new mode 100644
Binary files differ
diff --git a/client/tests/kvm/deps/rss.exe b/client/tests/kvm/deps/rss.exe
old mode 100755
new mode 100644
Binary files differ
diff --git a/client/tests/kvm/deps/test_clock_getres/Makefile b/client/tests/kvm/deps/test_clock_getres/Makefile
new file mode 100644
index 0000000..b4f73c7
--- /dev/null
+++ b/client/tests/kvm/deps/test_clock_getres/Makefile
@@ -0,0 +1,11 @@
+CC = gcc
+PROG = test_clock_getres
+SRC = test_clock_getres.c
+LIBS = -lrt
+
+all: $(PROG)
+
+$(PROG):
+	$(CC) -o $(PROG) $(SRC) $(LIBS)
+clean:
+	rm -f $(PROG)
diff --git a/client/tests/kvm/deps/test_clock_getres/test_clock_getres.c b/client/tests/kvm/deps/test_clock_getres/test_clock_getres.c
new file mode 100644
index 0000000..81d3b9c
--- /dev/null
+++ b/client/tests/kvm/deps/test_clock_getres/test_clock_getres.c
@@ -0,0 +1,58 @@
+/*
+ *  Test clock resolution for KVM guests that have kvm-clock as clock source
+ *
+ *  Copyright (c) 2010 Red Hat, Inc
+ *  Author: Lucas Meneghel Rodrigues <lmr@redhat.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include <stdio.h>
+#include <time.h>
+#include <stdlib.h>
+#include <string.h>
+
+int main(void) {
+	struct timespec res;
+	int clock_return = clock_getres(CLOCK_MONOTONIC, &res);
+	char clocksource[50];
+	char line[80];
+	FILE *fr;
+	if ((fr = fopen(
+			"/sys/devices/system/clocksource/clocksource0/current_clocksource",
+			"rt")) == NULL) {
+		perror("fopen");
+		return EXIT_FAILURE;
+	}
+	while (fgets(line, 80, fr) != NULL) {
+		sscanf(line, "%49s", clocksource);
+	}
+	fclose(fr);
+	if (!strncmp(clocksource, "kvm-clock", strlen("kvm-clock"))) {
+		if (clock_return == 0) {
+			if (res.tv_sec > 1 || res.tv_nsec > 100) {
+				printf("FAIL: clock_getres returned bad clock resolution\n");
+				return EXIT_FAILURE;
+			} else {
+				printf("PASS: check successful\n");
+				return EXIT_SUCCESS;
+			}
+		} else {
+			printf("FAIL: clock_getres failed\n");
+			return EXIT_FAILURE;
+		}
+	} else {
+		printf("FAIL: invalid clock source: %s\n", clocksource);
+		return EXIT_FAILURE;
+	}
+}
diff --git a/client/tests/kvm/deps/whql_submission_15.cs b/client/tests/kvm/deps/whql_submission_15.cs
index 8fa6856..2a29ac5 100644
--- a/client/tests/kvm/deps/whql_submission_15.cs
+++ b/client/tests/kvm/deps/whql_submission_15.cs
@@ -16,20 +16,140 @@
 {
     class AutoJob
     {
+        // Wait for a machine to show up in the data store
+        static void FindMachine(IResourcePool rootPool, string machineName)
+        {
+            Console.WriteLine("Looking for machine '{0}'", machineName);
+            IResource machine = null;
+            while (true)
+            {
+                try
+                {
+                    machine = rootPool.GetResourceByName(machineName);
+                }
+                catch (Exception e)
+                {
+                    Console.WriteLine("Warning: " + e.Message);
+                }
+                // Make sure the machine is valid
+                if (machine != null &&
+                    machine.OperatingSystem != null &&
+                    machine.OperatingSystem.Length > 0 &&
+                    machine.ProcessorArchitecture != null &&
+                    machine.ProcessorArchitecture.Length > 0 &&
+                    machine.GetDevices().Length > 0)
+                    break;
+                System.Threading.Thread.Sleep(1000);
+            }
+            Console.WriteLine("Client machine '{0}' found ({1}, {2})",
+                machineName, machine.OperatingSystem, machine.ProcessorArchitecture);
+        }
+
+        // Delete a machine pool if it exists
+        static void DeleteResourcePool(IDeviceScript script, string poolName)
+        {
+            while (true)
+            {
+                try
+                {
+                    IResourcePool pool = script.GetResourcePoolByName(poolName);
+                    if (pool != null)
+                        script.DeleteResourcePool(pool);
+                    break;
+                }
+                catch (Exception e)
+                {
+                    Console.WriteLine("Warning: " + e.Message);
+                    System.Threading.Thread.Sleep(1000);
+                }
+            }
+        }
+
+        // Set the machine's status to 'Reset' and optionally wait for it to become ready
+        static void ResetMachine(IResourcePool rootPool, string machineName, bool wait)
+        {
+            Console.WriteLine("Resetting machine '{0}'", machineName);
+            IResource machine;
+            while (true)
+            {
+                try
+                {
+                    machine = rootPool.GetResourceByName(machineName);
+                    machine.ChangeResourceStatus("Reset");
+                    break;
+                }
+                catch (Exception e)
+                {
+                    Console.WriteLine("Warning: " + e.Message);
+                    System.Threading.Thread.Sleep(5000);
+                }
+            }
+            if (wait)
+            {
+                Console.WriteLine("Waiting for machine '{0}' to be ready", machineName);
+                while (machine.Status != "Ready")
+                {
+                    try
+                    {
+                        machine = rootPool.GetResourceByName(machineName);
+                    }
+                    catch (Exception e)
+                    {
+                        Console.WriteLine("Warning: " + e.Message);
+                    }
+                    System.Threading.Thread.Sleep(1000);
+                }
+                Console.WriteLine("Machine '{0}' is ready", machineName);
+            }
+        }
+
+        // Look for a device in a machine, and if not found, keep trying for 3 minutes
+        static IDevice GetDevice(IResourcePool rootPool, string machineName, string regexStr)
+        {
+            Regex deviceRegex = new Regex(regexStr, RegexOptions.IgnoreCase);
+            int numAttempts = 1;
+            DateTime endTime = DateTime.Now.AddSeconds(180);
+            while (DateTime.Now < endTime)
+            {
+                IResource machine = rootPool.GetResourceByName(machineName);
+                Console.WriteLine("Looking for device '{0}' in machine '{1}' (machine has {2} devices)",
+                    regexStr, machineName, machine.GetDevices().Length);
+                foreach (IDevice d in machine.GetDevices())
+                {
+                    if (deviceRegex.IsMatch(d.FriendlyName))
+                    {
+                        Console.WriteLine("Found device '{0}'", d.FriendlyName);
+                        return d;
+                    }
+                }
+                Console.WriteLine("Device not found");
+                if (numAttempts % 5 == 0)
+                    ResetMachine(rootPool, machineName, true);
+                else
+                    System.Threading.Thread.Sleep(5000);
+                numAttempts++;
+            }
+            Console.WriteLine("Error: device '{0}' not found", deviceRegex);
+            return null;
+        }
+
         static int Main(string[] args)
         {
-            if (args.Length != 5)
+            if (args.Length < 5)
             {
                 Console.WriteLine("Error: incorrect number of command line arguments");
-                Console.WriteLine("Usage: {0} serverName clientName machinePoolName submissionName timeout",
+                Console.WriteLine("Usage: {0} serverName machinePoolName submissionName timeout machineName0 machineName1 ...",
                     System.Environment.GetCommandLineArgs()[0]);
                 return 1;
             }
             string serverName = args[0];
-            string clientName = args[1];
-            string machinePoolName = args[2];
-            string submissionName = args[3];
-            double timeout = Convert.ToDouble(args[4]);
+            string machinePoolName = args[1];
+            string submissionName = args[2];
+            double timeout = Convert.ToDouble(args[3]);
+
+            List<string> machines = new List<string>();
+            for (int i = 4; i < args.Length; i++)
+                machines.Add(args[i]);
 
             try
             {
@@ -37,37 +157,17 @@
                 Console.WriteLine("Initializing DeviceScript object");
                 DeviceScript script = new DeviceScript();
                 Console.WriteLine("Connecting to data store");
-
                 script.ConnectToNamedDataStore(serverName);
 
-                // Find client machine
+                // Wait for client machines to become available
                 IResourcePool rootPool = script.GetResourcePoolByName("$");
-                Console.WriteLine("Looking for client machine '{0}'", clientName);
-                IResource machine = null;
-                while (true)
-                {
-                    try
-                    {
-                        machine = rootPool.GetResourceByName(clientName);
-                    }
-                    catch (Exception e)
-                    {
-                        Console.WriteLine("Warning: " + e.Message);
-                    }
-                    // Make sure the machine is valid
-                    if (machine != null &&
-                        machine.OperatingSystem != null &&
-                        machine.OperatingSystem.Length > 0 &&
-                        machine.ProcessorArchitecture != null &&
-                        machine.ProcessorArchitecture.Length > 0 &&
-                        machine.GetDevices().Length > 0)
-                        break;
-                    System.Threading.Thread.Sleep(1000);
-                }
-                Console.WriteLine("Client machine '{0}' found ({1}, {2})",
-                    clientName, machine.OperatingSystem, machine.ProcessorArchitecture);
+                foreach (string machineName in machines)
+                    FindMachine(rootPool, machineName);
 
-                // Create machine pool and add client machine to it
+                // Delete the machine pool if it already exists
+                DeleteResourcePool(script, machinePoolName);
+
+                // Create the machine pool and add the client machines to it
                 // (this must be done because jobs cannot be scheduled for machines in the
                 // default pool)
                 try
@@ -79,76 +179,27 @@
                     Console.WriteLine("Warning: " + e.Message);
                 }
                 IResourcePool newPool = script.GetResourcePoolByName(machinePoolName);
-                Console.WriteLine("Moving the client machine to pool '{0}'", machinePoolName);
-                machine.ChangeResourcePool(newPool);
+                foreach (string machineName in machines)
+                {
+                    Console.WriteLine("Moving machine '{0}' to pool '{1}'", machineName, machinePoolName);
+                    rootPool.GetResourceByName(machineName).ChangeResourcePool(newPool);
+                }
 
                 // Reset client machine
-                if (machine.Status != "Ready")
-                {
-                    Console.WriteLine("Changing the client machine's status to 'Reset'");
-                    while (true)
-                    {
-                        try
-                        {
-                            machine = rootPool.GetResourceByName(clientName);
-                            machine.ChangeResourceStatus("Unsafe");
-                            System.Threading.Thread.Sleep(5000);
-                            machine.ChangeResourceStatus("Reset");
-                            break;
-                        }
-                        catch (Exception e)
-                        {
-                            Console.WriteLine("Warning: " + e.Message);
-                        }
-                        System.Threading.Thread.Sleep(5000);
-                    }
-                    Console.WriteLine("Waiting for client machine to be ready");
-                    while (machine.Status != "Ready")
-                    {
-                        try
-                        {
-                            machine = rootPool.GetResourceByName(clientName);
-                        }
-                        catch (Exception e)
-                        {
-                            Console.WriteLine("Warning: " + e.Message);
-                        }
-                        System.Threading.Thread.Sleep(1000);
-                    }
-                }
-                Console.WriteLine("Client machine is ready");
+                foreach (string machineName in machines)
+                    ResetMachine(rootPool, machineName, true);
 
-                // Get requested device regex and look for a matching device
-                Console.WriteLine("Device to test: ");
-                Regex deviceRegex = new Regex(Console.ReadLine(), RegexOptions.IgnoreCase);
-                Console.WriteLine("Looking for device '{0}'", deviceRegex);
-                IDevice device;
-                DateTime endTime = DateTime.Now.AddSeconds(120);
-                while (DateTime.Now < endTime)
-                {
-                    machine = rootPool.GetResourceByName(clientName);
-                    Console.WriteLine("(Client machine has {0} devices)", machine.GetDevices().Length);
-                    foreach (IDevice d in machine.GetDevices())
-                    {
-                        if (deviceRegex.IsMatch(d.FriendlyName))
-                        {
-                            device = d;
-                            goto deviceFound;
-                        }
-                    }
-                    System.Threading.Thread.Sleep(5000);
-                }
-                Console.WriteLine("Error: device '{0}' not found", deviceRegex);
-                return 1;
-
-            deviceFound:
-                Console.WriteLine("Found device '{0}'", device.FriendlyName);
+                // Get requested device regex and look for a matching device in the first machine
+                Console.WriteLine("Device to test:");
+                IDevice device = GetDevice(rootPool, machines[0], Console.ReadLine());
+                if (device == null)
+                    return 1;
 
                 // Get requested jobs regex
-                Console.WriteLine("Jobs to run: ");
+                Console.WriteLine("Jobs to run:");
                 Regex jobRegex = new Regex(Console.ReadLine(), RegexOptions.IgnoreCase);
 
-                // Create submission
+                // Create a submission
                 Object[] existingSubmissions = script.GetSubmissionByName(submissionName);
                 if (existingSubmissions.Length > 0)
                 {
@@ -156,83 +207,126 @@
                         submissionName);
                     script.DeleteSubmission(((ISubmission)existingSubmissions[0]).Id);
                 }
-                Console.WriteLine("Creating submission '{0}'", submissionName);
-                ISubmission submission = script.CreateHardwareSubmission(submissionName,
-                    newPool.ResourcePoolId, device.InstanceId);
+                string hardwareId = device.InstanceId.Remove(device.InstanceId.LastIndexOf("\\"));
+                Console.WriteLine("Creating submission '{0}' (hardware ID: {1})", submissionName, hardwareId);
+                ISubmission submission = script.CreateHardwareSubmission(submissionName, newPool.ResourcePoolId, hardwareId);
 
-                // Get DeviceData objects from the user
+                // Set submission DeviceData
                 List<Object> deviceDataList = new List<Object>();
                 while (true)
                 {
                     ISubmissionDeviceData dd = script.CreateNewSubmissionDeviceData();
-                    Console.WriteLine("DeviceData name: ");
+                    Console.WriteLine("DeviceData name:");
                     dd.Name = Console.ReadLine();
                     if (dd.Name.Length == 0)
                         break;
-                    Console.WriteLine("DeviceData data: ");
+                    Console.WriteLine("DeviceData data:");
                     dd.Data = Console.ReadLine();
                     deviceDataList.Add(dd);
                 }
-
-                // Set the submission's DeviceData
                 submission.SetDeviceData(deviceDataList.ToArray());
 
-                // Get descriptors from the user
+                // Set submission descriptors
                 List<Object> descriptorList = new List<Object>();
                 while (true)
                 {
-                    Console.WriteLine("Descriptor path: ");
+                    Console.WriteLine("Descriptor path:");
                     string descriptorPath = Console.ReadLine();
                     if (descriptorPath.Length == 0)
                         break;
                     descriptorList.Add(script.GetDescriptorByPath(descriptorPath));
                 }
-
-                // Set the submission's descriptors
                 submission.SetLogoDescriptors(descriptorList.ToArray());
 
-                // Create a schedule
-                ISchedule schedule = script.CreateNewSchedule();
+                // Set machine dimensions
+                foreach (string machineName in machines)
+                {
+                    IResource machine = rootPool.GetResourceByName(machineName);
+                    while (true)
+                    {
+                        Console.WriteLine("Dimension name ({0}):", machineName);
+                        string dimName = Console.ReadLine();
+                        if (dimName.Length == 0)
+                            break;
+                        Console.WriteLine("Dimension value ({0}):", machineName);
+                        machine.SetDimension(dimName, Console.ReadLine());
+                    }
+                    // Set the WDKSubmissionId dimension for all machines
+                    machine.SetDimension("WDKSubmissionId", submission.Id.ToString() + "_" + submission.Name);
+                }
+
+                // Get job parameters
+                List<string> paramNames = new List<string>();
+                List<string> paramValues = new List<string>();
+                foreach (string machineName in machines)
+                {
+                    while (true)
+                    {
+                        Console.WriteLine("Parameter name ({0}):", machineName);
+                        string paramName = Console.ReadLine();
+                        if (paramName.Length == 0)
+                            break;
+                        Console.WriteLine("Device regex ({0}):", machineName);
+                        IDevice d = GetDevice(rootPool, machineName, Console.ReadLine());
+                        if (d == null)
+                            return 1;
+                        string deviceName = d.GetAttribute("name")[0].ToString();
+                        Console.WriteLine("Setting parameter value to '{0}'", deviceName);
+                        paramNames.Add(paramName);
+                        paramValues.Add(deviceName);
+                    }
+                }
+
+                // Find jobs that match the requested pattern
                 Console.WriteLine("Scheduling jobs:");
-                int jobCount = 0;
+                List<IJob> jobs = new List<IJob>();
                 foreach (IJob j in submission.GetJobs())
                 {
                     if (jobRegex.IsMatch(j.Name))
-                     {
-                        Console.WriteLine("  " + j.Name);
-                        schedule.AddDeviceJob(device, j);
-                        jobCount++;
+                    {
+                        Console.WriteLine("    " + j.Name);
+                        // Set job parameters
+                        for (int i = 0; i < paramNames.Count; i++)
+                        {
+                            IParameter p = j.GetParameterByName(paramNames[i]);
+                            if (p != null)
+                                p.ScheduleValue = paramValues[i];
+                        }
+                        jobs.Add(j);
                     }
                 }
-                if (jobCount == 0)
+                if (jobs.Count == 0)
                 {
                     Console.WriteLine("Error: no submission jobs match pattern '{0}'", jobRegex);
                     return 1;
                 }
+
+                // Create a schedule, add jobs to it and run it
+                ISchedule schedule = script.CreateNewSchedule();
+                foreach (IScheduleItem item in submission.ProcessJobs(jobs.ToArray()))
+                {
+                    item.Device = device;
+                    schedule.AddScheduleItem(item);
+                }
                 schedule.AddSubmission(submission);
                 schedule.SetResourcePool(newPool);
                 script.RunSchedule(schedule);
 
                 // Wait for jobs to complete
-                Console.WriteLine("Waiting for all jobs to complete (timeout={0})", timeout);
-                endTime = DateTime.Now.AddSeconds(timeout);
-                int numCompleted = 0, numFailed = 0;
-                while (numCompleted < submission.GetResults().Length && DateTime.Now < endTime)
+                Console.WriteLine("Waiting for all jobs to complete (timeout={0}s)", timeout);
+                DateTime endTime = DateTime.Now.AddSeconds(timeout);
+                int numCompleted, numFailed;
+                do
                 {
-                    // Sleep for 30 seconds
                     System.Threading.Thread.Sleep(30000);
-                    // Count completed submission jobs
-                    numCompleted = 0;
-                    foreach (IResult r in submission.GetResults())
-                        if (r.ResultStatus != "InProgress")
-                            numCompleted++;
-                    // Report results in a Python readable format and count failed schedule jobs
-                    // (submission jobs are a subset of schedule jobs)
+                    // Report results in a Python readable format and count completed and failed schedule jobs
+                    numCompleted = numFailed = 0;
                     Console.WriteLine();
                     Console.WriteLine("---- [");
-                    numFailed = 0;
                     foreach (IResult r in schedule.GetResults())
                     {
+                        if (r.ResultStatus != "InProgress") numCompleted++;
+                        if (r.ResultStatus == "Investigate") numFailed++;
                         Console.WriteLine("  {");
                         Console.WriteLine("    'id': {0}, 'job': r'''{1}''',", r.Job.Id, r.Job.Name);
                         Console.WriteLine("    'logs': r'''{0}''',", r.LogLocation);
@@ -243,10 +337,10 @@
                         Console.WriteLine("    'pass': {0}, 'fail': {1}, 'notrun': {2}, 'notapplicable': {3}",
                             r.Pass, r.Fail, r.NotRun, r.NotApplicable);
                         Console.WriteLine("  },");
-                        numFailed += r.Fail;
                     }
                     Console.WriteLine("] ----");
-                }
+                } while (numCompleted < schedule.GetResults().Length && DateTime.Now < endTime);
+
                 Console.WriteLine();
 
                 // Cancel incomplete jobs
@@ -254,26 +348,16 @@
                     if (r.ResultStatus == "InProgress")
                         r.Cancel();
 
-                // Set the machine's status to Unsafe and then Reset
-                try
-                {
-                    machine = rootPool.GetResourceByName(clientName);
-                    machine.ChangeResourceStatus("Unsafe");
-                    System.Threading.Thread.Sleep(5000);
-                    machine.ChangeResourceStatus("Reset");
-                }
-                catch (Exception e)
-                {
-                    Console.WriteLine("Warning: " + e.Message);
-                }
+                // Reset the machines
+                foreach (string machineName in machines)
+                    ResetMachine(rootPool, machineName, false);
 
                 // Report failures
-                if (numCompleted < submission.GetResults().Length)
+                if (numCompleted < schedule.GetResults().Length)
                     Console.WriteLine("Some jobs did not complete on time.");
                 if (numFailed > 0)
                     Console.WriteLine("Some jobs failed.");
-
-                if (numFailed > 0 || numCompleted < submission.GetResults().Length)
+                if (numFailed > 0 || numCompleted < schedule.GetResults().Length)
                     return 1;
 
                 Console.WriteLine("All jobs completed.");
diff --git a/client/tests/kvm/deps/whql_submission_15.exe b/client/tests/kvm/deps/whql_submission_15.exe
index 4f30aa8..605e2e3 100755
--- a/client/tests/kvm/deps/whql_submission_15.exe
+++ b/client/tests/kvm/deps/whql_submission_15.exe
Binary files differ
diff --git a/client/tests/kvm/get_started.py b/client/tests/kvm/get_started.py
index 6fa6b5f..5ce7349 100755
--- a/client/tests/kvm/get_started.py
+++ b/client/tests/kvm/get_started.py
@@ -5,10 +5,10 @@
 @copyright: Red Hat 2010
 """
 
-import os, sys, optparse, logging, shutil
+import os, sys, logging, shutil
 import common, kvm_utils
 from autotest_lib.client.common_lib import logging_manager
-from autotest_lib.client.bin import utils, os_dep
+from autotest_lib.client.bin import utils
 
 
 def check_iso(url, destination, hash):
@@ -82,11 +82,11 @@
     logging.info("3 - Verifying iso (make sure we have the OS ISO needed for "
                  "the default test set)")
 
-    iso_name = "Fedora-13-x86_64-DVD.iso"
-    fedora_dir = "pub/fedora/linux/releases/13/Fedora/x86_64/iso"
+    iso_name = "Fedora-14-x86_64-DVD.iso"
+    fedora_dir = "pub/fedora/linux/releases/14/Fedora/x86_64/iso"
     url = os.path.join("http://download.fedoraproject.org/", fedora_dir,
                        iso_name)
-    hash = "65c7f1aad3feb888ae3daadaf45d4a2a32b8773a"
+    hash = "38a4078011bac74493db7ecc53c9d9fbc96dbbd5"
     destination = os.path.join(base_dir, 'isos', 'linux')
     check_iso(url, destination, hash)
 
diff --git a/client/tests/kvm/html_report.py b/client/tests/kvm/html_report.py
index ebc9c12..8b4b109 100755
--- a/client/tests/kvm/html_report.py
+++ b/client/tests/kvm/html_report.py
@@ -11,7 +11,7 @@
 import common
 
 
-format_css="""
+format_css = """
 html,body {
     padding:0;
     color:#222;
@@ -180,7 +180,7 @@
 """
 
 
-table_js="""
+table_js = """
 /**
  * Copyright (c)2005-2007 Matt Kruse (javascripttoolbox.com)
  *
@@ -1380,11 +1380,11 @@
 ##  input and create a single html formatted result page.      ##
 #################################################################
 
-stimelist=[]
+stimelist = []
 
 
 def make_html_file(metadata, results, tag, host, output_file_name, dirname):
-    html_prefix="""
+    html_prefix = """
 <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
 <html>
 <head>
@@ -1405,7 +1405,7 @@
 </script>
 </head>
 <body>
-"""%(format_css, table_js, maketree_js)
+""" % (format_css, table_js, maketree_js)
 
 
     if output_file_name:
@@ -1427,13 +1427,13 @@
     total_failed = 0
     total_passed = 0
     for res in results:
-        total_executed+=1
+        total_executed += 1
         if res['status'] == 'GOOD':
-            total_passed+=1
+            total_passed += 1
         else:
-            total_failed+=1
+            total_failed += 1
     stat_str = 'No test cases executed'
-    if total_executed>0:
+    if total_executed > 0:
         failed_perct = int(float(total_failed)/float(total_executed)*100)
         stat_str = ('From %d tests executed, %d have passed (%d%% failures)' %
                     (total_executed, total_passed, failed_perct))
@@ -1486,15 +1486,15 @@
         if res['log']:
             #chop all '\n' from log text (to prevent html errors)
             rx1 = re.compile('(\s+)')
-            log_text = rx1.sub(' ',res['log'])
+            log_text = rx1.sub(' ', res['log'])
 
             # allow only a-zA-Z0-9_ in html title name
             # (due to bug in MS-explorer)
             rx2 = re.compile('([^a-zA-Z_0-9])')
-            updated_tag = rx2.sub('_',res['title'])
+            updated_tag = rx2.sub('_', res['title'])
 
-            html_body_text = '<html><head><title>%s</title></head><body>%s</body></html>'%(str(updated_tag),log_text)
-            print >> output, '<td align=\"left\"><A HREF=\"#\" onClick=\"popup(\'%s\',\'%s\')\">Info</A></td>'%(str(updated_tag),str(html_body_text))
+            html_body_text = '<html><head><title>%s</title></head><body>%s</body></html>' % (str(updated_tag), log_text)
+            print >> output, '<td align=\"left\"><A HREF=\"#\" onClick=\"popup(\'%s\',\'%s\')\">Info</A></td>' % (str(updated_tag), str(html_body_text))
         else:
             print >> output, '<td align=\"left\"></td>'
         # print execution time
@@ -1514,7 +1514,7 @@
     print >> output, '</p>'
 
     print >> output, '<ul class="mktree" id="meta_tree">'
-    counter=0
+    counter = 0
     keys = metadata.keys()
     keys.sort()
     for key in keys:
@@ -1528,7 +1528,7 @@
         output.close()
 
 
-def parse_result(dirname,line):
+def parse_result(dirname, line):
     parts = line.split()
     if len(parts) < 4:
         return None
@@ -1560,7 +1560,7 @@
         result['title'] = str(tag)
         result['status'] = parts[1]
         if result['status'] != 'GOOD':
-            result['log'] = get_exec_log(dirname,tag)
+            result['log'] = get_exec_log(dirname, tag)
         if len(stimelist)>0:
             pair = parts[4].split('=')
             etime = int(pair[1])
@@ -1572,10 +1572,10 @@
 
 
 def get_exec_log(resdir, tag):
-    stdout_file = os.path.join(resdir,tag) + '/debug/stdout'
-    stderr_file = os.path.join(resdir,tag) + '/debug/stderr'
-    status_file = os.path.join(resdir,tag) + '/status'
-    dmesg_file = os.path.join(resdir,tag) + '/sysinfo/dmesg'
+    stdout_file = os.path.join(resdir, tag) + '/debug/stdout'
+    stderr_file = os.path.join(resdir, tag) + '/debug/stderr'
+    status_file = os.path.join(resdir, tag) + '/status'
+    dmesg_file = os.path.join(resdir, tag) + '/sysinfo/dmesg'
     log = ''
     log += '<br><b>STDERR:</b><br>'
     log += get_info_file(stderr_file)
@@ -1589,20 +1589,20 @@
 
 
 def get_info_file(filename):
-    data=''
+    data = ''
     errors = re.compile(r"\b(error|fail|failed)\b", re.IGNORECASE)
     if os.path.isfile(filename):
         f = open('%s' % filename, "r")
-        lines=f.readlines()
+        lines = f.readlines()
         f.close()
         rx = re.compile('(\'|\")')
         for line in lines:
-            new_line = rx.sub('',line)
+            new_line = rx.sub('', line)
             errors_found = errors.findall(new_line)
-            if len(errors_found)>0:
-                data += '<font color=red>%s</font><br>'%str(new_line)
+            if len(errors_found) > 0:
+                data += '<font color=red>%s</font><br>' % str(new_line)
             else:
-                data += '%s<br>'%str(new_line)
+                data += '%s<br>' % str(new_line)
         if not data:
             data = 'No Information Found.<br>'
     else:
@@ -1687,12 +1687,12 @@
             status_file_name = dirname + '/status'
             sysinfo_dir = dirname + '/sysinfo'
             host = get_info_file('%s/hostname' % sysinfo_dir)
-            rx=re.compile('^\s+[END|START].*$')
+            rx = re.compile('^\s+[END|START].*$')
             # create the results set dict
-            results_data=[]
+            results_data = []
             if os.path.exists(status_file_name):
                 f = open(status_file_name, "r")
-                lines=f.readlines()
+                lines = f.readlines()
                 f.close()
                 for line in lines:
                     if rx.match(line):
diff --git a/client/tests/kvm/installer.py b/client/tests/kvm/installer.py
new file mode 100644
index 0000000..a757223
--- /dev/null
+++ b/client/tests/kvm/installer.py
@@ -0,0 +1,781 @@
+import os, logging, datetime, glob
+import shutil
+from autotest_lib.client.bin import utils, os_dep
+from autotest_lib.client.common_lib import error
+import kvm_utils
+
+
def check_configure_options(script_path):
    """
    Return the list of available options (flags) of a given kvm configure
    build script.

    @param script_path: Path to the configure script
    """
    full_path = os.path.abspath(script_path)
    raw_help = utils.system_output('%s --help' % full_path, ignore_status=True)
    options = []
    for raw_line in raw_help.split("\n"):
        stripped = raw_line.lstrip()
        if not stripped.startswith("--"):
            continue
        # Keep only the flag itself, dropping any '=VALUE' suffix
        flag = stripped.split()[0].split("=")[0]
        options.append(flag)
    return options
+
+
def kill_qemu_processes():
    """
    Kill all qemu processes, then kill any process still holding /dev/kvm open.
    """
    logging.debug("Killing any qemu processes that might be left behind")
    utils.system("pkill qemu", ignore_status=True)
    # Let's double check to see if some other process is holding /dev/kvm.
    # Bug fix: /dev/kvm is a character device node, so os.path.isfile()
    # always returned False and the fuser fallback never ran; use
    # os.path.exists() instead.
    if os.path.exists("/dev/kvm"):
        utils.system("fuser -k /dev/kvm", ignore_status=True)
+
+
def cpu_vendor():
    """
    Detect the host CPU vendor by probing /proc/cpuinfo for the vmx flag.

    @return: 'intel' when the vmx flag is present, 'amd' otherwise.
    """
    if os.system("grep vmx /proc/cpuinfo 1>/dev/null") == 0:
        vendor = "intel"
    else:
        vendor = "amd"
    logging.debug("Detected CPU vendor as '%s'", vendor)
    return vendor
+
+
def _unload_kvm_modules(mod_list):
    """Unload the given KVM modules, in reverse of their load order."""
    logging.info("Unloading previously loaded KVM modules")
    for mod_name in mod_list[::-1]:
        utils.unload_module(mod_name)
+
+
+def _load_kvm_modules(mod_list, module_dir=None, load_stock=False):
+    """
+    Just load the KVM modules, without killing Qemu or unloading previous
+    modules.
+
+    Load modules present on any sub directory of module_dir. Function will walk
+    through module_dir until it finds the modules.
+
+    @param module_dir: Directory where the KVM modules are located.
+    @param load_stock: Whether we are going to load system kernel modules.
+    @param extra_modules: List of extra modules to load.
+    """
+    if module_dir:
+        logging.info("Loading the built KVM modules...")
+        kvm_module_path = None
+        kvm_vendor_module_path = None
+        abort = False
+
+        list_modules = ['%s.ko' % (m) for m in mod_list]
+
+        list_module_paths = []
+        for folder, subdirs, files in os.walk(module_dir):
+            for module in list_modules:
+                if module in files:
+                    module_path = os.path.join(folder, module)
+                    list_module_paths.append(module_path)
+
+        # We might need to arrange the modules in the correct order
+        # to avoid module load problems
+        list_modules_load = []
+        for module in list_modules:
+            for module_path in list_module_paths:
+                if os.path.basename(module_path) == module:
+                    list_modules_load.append(module_path)
+
+        if len(list_module_paths) != len(list_modules):
+            logging.error("KVM modules not found. If you don't want to use the "
+                          "modules built by this test, make sure the option "
+                          "load_modules: 'no' is marked on the test control "
+                          "file.")
+            raise error.TestError("The modules %s were requested to be loaded, "
+                                  "but the only modules found were %s" %
+                                  (list_modules, list_module_paths))
+
+        for module_path in list_modules_load:
+            try:
+                utils.system("insmod %s" % module_path)
+            except Exception, e:
+                raise error.TestFail("Failed to load KVM modules: %s" % e)
+
+    if load_stock:
+        logging.info("Loading current system KVM modules...")
+        for module in mod_list:
+            utils.system("modprobe %s" % module)
+
+
def create_symlinks(test_bindir, prefix=None, bin_list=None, unittest=None):
    """
    Create symbolic links for the appropriate qemu and qemu-img commands on
    the kvm test bindir.

    @param test_bindir: KVM test bindir
    @param prefix: KVM prefix path
    @param bin_list: List of qemu binaries to link
    @param unittest: Path to configuration file unittests.cfg
    @raise error.TestError: If prefix is used but the qemu or qemu-img
            binaries cannot be found under it.
    """
    qemu_path = os.path.join(test_bindir, "qemu")
    qemu_img_path = os.path.join(test_bindir, "qemu-img")
    qemu_unittest_path = os.path.join(test_bindir, "unittests")
    # lexists() also catches dangling symlinks left by previous runs
    if os.path.lexists(qemu_path):
        os.unlink(qemu_path)
    if os.path.lexists(qemu_img_path):
        os.unlink(qemu_img_path)
    if unittest and os.path.lexists(qemu_unittest_path):
        os.unlink(qemu_unittest_path)

    logging.debug("Linking qemu binaries")

    if bin_list:
        # Renamed loop variable: 'bin' shadowed the builtin of the same name
        for bin_path in bin_list:
            if os.path.basename(bin_path) == 'qemu-kvm':
                os.symlink(bin_path, qemu_path)
            elif os.path.basename(bin_path) == 'qemu-img':
                os.symlink(bin_path, qemu_img_path)

    elif prefix:
        kvm_qemu = os.path.join(prefix, "bin", "qemu-system-x86_64")
        if not os.path.isfile(kvm_qemu):
            raise error.TestError('Invalid qemu path')
        kvm_qemu_img = os.path.join(prefix, "bin", "qemu-img")
        if not os.path.isfile(kvm_qemu_img):
            raise error.TestError('Invalid qemu-img path')
        os.symlink(kvm_qemu, qemu_path)
        os.symlink(kvm_qemu_img, qemu_img_path)

    if unittest:
        logging.debug("Linking unittest dir")
        os.symlink(unittest, qemu_unittest_path)
+
+
def install_roms(rom_dir, prefix):
    """Copy every *.bin rom file from rom_dir into <prefix>/share/qemu."""
    logging.debug("Path to roms specified. Copying roms to install prefix")
    destination = os.path.join(prefix, 'share', 'qemu')
    for source_path in glob.glob('%s/*.bin' % rom_dir):
        target_path = os.path.join(destination, os.path.basename(source_path))
        logging.debug("Copying rom file %s to %s", source_path, target_path)
        shutil.copy(source_path, target_path)
+
+
def save_build(build_dir, dest_dir):
    """Pack build_dir into a .tar.bz2 tarball and move it into dest_dir."""
    logging.debug('Saving the result of the build on %s', dest_dir)
    base_name = os.path.basename(build_dir)
    tarball_name = '%s.tar.bz2' % base_name
    os.chdir(os.path.dirname(build_dir))
    utils.system('tar -cjf %s %s' % (tarball_name, base_name))
    shutil.move(tarball_name, os.path.join(dest_dir, tarball_name))
+
+
class KvmInstallException(Exception):
    """Base class for all KVM installer errors."""
    pass


class FailedKvmInstall(KvmInstallException):
    """Raised when an install step failed and its result is unusable."""
    pass


class KvmNotInstalled(KvmInstallException):
    """Raised when installer facilities are used before installation."""
    pass
+
+
class BaseInstaller(object):
    """
    Common behavior shared by all KVM installers: parameter parsing and
    KVM kernel module load/unload bookkeeping. Subclasses implement
    install().
    """
    # default value for load_stock argument
    load_stock_modules = True
    def __init__(self, mode=None):
        # Install mode string (e.g. 'git', 'yum'); see installer_classes.
        self.install_mode = mode
        # Filled in by set_install_params(); None means "params not set yet".
        self._full_module_list = None

    def set_install_params(self, test, params):
        """
        Read the test parameters and initialize installer attributes.

        @param test: kvm test object (provides srcdir, bindir, resultsdir).
        @param params: Dictionary with test parameters.
        """
        self.params = params

        # NOTE(review): an empty 'load_modules' value counts as 'yes' here,
        # and any value other than ''/'yes'/'no' leaves should_load_modules
        # unset -- presumably only those three values ever occur; confirm.
        load_modules = params.get('load_modules', 'no')
        if not load_modules or load_modules == 'yes':
            self.should_load_modules = True
        elif load_modules == 'no':
            self.should_load_modules = False
        default_extra_modules = str(None)
        # SECURITY: eval() of a test parameter executes arbitrary code;
        # assumes control files are trusted input -- TODO confirm.
        self.extra_modules = eval(params.get("extra_modules",
                                             default_extra_modules))

        self.cpu_vendor = cpu_vendor()

        self.srcdir = test.srcdir
        if not os.path.isdir(self.srcdir):
            os.makedirs(self.srcdir)

        self.test_bindir = test.bindir
        self.results_dir = test.resultsdir

        # KVM build prefix, for the modes that do need it
        prefix = os.path.join(test.bindir, 'build')
        self.prefix = os.path.abspath(prefix)

        # Current host kernel directory
        default_host_kernel_source = '/lib/modules/%s/build' % os.uname()[2]
        self.host_kernel_srcdir = params.get('host_kernel_source',
                                             default_host_kernel_source)

        # Extra parameters that can be passed to the configure script
        self.extra_configure_options = params.get('extra_configure_options',
                                                  None)

        # Do we want to save the result of the build on test.resultsdir?
        self.save_results = True
        save_results = params.get('save_results', 'no')
        if save_results == 'no':
            self.save_results = False

        # Materialize the module list now so full_module_list() works later.
        self._full_module_list = list(self._module_list())


    def full_module_list(self):
        """Return the module list used by the installer

        Used by the module_probe test, to avoid using utils.unload_module().
        """
        if self._full_module_list is None:
            raise KvmNotInstalled("KVM modules not installed yet (installer: %s)" % (type(self)))
        return self._full_module_list


    def _module_list(self):
        """Generate the list of modules that need to be loaded
        """
        yield 'kvm'
        # Vendor-specific module: kvm-intel or kvm-amd
        yield 'kvm-%s' % (self.cpu_vendor)
        if self.extra_modules:
            for module in self.extra_modules:
                yield module


    def _load_modules(self, mod_list):
        """
        Load the KVM modules

        May be overridden by subclasses.
        """
        _load_kvm_modules(mod_list, load_stock=self.load_stock_modules)


    def load_modules(self, mod_list=None):
        # Default to the module list computed at set_install_params() time.
        if mod_list is None:
            mod_list = self.full_module_list()
        self._load_modules(mod_list)


    def _unload_modules(self, mod_list=None):
        """
        Just unload the KVM modules, without trying to kill Qemu
        """
        if mod_list is None:
            mod_list = self.full_module_list()
        _unload_kvm_modules(mod_list)


    def unload_modules(self, mod_list=None):
        """
        Kill Qemu and unload the KVM modules
        """
        kill_qemu_processes()
        self._unload_modules(mod_list)


    def reload_modules(self):
        """
        Reload the KVM modules after killing Qemu and unloading the current modules
        """
        self.unload_modules()
        self.load_modules()


    def reload_modules_if_needed(self):
        # Honors the 'load_modules' test parameter parsed above.
        if self.should_load_modules:
            self.reload_modules()
+
+
class YumInstaller(BaseInstaller):
    """
    Class that uses yum to install and remove packages.
    """
    load_stock_modules = True

    def set_install_params(self, test, params):
        """Read package lists from params and verify rpm/yum are present."""
        super(YumInstaller, self).set_install_params(test, params)
        # Fail early if the package management tools are missing
        os_dep.command("rpm")
        os_dep.command("yum")

        self.pkg_list = eval(params.get("pkg_list",
                                        str(['qemu-kvm', 'qemu-kvm-tools'])))
        self.pkg_path_list = eval(params.get("pkg_path_list", str(None)))
        self.qemu_bin_paths = eval(params.get("qemu_bin_paths",
                                              str(['/usr/bin/qemu-kvm',
                                                   '/usr/bin/qemu-img'])))


    def _clean_previous_installs(self):
        """Kill qemu processes and remove every package in self.pkg_list."""
        kill_qemu_processes()
        removable_packages = "".join(" %s" % pkg for pkg in self.pkg_list)
        utils.system("yum remove -y %s" % removable_packages)


    def _get_packages(self):
        """Download each RPM from self.pkg_path_list into self.srcdir."""
        for pkg in self.pkg_path_list:
            dst = os.path.join(self.srcdir, os.path.basename(pkg))
            utils.get_file(pkg, dst)


    def _install_packages(self):
        """
        Install all downloaded packages.
        """
        os.chdir(self.srcdir)
        utils.system("yum install --nogpgcheck -y *.rpm")


    def install(self):
        """Clean, download, install, link binaries and reload modules."""
        self._clean_previous_installs()
        self._get_packages()
        self._install_packages()
        create_symlinks(test_bindir=self.test_bindir,
                        bin_list=self.qemu_bin_paths)
        self.reload_modules_if_needed()
        if self.save_results:
            save_build(self.srcdir, self.results_dir)
+
+
class KojiInstaller(YumInstaller):
    """
    Class that handles installing KVM from the fedora build service, koji.
    It uses yum to install and remove packages.
    """
    load_stock_modules = True

    def set_install_params(self, test, params):
        """
        Gets parameters and initializes the package downloader.

        @param test: kvm test object
        @param params: Dictionary with test arguments
        """
        super(KojiInstaller, self).set_install_params(test, params)
        self.src_pkg = params.get("src_pkg", 'qemu')
        self.tag = params.get("koji_tag", None)
        self.build = params.get("koji_build", None)
        self.koji_cmd = params.get("koji_cmd", '/usr/bin/koji')


    def _get_packages(self):
        """
        Downloads the specific arch RPMs for the specific build name.
        """
        fetcher = kvm_utils.KojiDownloader(cmd=self.koji_cmd)
        fetcher.get(src_package=self.src_pkg, tag=self.tag,
                    build=self.build, dst_dir=self.srcdir)


    def install(self):
        """Clean, fetch from koji, install, link binaries, reload modules."""
        super(KojiInstaller, self)._clean_previous_installs()
        self._get_packages()
        super(KojiInstaller, self)._install_packages()
        create_symlinks(test_bindir=self.test_bindir,
                        bin_list=self.qemu_bin_paths)
        self.reload_modules_if_needed()
        if self.save_results:
            save_build(self.srcdir, self.results_dir)
+
+
class SourceDirInstaller(BaseInstaller):
    """
    Class that handles building/installing KVM directly from a tarball or
    a single source code dir.
    """
    def set_install_params(self, test, params):
        """
        Initializes class attributes, and retrieves KVM code.

        @param test: kvm test object
        @param params: Dictionary with test arguments
        @raise error.TestError: If a parameter required by the selected
                install mode is missing.
        """
        super(SourceDirInstaller, self).set_install_params(test, params)

        self.mod_install_dir = os.path.join(self.prefix, 'modules')
        self.installed_kmods = False  # it will be set to True in case we
                                      # installed our own modules

        srcdir = params.get("srcdir", None)
        self.path_to_roms = params.get("path_to_rom_images", None)

        if self.install_mode == 'localsrc':
            if srcdir is None:
                # Bug fix: the concatenated message fragments lacked spaces
                # ("specifiedbut ... thecontrol file.")
                raise error.TestError("Install from source directory specified "
                                      "but no source directory provided on the "
                                      "control file.")
            else:
                shutil.copytree(srcdir, self.srcdir)

        if self.install_mode == 'release':
            release_tag = params.get("release_tag")
            release_dir = params.get("release_dir")
            release_listing = params.get("release_listing")
            logging.info("Installing KVM from release tarball")
            if not release_tag:
                release_tag = kvm_utils.get_latest_kvm_release_tag(
                                                                release_listing)
            tarball = os.path.join(release_dir, 'kvm', release_tag,
                                   "kvm-%s.tar.gz" % release_tag)
            logging.info("Retrieving release kvm-%s" % release_tag)
            tarball = utils.unmap_url("/", tarball, "/tmp")

        elif self.install_mode == 'snapshot':
            logging.info("Installing KVM from snapshot")
            snapshot_dir = params.get("snapshot_dir")
            if not snapshot_dir:
                raise error.TestError("Snapshot dir not provided")
            snapshot_date = params.get("snapshot_date")
            if not snapshot_date:
                # Take yesterday's snapshot
                d = (datetime.date.today() -
                     datetime.timedelta(1)).strftime("%Y%m%d")
            else:
                d = snapshot_date
            tarball = os.path.join(snapshot_dir, "kvm-snapshot-%s.tar.gz" % d)
            logging.info("Retrieving kvm-snapshot-%s" % d)
            tarball = utils.unmap_url("/", tarball, "/tmp")

        elif self.install_mode == 'localtar':
            tarball = params.get("tarball")
            if not tarball:
                raise error.TestError("KVM Tarball install specified but no"
                                      " tarball provided on control file.")
            logging.info("Installing KVM from a local tarball")
            # Bug fix: the format string previously had no argument, so the
            # log line read literally "Using tarball %s".
            logging.info("Using tarball %s", tarball)
            tarball = utils.unmap_url("/", params.get("tarball"), "/tmp")

        if self.install_mode in ['release', 'snapshot', 'localtar']:
            utils.extract_tarball_to_dir(tarball, self.srcdir)

        if self.install_mode in ['release', 'snapshot', 'localtar', 'srcdir']:
            self.repo_type = kvm_utils.check_kvm_source_dir(self.srcdir)
            configure_script = os.path.join(self.srcdir, 'configure')
            self.configure_options = check_configure_options(configure_script)


    def _build(self):
        """Configure and build the KVM source tree in self.srcdir."""
        make_jobs = utils.count_cpus()
        os.chdir(self.srcdir)
        # For testing purposes, it's better to build qemu binaries with
        # debugging symbols, so we can extract more meaningful stack traces.
        cfg = "./configure --prefix=%s" % self.prefix
        if "--disable-strip" in self.configure_options:
            cfg += " --disable-strip"
        steps = [cfg, "make clean", "make -j %s" % make_jobs]
        logging.info("Building KVM")
        for step in steps:
            utils.system(step)


    def _install_kmods_old_userspace(self, userspace_path):
        """
        Run the module install command.

        This is for the "old userspace" code, that contained a 'kernel'
        subdirectory with the kmod build code.

        The code would be much simpler if we could specify the module install
        path as parameter to the toplevel Makefile. As we can't do that and
        the module install code doesn't use --prefix, we have to call
        'make -C kernel install' directly, setting the module directory
        parameters.

        If the userspace tree doesn't have a 'kernel' subdirectory, the
        module install step will be skipped.

        @param userspace_path: the path the kvm-userspace directory
        """
        kdir = os.path.join(userspace_path, 'kernel')
        if os.path.isdir(kdir):
            os.chdir(kdir)
            # INSTALLDIR is the target dir for the modules
            # ORIGMODDIR is the dir where the old modules will be removed. we
            #            don't want to mess with the system modules, so set it
            #            to a non-existing directory
            utils.system('make install INSTALLDIR=%s ORIGMODDIR=/tmp/no-old-modules' % (self.mod_install_dir))
            self.installed_kmods = True


    def _install_kmods(self, kmod_path):
        """Run the module install command for the kmod-kvm repository

        @param kmod_path: the path to the kmod-kvm.git working copy
        """
        os.chdir(kmod_path)
        utils.system('make modules_install DESTDIR=%s' % (self.mod_install_dir))
        self.installed_kmods = True


    def _install(self):
        """Install userspace (and modules, if built), roms and symlinks."""
        os.chdir(self.srcdir)
        logging.info("Installing KVM userspace")
        if self.repo_type == 1:
            utils.system("make -C qemu install")
            self._install_kmods_old_userspace(self.srcdir)
        elif self.repo_type == 2:
            utils.system("make install")
        if self.path_to_roms:
            install_roms(self.path_to_roms, self.prefix)
        create_symlinks(self.test_bindir, self.prefix)


    def _load_modules(self, mod_list):
        # load the installed KVM modules in case we installed them
        # ourselves. Otherwise, just load the system modules.
        if self.installed_kmods:
            logging.info("Loading installed KVM modules")
            _load_kvm_modules(mod_list, module_dir=self.mod_install_dir)
        else:
            logging.info("Loading stock KVM modules")
            _load_kvm_modules(mod_list, load_stock=True)


    def install(self):
        """Build, install, reload modules and optionally save the build."""
        self._build()
        self._install()
        self.reload_modules_if_needed()
        if self.save_results:
            save_build(self.srcdir, self.results_dir)
+
+
class GitInstaller(SourceDirInstaller):
    """
    Builds and installs KVM from git repositories (userspace plus optional
    kernel, kmod and unittest repositories).
    """
    def _pull_code(self):
        """
        Retrieves code from git repositories.

        @raise error.TestError: If the userspace git repository parameter
                is missing.
        """
        params = self.params

        kernel_repo = params.get("git_repo")
        user_repo = params.get("user_git_repo")
        kmod_repo = params.get("kmod_repo")
        test_repo = params.get("test_git_repo")

        kernel_branch = params.get("kernel_branch", "master")
        user_branch = params.get("user_branch", "master")
        kmod_branch = params.get("kmod_branch", "master")
        test_branch = params.get("test_branch", "master")

        kernel_lbranch = params.get("kernel_lbranch", "master")
        user_lbranch = params.get("user_lbranch", "master")
        kmod_lbranch = params.get("kmod_lbranch", "master")
        test_lbranch = params.get("test_lbranch", "master")

        kernel_commit = params.get("kernel_commit", None)
        user_commit = params.get("user_commit", None)
        kmod_commit = params.get("kmod_commit", None)
        test_commit = params.get("test_commit", None)

        kernel_patches = eval(params.get("kernel_patches", "[]"))
        user_patches = eval(params.get("user_patches", "[]"))
        # Bug fix: this previously read "user_patches" again (copy/paste),
        # making it impossible to specify kmod patches independently.
        kmod_patches = eval(params.get("kmod_patches", "[]"))

        if not user_repo:
            message = "KVM user git repository path not specified"
            logging.error(message)
            raise error.TestError(message)

        userspace_srcdir = os.path.join(self.srcdir, "kvm_userspace")
        kvm_utils.get_git_branch(user_repo, user_branch, userspace_srcdir,
                                 user_commit, user_lbranch)
        self.userspace_srcdir = userspace_srcdir

        if user_patches:
            os.chdir(self.userspace_srcdir)
            for patch in user_patches:
                utils.get_file(patch, os.path.join(self.userspace_srcdir,
                                                   os.path.basename(patch)))
                # Bug fix: 'patch -p1 FILE' treats FILE as the file to be
                # patched and waits for a diff on stdin; the patch must be
                # fed through stdin redirection.
                utils.system('patch -p1 < %s' % os.path.basename(patch))

        if test_repo:
            test_srcdir = os.path.join(self.srcdir, "kvm-unit-tests")
            kvm_utils.get_git_branch(test_repo, test_branch, test_srcdir,
                                     test_commit, test_lbranch)
            unittest_cfg = os.path.join(test_srcdir, 'x86',
                                        'unittests.cfg')
            self.test_srcdir = test_srcdir
        else:
            unittest_cfg = os.path.join(userspace_srcdir, 'kvm', 'test', 'x86',
                                        'unittests.cfg')

        self.unittest_cfg = None
        if os.path.isfile(unittest_cfg):
            self.unittest_cfg = unittest_cfg

        if kernel_repo:
            kernel_srcdir = os.path.join(self.srcdir, "kvm")
            kvm_utils.get_git_branch(kernel_repo, kernel_branch, kernel_srcdir,
                                     kernel_commit, kernel_lbranch)
            self.kernel_srcdir = kernel_srcdir
            if kernel_patches:
                os.chdir(self.kernel_srcdir)
                # Bug fix: patches were downloaded into userspace_srcdir but
                # applied from kernel_srcdir; download where they are applied.
                for patch in kernel_patches:
                    utils.get_file(patch, os.path.join(self.kernel_srcdir,
                                                       os.path.basename(patch)))
                    utils.system('patch -p1 < %s' % os.path.basename(patch))
        else:
            self.kernel_srcdir = None

        if kmod_repo:
            kmod_srcdir = os.path.join(self.srcdir, "kvm_kmod")
            kvm_utils.get_git_branch(kmod_repo, kmod_branch, kmod_srcdir,
                                     kmod_commit, kmod_lbranch)
            self.kmod_srcdir = kmod_srcdir
            if kmod_patches:
                os.chdir(self.kmod_srcdir)
                # Same download-directory fix as for the kernel patches above
                for patch in kmod_patches:
                    utils.get_file(patch, os.path.join(self.kmod_srcdir,
                                                       os.path.basename(patch)))
                    utils.system('patch -p1 < %s' % os.path.basename(patch))
        else:
            self.kmod_srcdir = None

        configure_script = os.path.join(self.userspace_srcdir, 'configure')
        self.configure_options = check_configure_options(configure_script)


    def _build(self):
        """Build kmod/kernel modules (when present) then the userspace code."""
        make_jobs = utils.count_cpus()
        cfg = './configure'
        if self.kmod_srcdir:
            logging.info('Building KVM modules')
            os.chdir(self.kmod_srcdir)
            module_build_steps = [cfg,
                                  'make clean',
                                  'make sync LINUX=%s' % self.kernel_srcdir,
                                  'make']
        elif self.kernel_srcdir:
            logging.info('Building KVM modules')
            os.chdir(self.userspace_srcdir)
            cfg += ' --kerneldir=%s' % self.host_kernel_srcdir
            module_build_steps = [cfg,
                            'make clean',
                            'make -C kernel LINUX=%s sync' % self.kernel_srcdir]
        else:
            module_build_steps = []

        for step in module_build_steps:
            utils.run(step)

        logging.info('Building KVM userspace code')
        os.chdir(self.userspace_srcdir)
        cfg += ' --prefix=%s' % self.prefix
        if "--disable-strip" in self.configure_options:
            cfg += ' --disable-strip'
        if self.extra_configure_options:
            cfg += ' %s' % self.extra_configure_options
        utils.system(cfg)
        utils.system('make clean')
        utils.system('make -j %s' % make_jobs)

        self.unittest_prefix = None
        if self.unittest_cfg:
            os.chdir(os.path.dirname(os.path.dirname(self.unittest_cfg)))
            utils.system('./configure --prefix=%s' % self.prefix)
            utils.system('make')
            utils.system('make install')
            self.unittest_prefix = os.path.join(self.prefix, 'share', 'qemu',
                                                'tests')


    def _install(self):
        """Install userspace, modules (as appropriate), roms and symlinks."""
        if self.kernel_srcdir:
            os.chdir(self.userspace_srcdir)
            # the kernel module install with --prefix doesn't work, and DESTDIR
            # wouldn't work for the userspace stuff, so we clear WANT_MODULE:
            utils.system('make install WANT_MODULE=')
            # and install the old-style-kmod modules manually:
            self._install_kmods_old_userspace(self.userspace_srcdir)
        elif self.kmod_srcdir:
            # if we have a kmod repository, it is easier:
            # 1) install userspace:
            os.chdir(self.userspace_srcdir)
            utils.system('make install')
            # 2) install kmod:
            self._install_kmods(self.kmod_srcdir)
        else:
            # if we don't have kmod sources, we just install
            # userspace:
            os.chdir(self.userspace_srcdir)
            utils.system('make install')

        if self.path_to_roms:
            install_roms(self.path_to_roms, self.prefix)
        create_symlinks(test_bindir=self.test_bindir, prefix=self.prefix,
                        bin_list=None,
                        unittest=self.unittest_prefix)


    def install(self):
        """Pull, build, install, reload modules, optionally save results."""
        self._pull_code()
        self._build()
        self._install()
        self.reload_modules_if_needed()
        if self.save_results:
            save_build(self.srcdir, self.results_dir)
+
+
class PreInstalledKvm(BaseInstaller):
    """Installer used when KVM is expected to be present on the host."""
    # load_modules() will use the stock modules:
    load_stock_modules = True

    def install(self):
        """No-op install: KVM should already be set up on this machine."""
        logging.info("Expecting KVM to be already installed. Doing nothing")
+
+
class FailedInstaller:
    """
    Stand-in object returned in place of a real installer when KVM
    installation fails.

    Guarantees that no installer functionality gets used after a failed
    install.
    """
    def __init__(self, msg="KVM install failed"):
        # Reason recorded at install-failure time, reported on any later use
        self._msg = msg


    def load_modules(self):
        """Will refuse to load the KVM modules as install failed"""
        raise FailedKvmInstall("KVM modules not available. reason: %s" % self._msg)
+
+
# Maps the value of the 'install_mode' (or legacy 'mode') test parameter
# to the installer class implementing it.
installer_classes = {
    'localsrc': SourceDirInstaller,
    'localtar': SourceDirInstaller,
    'release': SourceDirInstaller,
    'snapshot': SourceDirInstaller,
    'git': GitInstaller,
    'yum': YumInstaller,
    'koji': KojiInstaller,
    'preinstalled': PreInstalledKvm,
}
+
+
def _installer_class(install_mode):
    """
    Look up the installer class registered for install_mode.

    @raise error.TestError: If install_mode is unknown.
    """
    klass = installer_classes.get(install_mode)
    if klass is not None:
        return klass
    raise error.TestError('Invalid or unsupported'
                          ' install mode: %s' % install_mode)
+
+
def make_installer(params):
    """
    Instantiate the installer selected by the test parameters.

    The 'install_mode' parameter takes priority over the legacy 'mode'
    parameter.
    """
    mode = params.get("install_mode", params.get("mode"))
    return _installer_class(mode)(mode)
diff --git a/client/tests/kvm/kvm.py b/client/tests/kvm/kvm.py
index f656238..b88fd51 100644
--- a/client/tests/kvm/kvm.py
+++ b/client/tests/kvm/kvm.py
@@ -1,4 +1,4 @@
-import sys, os, time, logging, imp
+import os, logging, imp
 from autotest_lib.client.bin import test
 from autotest_lib.client.common_lib import error
 import kvm_utils, kvm_preprocessing
@@ -21,9 +21,12 @@
             (Online doc - Getting started with KVM testing)
     """
     version = 1
-    env_version = 0
+    env_version = 1
 
     def run_once(self, params):
+        # Convert params to a Params object
+        params = kvm_utils.Params(params)
+
         # Report the parameters we've received and write them as keyvals
         logging.debug("Test parameters:")
         keys = params.keys()
@@ -40,8 +43,7 @@
         logging.info("Unpickling env. You may see some harmless error "
                      "messages.")
         env_filename = os.path.join(self.bindir, params.get("env", "env"))
-        env = kvm_utils.load_env(env_filename, self.env_version)
-        logging.debug("Contents of environment: %s", env)
+        env = kvm_utils.Env(env_filename, self.env_version)
 
         test_passed = False
 
@@ -66,13 +68,13 @@
                     try:
                         kvm_preprocessing.preprocess(self, params, env)
                     finally:
-                        kvm_utils.dump_env(env, env_filename)
+                        env.save()
                     # Run the test function
                     run_func = getattr(test_module, "run_%s" % t_type)
                     try:
                         run_func(self, params, env)
                     finally:
-                        kvm_utils.dump_env(env, env_filename)
+                        env.save()
                     test_passed = True
 
                 except Exception, e:
@@ -82,7 +84,7 @@
                         kvm_preprocessing.postprocess_on_error(
                             self, params, env)
                     finally:
-                        kvm_utils.dump_env(env, env_filename)
+                        env.save()
                     raise
 
             finally:
@@ -96,15 +98,14 @@
                         logging.error("Exception raised during "
                                       "postprocessing: %s", e)
                 finally:
-                    kvm_utils.dump_env(env, env_filename)
-                    logging.debug("Contents of environment: %s", env)
+                    env.save()
 
         except Exception, e:
             if params.get("abort_on_error") != "yes":
                 raise
             # Abort on error
             logging.info("Aborting job (%s)", e)
-            for vm in kvm_utils.env_get_all_vms(env):
+            for vm in env.get_all_vms():
                 if vm.is_dead():
                     continue
                 logging.info("VM '%s' is alive.", vm.name)
diff --git a/client/tests/kvm/kvm_config.py b/client/tests/kvm/kvm_config.py
index 4fc1029..13cdfe2 100755
--- a/client/tests/kvm/kvm_config.py
+++ b/client/tests/kvm/kvm_config.py
@@ -9,7 +9,7 @@
 import common
 import kvm_utils
 from autotest_lib.client.common_lib import error
-from autotest_lib.client.common_lib import logging_config, logging_manager
+from autotest_lib.client.common_lib import logging_manager
 
 
 class config:
@@ -32,7 +32,6 @@
         self.object_cache = []
         self.object_cache_indices = {}
         self.regex_cache = {}
-        self.filename = filename
         self.debug = debug
         if filename:
             self.parse_file(filename)
@@ -46,9 +45,8 @@
         """
         if not os.path.exists(filename):
             raise IOError("File %s not found" % filename)
-        self.filename = filename
         str = open(filename).read()
-        self.list = self.parse(configreader(str), self.list)
+        self.list = self.parse(configreader(filename, str), self.list)
 
 
     def parse_string(self, str):
@@ -57,7 +55,7 @@
 
         @param str: String to parse.
         """
-        self.list = self.parse(configreader(str), self.list)
+        self.list = self.parse(configreader('<string>', str, real_file=False), self.list)
 
 
     def fork_and_parse(self, filename=None, str=None):
@@ -297,7 +295,7 @@
                 # (inside an exception or inside subvariants)
                 if restricted:
                     e_msg = "Using variants in this context is not allowed"
-                    raise error.AutotestError(e_msg)
+                    cr.raise_error(e_msg)
                 if self.debug and not restricted:
                     _debug_print(indented_line,
                                  "Entering variants block (%d dicts in "
@@ -337,20 +335,21 @@
                     continue
                 if self.debug and not restricted:
                     _debug_print(indented_line, "Entering file %s" % words[1])
-                if self.filename:
-                    filename = os.path.join(os.path.dirname(self.filename),
-                                            words[1])
-                    if os.path.exists(filename):
-                        str = open(filename).read()
-                        list = self.parse(configreader(str), list, restricted)
-                        if self.debug and not restricted:
-                            _debug_print("", "Leaving file %s" % words[1])
-                    else:
-                        logging.warning("Cannot include %s -- file not found",
-                                        filename)
-                else:
-                    logging.warning("Cannot include %s because no file is "
-                                    "currently open", words[1])
+
+                cur_filename = cr.real_filename()
+                if cur_filename is None:
+                    cr.raise_error("'include' is valid only when parsing a file")
+
+                filename = os.path.join(os.path.dirname(cur_filename),
+                                        words[1])
+                if not os.path.exists(filename):
+                    cr.raise_error("Cannot include %s -- file not found" % (filename))
+
+                str = open(filename).read()
+                list = self.parse(configreader(filename, str), list, restricted)
+                if self.debug and not restricted:
+                    _debug_print("", "Leaving file %s" % words[1])
+
                 continue
 
             # Parse multi-line exceptions
@@ -539,15 +538,20 @@
     whose readline() and/or seek() methods seem to be slow.
     """
 
-    def __init__(self, str):
+    def __init__(self, filename, str, real_file=True):
         """
         Initialize the reader.
 
+        @param filename: the filename we're parsing
         @param str: The string to parse.
+        @param real_file: Indicates if filename represents a real file. Defaults to True.
         """
+        self.filename = filename
+        self.is_real_file = real_file
         self.line_index = 0
         self.lines = []
-        for line in str.splitlines():
+        self.real_number = []
+        for num, line in enumerate(str.splitlines()):
             line = line.rstrip().expandtabs()
             stripped_line = line.strip()
             indent = len(line) - len(stripped_line)
@@ -556,8 +560,17 @@
                 or stripped_line.startswith("//")):
                 continue
             self.lines.append((line, stripped_line, indent))
+            self.real_number.append(num + 1)
 
 
+    def real_filename(self):
+        """Returns the filename we're reading, in case it is a real file
+
+        @return: the filename we are parsing, or None if we're not parsing a real file
+        """
+        if self.is_real_file:
+            return self.filename
+
     def get_next_line(self):
         """
         Get the next non-empty, non-comment line in the string.
@@ -588,6 +601,18 @@
         """
         self.line_index = index
 
+    def raise_error(self, msg):
+        """Raise an error related to the last line returned by get_next_line()
+        """
+        if self.line_index == 0: # nothing was read. shouldn't happen, but...
+            line_id = 'BEGIN'
+        elif self.line_index >= len(self.lines): # past EOF
+            line_id = 'EOF'
+        else:
+            # line_index is the _next_ line. get the previous one
+            line_id = str(self.real_number[self.line_index-1])
+        raise error.AutotestError("%s:%s: %s" % (self.filename, line_id, msg))
+
 
 # Array structure:
 # ----------------
@@ -682,18 +707,21 @@
     options, args = parser.parse_args()
     debug = options.debug
     if args:
-        filename = args[0]
+        filenames = args
     else:
-        filename = os.path.join(os.path.dirname(sys.argv[0]), "tests.cfg")
+        filenames = [os.path.join(os.path.dirname(sys.argv[0]), "tests.cfg")]
 
     # Here we configure the stand alone program to use the autotest
     # logging system.
     logging_manager.configure_logging(kvm_utils.KvmLoggingConfig(),
                                       verbose=debug)
-    dicts = config(filename, debug=debug).get_generator()
+    cfg = config(debug=debug)
+    for fn in filenames:
+        cfg.parse_file(fn)
+    dicts = cfg.get_generator()
     for i, dict in enumerate(dicts):
-        logging.info("Dictionary #%d:", i)
+        print "Dictionary #%d:" % (i)
         keys = dict.keys()
         keys.sort()
         for key in keys:
-            logging.info("    %s = %s", key, dict[key])
+            print "    %s = %s" % (key, dict[key])
diff --git a/client/tests/kvm/kvm_monitor.py b/client/tests/kvm/kvm_monitor.py
index 7e6b594..8cf2441 100644
--- a/client/tests/kvm/kvm_monitor.py
+++ b/client/tests/kvm/kvm_monitor.py
@@ -22,7 +22,13 @@
 
 
 class MonitorSocketError(MonitorError):
-    pass
+    def __init__(self, msg, e):
+        Exception.__init__(self, msg, e)
+        self.msg = msg
+        self.e = e
+
+    def __str__(self):
+        return "%s    (%s)" % (self.msg, self.e)
 
 
 class MonitorLockError(MonitorError):
@@ -45,8 +51,8 @@
         self.data = data
 
     def __str__(self):
-        return ("QMP command %r failed (arguments: %r, error message: %r)" %
-                (self.cmd, self.qmp_args, self.data))
+        return ("QMP command %r failed    (arguments: %r,    "
+                "error message: %r)" % (self.cmd, self.qmp_args, self.data))
 
 
 class Monitor:
@@ -119,15 +125,26 @@
         while self._data_available():
             try:
                 data = self._socket.recv(1024)
-            except socket.error, (errno, msg):
-                raise MonitorSocketError("Could not receive data from monitor "
-                                         "(%s)" % msg)
+            except socket.error, e:
+                raise MonitorSocketError("Could not receive data from monitor",
+                                         e)
             if not data:
                 break
             s += data
         return s
 
 
+    def is_responsive(self):
+        """
+        Return True iff the monitor is responsive.
+        """
+        try:
+            self.verify_responsive()
+            return True
+        except MonitorError:
+            return False
+
+
 class HumanMonitor(Monitor):
     """
     Wraps "human monitor" commands.
@@ -201,9 +218,9 @@
         try:
             try:
                 self._socket.sendall(cmd + "\n")
-            except socket.error, (errno, msg):
-                raise MonitorSocketError("Could not send monitor command '%s' "
-                                         "(%s)" % (cmd, msg))
+            except socket.error, e:
+                raise MonitorSocketError("Could not send monitor command %r" %
+                                         cmd, e)
 
         finally:
             self._lock.release()
@@ -248,17 +265,11 @@
             self._lock.release()
 
 
-    def is_responsive(self):
+    def verify_responsive(self):
         """
         Make sure the monitor is responsive by sending a command.
-
-        @return: True if responsive, False otherwise
         """
-        try:
-            self.cmd("info status")
-            return True
-        except MonitorError:
-            return False
+        self.cmd("info status")
 
 
     # Command wrappers
@@ -309,7 +320,6 @@
         @param wait: If true, wait for completion
         @return: The command's output
         """
-        logging.debug("Migrating to: %s" % uri)
         cmd = "migrate"
         if not wait:
             cmd += " -d"
@@ -480,9 +490,8 @@
         """
         try:
             self._socket.sendall(data)
-        except socket.error, (errno, msg):
-            raise MonitorSocketError("Could not send data: %r (%s)" %
-                                     (data, msg))
+        except socket.error, e:
+            raise MonitorSocketError("Could not send data: %r" % data, e)
 
 
     def _get_response(self, id=None, timeout=20):
@@ -615,17 +624,11 @@
         return self.cmd_obj(self._build_cmd(cmd, args, id), timeout)
 
 
-    def is_responsive(self):
+    def verify_responsive(self):
         """
         Make sure the monitor is responsive by sending a command.
-
-        @return: True if responsive, False otherwise
         """
-        try:
-            self.cmd("query-status")
-            return True
-        except MonitorError:
-            return False
+        self.cmd("query-status")
 
 
     def get_events(self):
diff --git a/client/tests/kvm/kvm_preprocessing.py b/client/tests/kvm/kvm_preprocessing.py
index 1ddf99b..9c53f02 100644
--- a/client/tests/kvm/kvm_preprocessing.py
+++ b/client/tests/kvm/kvm_preprocessing.py
@@ -1,7 +1,7 @@
-import sys, os, time, commands, re, logging, signal, glob, threading, shutil
-from autotest_lib.client.bin import test, utils
+import os, time, commands, re, logging, glob, threading, shutil
+from autotest_lib.client.bin import utils
 from autotest_lib.client.common_lib import error
-import kvm_vm, kvm_utils, kvm_subprocess, kvm_monitor, ppm_utils
+import kvm_vm, kvm_utils, kvm_subprocess, kvm_monitor, ppm_utils, test_setup
 try:
     import PIL.Image
 except ImportError:
@@ -50,19 +50,20 @@
     @param name: The name of the VM object.
     """
     logging.debug("Preprocessing VM '%s'..." % name)
-    vm = kvm_utils.env_get_vm(env, name)
-    if vm:
-        logging.debug("VM object found in environment")
-    else:
+    vm = env.get_vm(name)
+    if not vm:
         logging.debug("VM object does not exist; creating it")
         vm = kvm_vm.VM(name, params, test.bindir, env.get("address_cache"))
-        kvm_utils.env_register_vm(env, name, vm)
+        env.register_vm(name, vm)
 
     start_vm = False
 
     if params.get("restart_vm") == "yes":
         logging.debug("'restart_vm' specified; (re)starting VM...")
         start_vm = True
+    elif params.get("migration_mode"):
+        logging.debug("Starting VM in incoming migration mode...")
+        start_vm = True
     elif params.get("start_vm") == "yes":
         if not vm.is_alive():
             logging.debug("VM is not alive; starting it...")
@@ -75,8 +76,8 @@
 
     if start_vm:
         # Start the VM (or restart it if it's already up)
-        if not vm.create(name, params, test.bindir):
-            raise error.TestError("Could not start VM")
+        vm.create(name, params, test.bindir,
+                  migration_mode=params.get("migration_mode"))
     else:
         # Don't start the VM, just update its params
         vm.params = params
@@ -92,11 +93,12 @@
 def postprocess_image(test, params):
     """
     Postprocess a single QEMU image according to the instructions in params.
-    Currently this function just removes an image if requested.
 
     @param test: An Autotest test object.
     @param params: A dict containing image postprocessing parameters.
     """
+    if params.get("check_image") == "yes":
+        kvm_vm.check_image(params, test.bindir)
     if params.get("remove_image") == "yes":
         kvm_vm.remove_image(params, test.bindir)
 
@@ -112,11 +114,8 @@
     @param name: The name of the VM object.
     """
     logging.debug("Postprocessing VM '%s'..." % name)
-    vm = kvm_utils.env_get_vm(env, name)
-    if vm:
-        logging.debug("VM object found in environment")
-    else:
-        logging.debug("VM object does not exist in environment")
+    vm = env.get_vm(name)
+    if not vm:
         return
 
     scrdump_filename = os.path.join(test.debugdir, "post_%s.ppm" % name)
@@ -173,19 +172,18 @@
     @param vm_func: A function to call for each VM.
     """
     # Get list of VMs specified for this test
-    vm_names = kvm_utils.get_sub_dict_names(params, "vms")
-    for vm_name in vm_names:
-        vm_params = kvm_utils.get_sub_dict(params, vm_name)
+    for vm_name in params.objects("vms"):
+        vm_params = params.object_params(vm_name)
         # Get list of images specified for this VM
-        image_names = kvm_utils.get_sub_dict_names(vm_params, "images")
-        for image_name in image_names:
-            image_params = kvm_utils.get_sub_dict(vm_params, image_name)
+        for image_name in vm_params.objects("images"):
+            image_params = vm_params.object_params(image_name)
             # Call image_func for each image
             image_func(test, image_params)
         # Call vm_func for each vm
         vm_func(test, vm_params, env, vm_name)
 
 
+@error.context_aware
 def preprocess(test, params, env):
     """
     Preprocess all VMs and images according to the instructions in params.
@@ -195,6 +193,8 @@
     @param params: A dict containing all VM and image parameters.
     @param env: The environment (a dict-like object).
     """
+    error.context("preprocessing")
+
     # Start tcpdump if it isn't already running
     if "address_cache" not in env:
         env["address_cache"] = {}
@@ -204,7 +204,7 @@
     if "tcpdump" not in env and params.get("run_tcpdump", "yes") == "yes":
         cmd = "%s -npvi any 'dst port 68'" % kvm_utils.find_command("tcpdump")
         logging.debug("Starting tcpdump (%s)...", cmd)
-        env["tcpdump"] = kvm_subprocess.kvm_tail(
+        env["tcpdump"] = kvm_subprocess.Tail(
             command=cmd,
             output_func=_update_address_cache,
             output_params=(env["address_cache"],))
@@ -216,7 +216,7 @@
                 env["tcpdump"].get_output()))
 
     # Destroy and remove VMs that are no longer needed in the environment
-    requested_vms = kvm_utils.get_sub_dict_names(params, "vms")
+    requested_vms = params.objects("vms")
     for key in env.keys():
         vm = env[key]
         if not kvm_utils.is_vm(vm):
@@ -254,6 +254,18 @@
     logging.debug("KVM userspace version: %s" % kvm_userspace_version)
     test.write_test_keyval({"kvm_userspace_version": kvm_userspace_version})
 
+    if params.get("setup_hugepages") == "yes":
+        h = test_setup.HugePageConfig(params)
+        h.setup()
+
+    if params.get("type") == "unattended_install":
+        u = test_setup.UnattendedInstallConfig(test, params)
+        u.setup()
+
+    if params.get("type") == "enospc":
+        e = test_setup.EnospcConfig(test, params)
+        e.setup()
+
     # Execute any pre_commands
     if params.get("pre_command"):
         process_command(test, params, env, params.get("pre_command"),
@@ -273,6 +285,7 @@
         _screendump_thread.start()
 
 
+@error.context_aware
 def postprocess(test, params, env):
     """
     Postprocess all VMs and images according to the instructions in params.
@@ -281,6 +294,8 @@
     @param params: Dict containing all VM and image parameters.
     @param env: The environment (a dict-like object).
     """
+    error.context("postprocessing")
+
     # Postprocess all VMs and images
     process(test, params, env, postprocess_image, postprocess_vm)
 
@@ -291,7 +306,6 @@
         _screendump_thread_termination_event.set()
         _screendump_thread.join(10)
         _screendump_thread = None
-        _screendump_thread_termination_event = None
 
     # Warn about corrupt PPM files
     for f in glob.glob(os.path.join(test.debugdir, "*.ppm")):
@@ -330,23 +344,32 @@
     if params.get("kill_unresponsive_vms") == "yes":
         logging.debug("'kill_unresponsive_vms' specified; killing all VMs "
                       "that fail to respond to a remote login request...")
-        for vm in kvm_utils.env_get_all_vms(env):
+        for vm in env.get_all_vms():
             if vm.is_alive():
-                session = vm.remote_login()
-                if session:
+                try:
+                    session = vm.login()
                     session.close()
-                else:
+                except (kvm_utils.LoginError, kvm_vm.VMError), e:
+                    logging.warn(e)
                     vm.destroy(gracefully=False)
 
     # Kill all kvm_subprocess tail threads
     kvm_subprocess.kill_tail_threads()
 
     # Terminate tcpdump if no VMs are alive
-    living_vms = [vm for vm in kvm_utils.env_get_all_vms(env) if vm.is_alive()]
+    living_vms = [vm for vm in env.get_all_vms() if vm.is_alive()]
     if not living_vms and "tcpdump" in env:
         env["tcpdump"].close()
         del env["tcpdump"]
 
+    if params.get("setup_hugepages") == "yes":
+        h = test_setup.HugePageConfig(params)
+        h.cleanup()
+
+    if params.get("type") == "enospc":
+        e = test_setup.EnospcConfig(test, params)
+        e.cleanup()
+
     # Execute any post_commands
     if params.get("post_command"):
         process_command(test, params, env, params.get("post_command"),
@@ -362,7 +385,7 @@
     @param params: A dict containing all VM and image parameters.
     @param env: The environment (a dict-like object).
     """
-    params.update(kvm_utils.get_sub_dict(params, "on_error"))
+    params.update(params.object_params("on_error"))
 
 
 def _update_address_cache(address_cache, line):
@@ -374,9 +397,11 @@
         matches = re.findall(r"\w*:\w*:\w*:\w*:\w*:\w*", line)
         if matches and address_cache.get("last_seen"):
             mac_address = matches[0].lower()
-            logging.debug("(address cache) Adding cache entry: %s ---> %s",
-                          mac_address, address_cache.get("last_seen"))
+            if time.time() - address_cache.get("time_%s" % mac_address, 0) > 5:
+                logging.debug("(address cache) Adding cache entry: %s ---> %s",
+                              mac_address, address_cache.get("last_seen"))
             address_cache[mac_address] = address_cache.get("last_seen")
+            address_cache["time_%s" % mac_address] = time.time()
             del address_cache["last_seen"]
 
 
@@ -398,7 +423,7 @@
     cache = {}
 
     while True:
-        for vm in kvm_utils.env_get_all_vms(env):
+        for vm in env.get_all_vms():
             if not vm.is_alive():
                 continue
             try:
@@ -437,5 +462,6 @@
                     pass
             os.unlink(temp_filename)
         if _screendump_thread_termination_event.isSet():
+            _screendump_thread_termination_event = None
             break
         _screendump_thread_termination_event.wait(delay)
diff --git a/client/tests/kvm/kvm_scheduler.py b/client/tests/kvm/kvm_scheduler.py
index f1adb39..95282e4 100644
--- a/client/tests/kvm/kvm_scheduler.py
+++ b/client/tests/kvm/kvm_scheduler.py
@@ -74,13 +74,13 @@
             # The scheduler wants this worker to free its used resources
             elif cmd[0] == "cleanup":
                 env_filename = os.path.join(self.bindir, self_dict["env"])
-                env = kvm_utils.load_env(env_filename, {})
+                env = kvm_utils.Env(env_filename)
                 for obj in env.values():
                     if isinstance(obj, kvm_vm.VM):
                         obj.destroy()
-                    elif isinstance(obj, kvm_subprocess.kvm_spawn):
+                    elif isinstance(obj, kvm_subprocess.Spawn):
                         obj.close()
-                kvm_utils.dump_env(env, env_filename)
+                env.save()
                 w.write("cleanup_done\n")
                 w.write("ready\n")
 
diff --git a/client/tests/kvm/kvm_subprocess.py b/client/tests/kvm/kvm_subprocess.py
index 8321bb3..0b8734f 100755
--- a/client/tests/kvm/kvm_subprocess.py
+++ b/client/tests/kvm/kvm_subprocess.py
@@ -189,6 +189,89 @@
 import common, kvm_utils
 
 
+class ExpectError(Exception):
+    def __init__(self, patterns, output):
+        Exception.__init__(self, patterns, output)
+        self.patterns = patterns
+        self.output = output
+
+    def _pattern_str(self):
+        if len(self.patterns) == 1:
+            return "pattern %r" % self.patterns[0]
+        else:
+            return "patterns %r" % self.patterns
+
+    def __str__(self):
+        return ("Unknown error occurred while looking for %s    (output: %r)" %
+                (self._pattern_str(), self.output))
+
+
+class ExpectTimeoutError(ExpectError):
+    def __str__(self):
+        return ("Timeout expired while looking for %s    (output: %r)" %
+                (self._pattern_str(), self.output))
+
+
+class ExpectProcessTerminatedError(ExpectError):
+    def __init__(self, patterns, status, output):
+        ExpectError.__init__(self, patterns, output)
+        self.status = status
+
+    def __str__(self):
+        return ("Process terminated while looking for %s    "
+                "(status: %s,    output: %r)" % (self._pattern_str(),
+                                                 self.status, self.output))
+
+
+class ShellError(Exception):
+    def __init__(self, cmd, output):
+        Exception.__init__(self, cmd, output)
+        self.cmd = cmd
+        self.output = output
+
+    def __str__(self):
+        return ("Could not execute shell command %r    (output: %r)" %
+                (self.cmd, self.output))
+
+
+class ShellTimeoutError(ShellError):
+    def __str__(self):
+        return ("Timeout expired while waiting for shell command to "
+                "complete: %r    (output: %r)" % (self.cmd, self.output))
+
+
+class ShellProcessTerminatedError(ShellError):
+    # Raised when the shell process itself (e.g. ssh, netcat, telnet)
+    # terminates unexpectedly
+    def __init__(self, cmd, status, output):
+        ShellError.__init__(self, cmd, output)
+        self.status = status
+
+    def __str__(self):
+        return ("Shell process terminated while waiting for command to "
+                "complete: %r    (status: %s,    output: %r)" %
+                (self.cmd, self.status, self.output))
+
+
+class ShellCmdError(ShellError):
+    # Raised when a command executed in a shell terminates with a nonzero
+    # exit code (status)
+    def __init__(self, cmd, status, output):
+        ShellError.__init__(self, cmd, output)
+        self.status = status
+
+    def __str__(self):
+        return ("Shell command failed: %r    (status: %s,    output: %r)" %
+                (self.cmd, self.status, self.output))
+
+
+class ShellStatusError(ShellError):
+    # Raised when the command's exit status cannot be obtained
+    def __str__(self):
+        return ("Could not get exit status of command: %r    (output: %r)" %
+                (self.cmd, self.output))
+
+
 def run_bg(command, termination_func=None, output_func=None, output_prefix="",
            timeout=1.0):
     """
@@ -210,12 +293,12 @@
     @param timeout: Time duration (in seconds) to wait for the subprocess to
             terminate before returning
 
-    @return: A kvm_tail object.
+    @return: A Tail object.
     """
-    process = kvm_tail(command=command,
-                       termination_func=termination_func,
-                       output_func=output_func,
-                       output_prefix=output_prefix)
+    process = Tail(command=command,
+                   termination_func=termination_func,
+                   output_func=output_func,
+                   output_prefix=output_prefix)
 
     end_time = time.time() + timeout
     while time.time() < end_time and process.is_alive():
@@ -256,7 +339,7 @@
     return (status, output)
 
 
-class kvm_spawn:
+class Spawn:
     """
     This class is used for spawning and controlling a child process.
 
@@ -268,7 +351,7 @@
     The text file can be accessed at any time using get_output().
     In addition, the server opens as many pipes as requested by the client and
     writes the output to them.
-    The pipes are requested and accessed by classes derived from kvm_spawn.
+    The pipes are requested and accessed by classes derived from Spawn.
     These pipes are referred to as "readers".
     The server also receives input from the client and sends it to the child
     process.
@@ -552,7 +635,7 @@
 
 def kill_tail_threads():
     """
-    Kill all kvm_tail threads.
+    Kill all Tail threads.
 
     After calling this function no new threads should be started.
     """
@@ -564,12 +647,12 @@
     _thread_kill_requested = False
 
 
-class kvm_tail(kvm_spawn):
+class Tail(Spawn):
     """
     This class runs a child process in the background and sends its output in
     real time, line-by-line, to a callback function.
 
-    See kvm_spawn's docstring.
+    See Spawn's docstring.
 
     This class uses a single pipe reader to read data in real time from the
     child process and report it to a given callback function.
@@ -610,10 +693,10 @@
         """
         # Add a reader and a close hook
         self._add_reader("tail")
-        self._add_close_hook(kvm_tail._join_thread)
+        self._add_close_hook(Tail._join_thread)
 
         # Init the superclass
-        kvm_spawn.__init__(self, command, id, auto_close, echo, linesep)
+        Spawn.__init__(self, command, id, auto_close, echo, linesep)
 
         # Remember some attributes
         self.termination_func = termination_func
@@ -629,11 +712,11 @@
 
 
     def __getinitargs__(self):
-        return kvm_spawn.__getinitargs__(self) + (self.termination_func,
-                                                  self.termination_params,
-                                                  self.output_func,
-                                                  self.output_params,
-                                                  self.output_prefix)
+        return Spawn.__getinitargs__(self) + (self.termination_func,
+                                              self.termination_params,
+                                              self.output_func,
+                                              self.output_params,
+                                              self.output_prefix)
 
 
     def set_termination_func(self, termination_func):
@@ -765,15 +848,15 @@
             t.join()
 
 
-class kvm_expect(kvm_tail):
+class Expect(Tail):
     """
     This class runs a child process in the background and provides expect-like
     services.
 
-    It also provides all of kvm_tail's functionality.
+    It also provides all of Tail's functionality.
     """
 
-    def __init__(self, command=None, id=None, auto_close=False, echo=False,
+    def __init__(self, command=None, id=None, auto_close=True, echo=False,
                  linesep="\n", termination_func=None, termination_params=(),
                  output_func=None, output_params=(), output_prefix=""):
         """
@@ -806,13 +889,13 @@
         self._add_reader("expect")
 
         # Init the superclass
-        kvm_tail.__init__(self, command, id, auto_close, echo, linesep,
-                          termination_func, termination_params,
-                          output_func, output_params, output_prefix)
+        Tail.__init__(self, command, id, auto_close, echo, linesep,
+                      termination_func, termination_params,
+                      output_func, output_params, output_prefix)
 
 
     def __getinitargs__(self):
-        return kvm_tail.__getinitargs__(self)
+        return Tail.__getinitargs__(self)
 
 
     def read_nonblocking(self, timeout=None):
@@ -858,7 +941,7 @@
 
 
     def read_until_output_matches(self, patterns, filter=lambda x: x,
-                                  timeout=30.0, internal_timeout=None,
+                                  timeout=60, internal_timeout=None,
                                   print_func=None):
         """
         Read using read_nonblocking until a match is found using match_patterns,
@@ -876,13 +959,14 @@
         @param internal_timeout: The timeout to pass to read_nonblocking
         @param print_func: A function to be used to print the data being read
                 (should take a string parameter)
-        @return: Tuple containing the match index (or None if no match was
-                found) and the data read so far.
+        @return: Tuple containing the match index and the data read so far
+        @raise ExpectTimeoutError: Raised if timeout expires
+        @raise ExpectProcessTerminatedError: Raised if the child process
+                terminates while waiting for output
+        @raise ExpectError: Raised if an unknown error occurs
         """
-        match = None
-        data = ""
-
         fd = self._get_fd("expect")
+        o = ""
         end_time = time.time() + timeout
         while True:
             try:
@@ -890,41 +974,31 @@
                                         max(0, end_time - time.time()))
             except (select.error, TypeError):
                 break
-            if fd not in r:
-                break
+            if not r:
+                raise ExpectTimeoutError(patterns, o)
             # Read data from child
-            newdata = self.read_nonblocking(internal_timeout)
+            data = self.read_nonblocking(internal_timeout)
+            if not data:
+                break
             # Print it if necessary
-            if print_func and newdata:
-                str = newdata
-                if str.endswith("\n"):
-                    str = str[:-1]
-                for line in str.split("\n"):
+            if print_func:
+                for line in data.splitlines():
                     print_func(line)
-            data += newdata
-
-            done = False
             # Look for patterns
-            match = self.match_patterns(filter(data), patterns)
+            o += data
+            match = self.match_patterns(filter(o), patterns)
             if match is not None:
-                done = True
-            # Check if child has died
-            if not self.is_alive():
-                logging.debug("Process terminated with status %s" %
-                              self.get_status())
-                done = True
-            # Are we done?
-            if done: break
+                return match, o
 
-        # Print some debugging info
-        if match is None and (self.is_alive() or self.get_status() != 0):
-            logging.debug("Timeout elapsed or process terminated. Output:" +
-                          kvm_utils.format_str_for_message(data.strip()))
-
-        return (match, data)
+        # Check if the child has terminated
+        if kvm_utils.wait_for(lambda: not self.is_alive(), 5, 0, 0.1):
+            raise ExpectProcessTerminatedError(patterns, self.get_status(), o)
+        else:
+            # This shouldn't happen
+            raise ExpectError(patterns, o)
 
 
-    def read_until_last_word_matches(self, patterns, timeout=30.0,
+    def read_until_last_word_matches(self, patterns, timeout=60,
                                      internal_timeout=None, print_func=None):
         """
         Read using read_nonblocking until the last word of the output matches
@@ -936,8 +1010,11 @@
         @param internal_timeout: The timeout to pass to read_nonblocking
         @param print_func: A function to be used to print the data being read
                 (should take a string parameter)
-        @return: A tuple containing the match index (or None if no match was
-                found) and the data read so far.
+        @return: A tuple containing the match index and the data read so far
+        @raise ExpectTimeoutError: Raised if timeout expires
+        @raise ExpectProcessTerminatedError: Raised if the child process
+                terminates while waiting for output
+        @raise ExpectError: Raised if an unknown error occurs
         """
         def get_last_word(str):
             if str:
@@ -950,7 +1027,7 @@
                                               print_func)
 
 
-    def read_until_last_line_matches(self, patterns, timeout=30.0,
+    def read_until_last_line_matches(self, patterns, timeout=60,
                                      internal_timeout=None, print_func=None):
         """
         Read using read_nonblocking until the last non-empty line of the output
@@ -967,6 +1044,11 @@
         @param internal_timeout: The timeout to pass to read_nonblocking
         @param print_func: A function to be used to print the data being read
                 (should take a string parameter)
+        @return: A tuple containing the match index and the data read so far
+        @raise ExpectTimeoutError: Raised if timeout expires
+        @raise ExpectProcessTerminatedError: Raised if the child process
+                terminates while waiting for output
+        @raise ExpectError: Raised if an unknown error occurs
         """
         def get_last_nonempty_line(str):
             nonempty_lines = [l for l in str.splitlines() if l.strip()]
@@ -980,12 +1062,12 @@
                                               print_func)
 
 
-class kvm_shell_session(kvm_expect):
+class ShellSession(Expect):
     """
     This class runs a child process in the background.  It it suited for
     processes that provide an interactive shell, such as SSH and Telnet.
 
-    It provides all services of kvm_expect and kvm_tail.  In addition, it
+    It provides all services of Expect and Tail.  In addition, it
     provides command running services, and a utility function to test the
     process for responsiveness.
     """
@@ -1022,12 +1104,12 @@
         @param prompt: Regular expression describing the shell's prompt line.
         @param status_test_command: Command to be used for getting the last
                 exit status of commands run inside the shell (used by
-                get_command_status_output() and friends).
+                cmd_status_output() and friends).
         """
         # Init the superclass
-        kvm_expect.__init__(self, command, id, auto_close, echo, linesep,
-                            termination_func, termination_params,
-                            output_func, output_params, output_prefix)
+        Expect.__init__(self, command, id, auto_close, echo, linesep,
+                        termination_func, termination_params,
+                        output_func, output_params, output_prefix)
 
         # Remember some attributes
         self.prompt = prompt
@@ -1035,8 +1117,8 @@
 
 
     def __getinitargs__(self):
-        return kvm_expect.__getinitargs__(self) + (self.prompt,
-                                                   self.status_test_command)
+        return Expect.__getinitargs__(self) + (self.prompt,
+                                               self.status_test_command)
 
 
     def set_prompt(self, prompt):
@@ -1085,7 +1167,7 @@
         return False
 
 
-    def read_up_to_prompt(self, timeout=30.0, internal_timeout=None,
+    def read_up_to_prompt(self, timeout=60, internal_timeout=None,
                           print_func=None):
         """
         Read using read_nonblocking until the last non-empty line of the output
@@ -1101,31 +1183,34 @@
         @param print_func: A function to be used to print the data being
                 read (should take a string parameter)
 
-        @return: A tuple containing True/False indicating whether the prompt
-                was found, and the data read so far.
+        @return: The data read so far
+        @raise ExpectTimeoutError: Raised if timeout expires
+        @raise ExpectProcessTerminatedError: Raised if the shell process
+                terminates while waiting for output
+        @raise ExpectError: Raised if an unknown error occurs
         """
-        (match, output) = self.read_until_last_line_matches([self.prompt],
-                                                            timeout,
-                                                            internal_timeout,
-                                                            print_func)
-        return (match is not None, output)
+        m, o = self.read_until_last_line_matches([self.prompt], timeout,
+                                                 internal_timeout, print_func)
+        return o
 
 
-    def get_command_status_output(self, command, timeout=30.0,
-                                  internal_timeout=None, print_func=None):
+    def cmd_output(self, cmd, timeout=60, internal_timeout=None,
+                   print_func=None):
         """
-        Send a command and return its exit status and output.
+        Send a command and return its output.
 
-        @param command: Command to send (must not contain newline characters)
-        @param timeout: The duration (in seconds) to wait until a match is
-                found
+        @param cmd: Command to send (must not contain newline characters)
+        @param timeout: The duration (in seconds) to wait for the prompt to
+                return
         @param internal_timeout: The timeout to pass to read_nonblocking
         @param print_func: A function to be used to print the data being read
                 (should take a string parameter)
 
-        @return: A tuple (status, output) where status is the exit status or
-                None if no exit status is available (e.g. timeout elapsed), and
-                output is the output of command.
+        @return: The output of cmd
+        @raise ShellTimeoutError: Raised if timeout expires
+        @raise ShellProcessTerminatedError: Raised if the shell process
+                terminates while waiting for output
+        @raise ShellError: Raised if an unknown error occurs
         """
         def remove_command_echo(str, cmd):
             if str and str.splitlines()[0] == cmd:
@@ -1135,79 +1220,132 @@
         def remove_last_nonempty_line(str):
             return "".join(str.rstrip().splitlines(True)[:-1])
 
-        # Print some debugging info
-        logging.debug("Sending command: %s" % command)
-
-        # Read everything that's waiting to be read
+        logging.debug("Sending command: %s" % cmd)
         self.read_nonblocking(timeout=0)
+        self.sendline(cmd)
+        try:
+            o = self.read_up_to_prompt(timeout, internal_timeout, print_func)
+        except ExpectError, e:
+            o = remove_command_echo(e.output, cmd)
+            if isinstance(e, ExpectTimeoutError):
+                raise ShellTimeoutError(cmd, o)
+            elif isinstance(e, ExpectProcessTerminatedError):
+                raise ShellProcessTerminatedError(cmd, e.status, o)
+            else:
+                raise ShellError(cmd, o)
 
-        # Send the command and get its output
-        self.sendline(command)
-        (match, output) = self.read_up_to_prompt(timeout, internal_timeout,
-                                                 print_func)
-        # Remove the echoed command from the output
-        output = remove_command_echo(output, command)
-        # If the prompt was not found, return the output so far
-        if not match:
-            return (None, output)
-        # Remove the final shell prompt from the output
-        output = remove_last_nonempty_line(output)
+        # Remove the echoed command and the final shell prompt
+        return remove_last_nonempty_line(remove_command_echo(o, cmd))
 
-        # Send the 'echo ...' command to get the last exit status
-        self.sendline(self.status_test_command)
-        (match, status) = self.read_up_to_prompt(10.0, internal_timeout)
-        if not match:
-            return (None, output)
-        status = remove_command_echo(status, self.status_test_command)
-        status = remove_last_nonempty_line(status)
+
+    def cmd_status_output(self, cmd, timeout=60, internal_timeout=None,
+                          print_func=None):
+        """
+        Send a command and return its exit status and output.
+
+        @param cmd: Command to send (must not contain newline characters)
+        @param timeout: The duration (in seconds) to wait for the prompt to
+                return
+        @param internal_timeout: The timeout to pass to read_nonblocking
+        @param print_func: A function to be used to print the data being read
+                (should take a string parameter)
+
+        @return: A tuple (status, output) where status is the exit status and
+                output is the output of cmd
+        @raise ShellTimeoutError: Raised if timeout expires
+        @raise ShellProcessTerminatedError: Raised if the shell process
+                terminates while waiting for output
+        @raise ShellStatusError: Raised if the exit status cannot be obtained
+        @raise ShellError: Raised if an unknown error occurs
+        """
+        o = self.cmd_output(cmd, timeout, internal_timeout, print_func)
+        try:
+            # Send the 'echo $?' (or equivalent) command to get the exit status
+            s = self.cmd_output(self.status_test_command, 10, internal_timeout)
+        except ShellError:
+            raise ShellStatusError(cmd, o)
+
         # Get the first line consisting of digits only
-        digit_lines = [l for l in status.splitlines() if l.strip().isdigit()]
-        if not digit_lines:
-            return (None, output)
-        status = int(digit_lines[0].strip())
-
-        # Print some debugging info
-        if status != 0:
-            logging.debug("Command failed; status: %d, output:%s", status,
-                          kvm_utils.format_str_for_message(output.strip()))
-
-        return (status, output)
+        digit_lines = [l for l in s.splitlines() if l.strip().isdigit()]
+        if digit_lines:
+            return int(digit_lines[0].strip()), o
+        else:
+            raise ShellStatusError(cmd, o)
 
 
-    def get_command_status(self, command, timeout=30.0, internal_timeout=None,
-                           print_func=None):
+    def cmd_status(self, cmd, timeout=60, internal_timeout=None,
+                   print_func=None):
         """
         Send a command and return its exit status.
 
-        @param command: Command to send
-        @param timeout: The duration (in seconds) to wait until a match is
-                found
+        @param cmd: Command to send (must not contain newline characters)
+        @param timeout: The duration (in seconds) to wait for the prompt to
+                return
         @param internal_timeout: The timeout to pass to read_nonblocking
         @param print_func: A function to be used to print the data being read
                 (should take a string parameter)
 
-        @return: Exit status or None if no exit status is available (e.g.
-                timeout elapsed).
+        @return: The exit status of cmd
+        @raise ShellTimeoutError: Raised if timeout expires
+        @raise ShellProcessTerminatedError: Raised if the shell process
+                terminates while waiting for output
+        @raise ShellStatusError: Raised if the exit status cannot be obtained
+        @raise ShellError: Raised if an unknown error occurs
         """
-        (status, output) = self.get_command_status_output(command, timeout,
-                                                          internal_timeout,
-                                                          print_func)
-        return status
+        s, o = self.cmd_status_output(cmd, timeout, internal_timeout,
+                                      print_func)
+        return s
 
 
-    def get_command_output(self, command, timeout=30.0, internal_timeout=None,
+    def cmd(self, cmd, timeout=60, internal_timeout=None, print_func=None):
+        """
+        Send a command and return its output. If the command's exit status is
+        nonzero, raise an exception.
+
+        @param cmd: Command to send (must not contain newline characters)
+        @param timeout: The duration (in seconds) to wait for the prompt to
+                return
+        @param internal_timeout: The timeout to pass to read_nonblocking
+        @param print_func: A function to be used to print the data being read
+                (should take a string parameter)
+
+        @return: The output of cmd
+        @raise ShellTimeoutError: Raised if timeout expires
+        @raise ShellProcessTerminatedError: Raised if the shell process
+                terminates while waiting for output
+        @raise ShellError: Raised if the exit status cannot be obtained or if
+                an unknown error occurs
+        @raise ShellStatusError: Raised if the exit status cannot be obtained
+        @raise ShellError: Raised if an unknown error occurs
+        @raise ShellCmdError: Raised if the exit status is nonzero
+        """
+        s, o = self.cmd_status_output(cmd, timeout, internal_timeout,
+                                      print_func)
+        if s != 0:
+            raise ShellCmdError(cmd, s, o)
+        return o
+
+
+    def get_command_output(self, cmd, timeout=60, internal_timeout=None,
                            print_func=None):
         """
-        Send a command and return its output.
-
-        @param command: Command to send
-        @param timeout: The duration (in seconds) to wait until a match is
-                found
-        @param internal_timeout: The timeout to pass to read_nonblocking
-        @param print_func: A function to be used to print the data being read
-                (should take a string parameter)
+        Alias for cmd_output() for backward compatibility.
         """
-        (status, output) = self.get_command_status_output(command, timeout,
-                                                          internal_timeout,
-                                                          print_func)
-        return output
+        return self.cmd_output(cmd, timeout, internal_timeout, print_func)
+
+
+    def get_command_status_output(self, cmd, timeout=60, internal_timeout=None,
+                                  print_func=None):
+        """
+        Alias for cmd_status_output() for backward compatibility.
+        """
+        return self.cmd_status_output(cmd, timeout, internal_timeout,
+                                      print_func)
+
+
+    def get_command_status(self, cmd, timeout=60, internal_timeout=None,
+                           print_func=None):
+        """
+        Alias for cmd_status() for backward compatibility.
+        """
+        return self.cmd_status(cmd, timeout, internal_timeout, print_func)
diff --git a/client/tests/kvm/kvm_test_utils.py b/client/tests/kvm/kvm_test_utils.py
index 014f265..b5c4a24 100644
--- a/client/tests/kvm/kvm_test_utils.py
+++ b/client/tests/kvm/kvm_test_utils.py
@@ -21,7 +21,7 @@
 @copyright: 2008-2009 Red Hat Inc.
 """
 
-import time, os, logging, re, commands, signal
+import time, os, logging, re, signal
 from autotest_lib.client.common_lib import error
 from autotest_lib.client.bin import utils
 import kvm_utils, kvm_vm, kvm_subprocess, scan_results
@@ -35,7 +35,7 @@
     @param vm_name: Name of the desired VM object.
     @return: A VM object.
     """
-    vm = kvm_utils.env_get_vm(env, vm_name)
+    vm = env.get_vm(vm_name)
     if not vm:
         raise error.TestError("VM '%s' not found in environment" % vm_name)
     if not vm.is_alive():
@@ -44,21 +44,47 @@
     return vm
 
 
-def wait_for_login(vm, nic_index=0, timeout=240, start=0, step=2):
+def wait_for_login(vm, nic_index=0, timeout=240, start=0, step=2, serial=None):
     """
     Try logging into a VM repeatedly.  Stop on success or when timeout expires.
 
     @param vm: VM object.
     @param nic_index: Index of NIC to access in the VM.
     @param timeout: Time to wait before giving up.
+    @param serial: Whether to use a serial connection instead of a remote
+            (ssh, rss) one.
     @return: A shell session object.
     """
-    logging.info("Trying to log into guest '%s', timeout %ds", vm.name, timeout)
-    session = kvm_utils.wait_for(lambda: vm.remote_login(nic_index=nic_index),
-                                 timeout, start, step)
+    end_time = time.time() + timeout
+    session = None
+    if serial:
+        type = 'serial'
+        logging.info("Trying to log into guest %s using serial connection,"
+                     " timeout %ds", vm.name, timeout)
+        time.sleep(start)
+        while time.time() < end_time:
+            try:
+                session = vm.serial_login()
+                break
+            except kvm_utils.LoginError, e:
+                logging.debug(e)
+            time.sleep(step)
+    else:
+        type = 'remote'
+        logging.info("Trying to log into guest %s using remote connection,"
+                     " timeout %ds", vm.name, timeout)
+        time.sleep(start)
+        while time.time() < end_time:
+            try:
+                session = vm.login(nic_index=nic_index)
+                break
+            except (kvm_utils.LoginError, kvm_vm.VMError), e:
+                logging.debug(e)
+            time.sleep(step)
     if not session:
-        raise error.TestFail("Could not log into guest '%s'" % vm.name)
-    logging.info("Logged into guest '%s'" % vm.name)
+        raise error.TestFail("Could not log into guest %s using %s connection" %
+                             (vm.name, type))
+    logging.info("Logged into guest %s using %s connection", vm.name, type)
     return session
 
 
@@ -112,16 +138,14 @@
     # Try logging into the guest until timeout expires
     logging.info("Guest is down. Waiting for it to go up again, timeout %ds",
                  timeout)
-    session = kvm_utils.wait_for(lambda: vm.remote_login(nic_index=nic_index),
-                                 timeout, 0, 2)
-    if not session:
-        raise error.TestFail("Could not log into guest after reboot")
+    session = vm.wait_for_login(nic_index, timeout=timeout)
     logging.info("Guest is up again")
     return session
 
 
 def migrate(vm, env=None, mig_timeout=3600, mig_protocol="tcp",
-            mig_cancel=False):
+            mig_cancel=False, offline=False, stable_check=False,
+            clean=False, save_path=None, dest_host='localhost', mig_port=None):
     """
     Migrate a VM locally and re-register it in the environment.
 
@@ -131,7 +155,10 @@
     @param mig_timeout: timeout value for migration.
     @param mig_protocol: migration protocol
     @param mig_cancel: Test migrate_cancel or not when protocol is tcp.
-    @return: The post-migration VM.
+    @param dest_host: Destination host (defaults to 'localhost').
+    @param mig_port: Port that will be used for migration.
+    @return: The post-migration VM, in case of same host migration, the
+            source VM object in case of multi-host migration.
     """
     def mig_finished():
         o = vm.monitor.info("migrate")
@@ -169,38 +196,31 @@
             raise error.TestFail("Timeout expired while waiting for migration "
                                  "to finish")
 
-    dest_vm = vm.clone()
+    if dest_host == 'localhost':
+        dest_vm = vm.clone()
 
-    if mig_protocol == "exec":
-        # Exec is a little different from other migrate methods - first we
-        # ask the monitor the migration, then the vm state is dumped to a
-        # compressed file, then we start the dest vm with -incoming pointing
-        # to it
-        try:
-            exec_file = "/tmp/exec-%s.gz" % kvm_utils.generate_random_string(8)
-            exec_cmd = "gzip -c -d %s" % exec_file
-            uri = '"exec:gzip -c > %s"' % exec_file
-            vm.monitor.cmd("stop")
-            vm.monitor.migrate(uri)
-            wait_for_migration()
+    if (dest_host == 'localhost') and stable_check:
+        # Pause the dest vm after creation
+        dest_vm.params['extra_params'] = (dest_vm.params.get('extra_params','')
+                                          + ' -S')
 
-            if not dest_vm.create(migration_mode=mig_protocol,
-                                  migration_exec_cmd=exec_cmd, mac_source=vm):
-                raise error.TestError("Could not create dest VM")
-        finally:
-            logging.debug("Removing migration file %s", exec_file)
-            try:
-                os.remove(exec_file)
-            except OSError:
-                pass
-    else:
-        if not dest_vm.create(migration_mode=mig_protocol, mac_source=vm):
-            raise error.TestError("Could not create dest VM")
+    if dest_host == 'localhost':
+        dest_vm.create(migration_mode=mig_protocol, mac_source=vm)
+
+    try:
         try:
             if mig_protocol == "tcp":
-                uri = "tcp:localhost:%d" % dest_vm.migration_port
+                if dest_host == 'localhost':
+                    uri = "tcp:localhost:%d" % dest_vm.migration_port
+                else:
+                    uri = 'tcp:%s:%d' % (dest_host, mig_port)
             elif mig_protocol == "unix":
                 uri = "unix:%s" % dest_vm.migration_file
+            elif mig_protocol == "exec":
+                uri = '"exec:nc localhost %s"' % dest_vm.migration_port
+
+            if offline:
+                vm.monitor.cmd("stop")
             vm.monitor.migrate(uri)
 
             if mig_cancel:
@@ -210,14 +230,43 @@
                                           "Waiting for migration "
                                           "cancellation"):
                     raise error.TestFail("Failed to cancel migration")
-                dest_vm.destroy(gracefully=False)
+                if offline:
+                    vm.monitor.cmd("cont")
+                if dest_host == 'localhost':
+                    dest_vm.destroy(gracefully=False)
                 return vm
             else:
                 wait_for_migration()
+                if (dest_host == 'localhost') and stable_check:
+                    save_path = None or "/tmp"
+                    save1 = os.path.join(save_path, "src")
+                    save2 = os.path.join(save_path, "dst")
+
+                    vm.save_to_file(save1)
+                    dest_vm.save_to_file(save2)
+
+                    # Fail if we see deltas
+                    md5_save1 = utils.hash_file(save1)
+                    md5_save2 = utils.hash_file(save2)
+                    if md5_save1 != md5_save2:
+                        raise error.TestFail("Mismatch of VM state before "
+                                             "and after migration")
+
+                if (dest_host == 'localhost') and offline:
+                    dest_vm.monitor.cmd("cont")
         except:
-            dest_vm.destroy()
+            if dest_host == 'localhost':
+                dest_vm.destroy()
             raise
 
+    finally:
+        if (dest_host == 'localhost') and stable_check and clean:
+            logging.debug("Cleaning the state files")
+            if os.path.isfile(save1):
+                os.remove(save1)
+            if os.path.isfile(save2):
+                os.remove(save2)
+
     # Report migration status
     if mig_succeeded():
         logging.info("Migration finished successfully")
@@ -226,19 +275,23 @@
     else:
         raise error.TestFail("Migration ended with unknown status")
 
-    if "paused" in dest_vm.monitor.info("status"):
-        logging.debug("Destination VM is paused, resuming it...")
-        dest_vm.monitor.cmd("cont")
+    if dest_host == 'localhost':
+        if "paused" in dest_vm.monitor.info("status"):
+            logging.debug("Destination VM is paused, resuming it...")
+            dest_vm.monitor.cmd("cont")
 
     # Kill the source VM
     vm.destroy(gracefully=False)
 
     # Replace the source VM with the new cloned VM
-    if env is not None:
-        kvm_utils.env_register_vm(env, vm.name, dest_vm)
+    if (dest_host == 'localhost') and (env is not None):
+        env.register_vm(vm.name, dest_vm)
 
     # Return the new cloned VM
-    return dest_vm
+    if dest_host == 'localhost':
+        return dest_vm
+    else:
+        return vm
 
 
 def stop_windows_service(session, service, timeout=120):
@@ -252,7 +305,7 @@
     """
     end_time = time.time() + timeout
     while time.time() < end_time:
-        o = session.get_command_output("sc stop %s" % service, timeout=60)
+        o = session.cmd_output("sc stop %s" % service, timeout=60)
         # FAILED 1060 means the service isn't installed.
         # FAILED 1062 means the service hasn't been started.
         if re.search(r"\bFAILED (1060|1062)\b", o, re.I):
@@ -274,7 +327,7 @@
     """
     end_time = time.time() + timeout
     while time.time() < end_time:
-        o = session.get_command_output("sc start %s" % service, timeout=60)
+        o = session.cmd_output("sc start %s" % service, timeout=60)
         # FAILED 1060 means the service isn't installed.
         if re.search(r"\bFAILED 1060\b", o, re.I):
             raise error.TestError("Could not start service '%s' "
@@ -306,31 +359,26 @@
     """
     if len(re.findall("ntpdate|w32tm", time_command)) == 0:
         host_time = time.time()
-        session.sendline(time_command)
-        (match, s) = session.read_up_to_prompt()
-        if not match:
-            raise error.TestError("Could not get guest time")
+        s = session.cmd_output(time_command)
 
         try:
             s = re.findall(time_filter_re, s)[0]
         except IndexError:
-            logging.debug("The time string from guest is:\n%s" % s)
+            logging.debug("The time string from guest is:\n%s", s)
             raise error.TestError("The time string from guest is unexpected.")
         except Exception, e:
-            logging.debug("(time_filter_re, time_string): (%s, %s)" %
-                           (time_filter_re, s))
+            logging.debug("(time_filter_re, time_string): (%s, %s)",
+                          time_filter_re, s)
             raise e
 
         guest_time = time.mktime(time.strptime(s, time_format))
     else:
-        s , o = session.get_command_status_output(time_command)
-        if s != 0:
-            raise error.TestError("Could not get guest time")
+        o = session.cmd(time_command)
         if re.match('ntpdate', time_command):
-            offset = re.findall('offset (.*) sec',o)[0]
+            offset = re.findall('offset (.*) sec', o)[0]
             host_main, host_mantissa = re.findall(time_filter_re, o)[0]
-            host_time = time.mktime(time.strptime(host_main, time_format)) \
-                        + float("0.%s" % host_mantissa)
+            host_time = (time.mktime(time.strptime(host_main, time_format)) +
+                         float("0.%s" % host_mantissa))
             guest_time = host_time + float(offset)
         else:
             guest_time =  re.findall(time_filter_re, o)[0]
@@ -381,7 +429,7 @@
     return meminfo
 
 
-def run_autotest(vm, session, control_path, timeout, outputdir):
+def run_autotest(vm, session, control_path, timeout, outputdir, params):
     """
     Run an autotest control file inside a guest (linux only utility).
 
@@ -391,6 +439,9 @@
     @param timeout: Timeout under which the autotest control file must complete.
     @param outputdir: Path on host where we should copy the guest autotest
             results to.
+
+    The following param is used by the migration test:
+    @param params: Test params used in the migration test
     """
     def copy_if_hash_differs(vm, local_path, remote_path):
         """
@@ -400,10 +451,9 @@
         @param local_path: Local path.
         @param remote_path: Remote path.
         """
-        copy = False
         local_hash = utils.hash_file(local_path)
         basename = os.path.basename(local_path)
-        output = session.get_command_output("md5sum %s" % remote_path)
+        output = session.cmd_output("md5sum %s" % remote_path)
         if "such file" in output:
             remote_hash = "0"
         elif output:
@@ -414,14 +464,9 @@
             # Let's be a little more lenient here and see if it wasn't a
             # temporary problem
             remote_hash = "0"
-
         if remote_hash != local_hash:
             logging.debug("Copying %s to guest", basename)
-            copy = True
-
-        if copy:
-            if not vm.copy_files_to(local_path, remote_path):
-                raise error.TestFail("Could not copy %s to guest" % local_path)
+            vm.copy_files_to(local_path, remote_path)
 
 
     def extract(vm, remote_path, dest_dir="."):
@@ -435,10 +480,7 @@
         basename = os.path.basename(remote_path)
         logging.info("Extracting %s...", basename)
         e_cmd = "tar xjvf %s -C %s" % (remote_path, dest_dir)
-        s, o = session.get_command_status_output(e_cmd, timeout=120)
-        if s != 0:
-            logging.error("Uncompress output:\n%s", o)
-            raise error.TestFail("Failed to extract %s on guest" % basename)
+        session.cmd(e_cmd, timeout=120)
 
 
     def get_results():
@@ -449,9 +491,8 @@
         guest_results_dir = os.path.join(outputdir, "guest_autotest_results")
         if not os.path.exists(guest_results_dir):
             os.mkdir(guest_results_dir)
-        if not vm.copy_files_from("%s/results/default/*" % autotest_path,
-                                  guest_results_dir):
-            logging.error("Could not copy autotest results from guest")
+        vm.copy_files_from("%s/results/default/*" % autotest_path,
+                           guest_results_dir)
 
 
     def get_results_summary():
@@ -459,7 +500,7 @@
         Get the status of the tests that were executed on the host and close
         the session where autotest was being executed.
         """
-        output = session.get_command_output("cat results/*/status")
+        output = session.cmd_output("cat results/*/status")
         try:
             results = scan_results.parse_results(output)
             # Report test results
@@ -477,6 +518,11 @@
         raise error.TestError("Invalid path to autotest control file: %s" %
                               control_path)
 
+    migrate_background = params.get("migrate_background") == "yes"
+    if migrate_background:
+        mig_timeout = float(params.get("mig_timeout", "3600"))
+        mig_protocol = params.get("migration_protocol", "tcp")
+
     compressed_autotest_path = "/tmp/autotest.tar.bz2"
 
     # To avoid problems, let's make the test use the current AUTODIR
@@ -501,33 +547,56 @@
     # Extract autotest.tar.bz2
     extract(vm, compressed_autotest_path, "/")
 
-    if not vm.copy_files_to(control_path,
-                            os.path.join(autotest_path, 'control')):
-        raise error.TestFail("Could not copy the test control file to guest")
+    vm.copy_files_to(control_path, os.path.join(autotest_path, 'control'))
 
     # Run the test
     logging.info("Running autotest control file %s on guest, timeout %ss",
                  os.path.basename(control_path), timeout)
-    session.get_command_output("cd %s" % autotest_path)
-    session.get_command_output("rm -f control.state")
-    session.get_command_output("rm -rf results/*")
-    logging.info("---------------- Test output ----------------")
-    status = session.get_command_status("bin/autotest control",
-                                        timeout=timeout,
-                                        print_func=logging.info)
-    logging.info("------------- End of test output ------------")
-    if status is None:
-        if not vm.is_alive():
+    session.cmd("cd %s" % autotest_path)
+    try:
+        session.cmd("rm -f control.state")
+        session.cmd("rm -rf results/*")
+    except kvm_subprocess.ShellError:
+        pass
+    try:
+        bg = None
+        try:
+            logging.info("---------------- Test output ----------------")
+            if migrate_background:
+                mig_timeout = float(params.get("mig_timeout", "3600"))
+                mig_protocol = params.get("migration_protocol", "tcp")
+
+                bg = kvm_utils.Thread(session.cmd_output,
+                                      kwargs={'cmd': "bin/autotest control",
+                                              'timeout': timeout,
+                                              'print_func': logging.info})
+
+                bg.start()
+
+                while bg.is_alive():
+                    logging.info("Test has not ended, starting a round of "
+                                 "migration ...")
+                    vm.migrate(timeout=mig_timeout, protocol=mig_protocol)
+            else:
+                session.cmd_output("bin/autotest control", timeout=timeout,
+                                   print_func=logging.info)
+        finally:
+            logging.info("------------- End of test output ------------")
+            if migrate_background and bg:
+                bg.join()
+    except kvm_subprocess.ShellTimeoutError:
+        if vm.is_alive():
+            get_results()
+            get_results_summary()
+            raise error.TestError("Timeout elapsed while waiting for job to "
+                                  "complete")
+        else:
             raise error.TestError("Autotest job on guest failed "
                                   "(VM terminated during job)")
-        if not session.is_alive():
-            get_results()
-            raise error.TestError("Autotest job on guest failed "
-                                  "(Remote session terminated during job)")
+    except kvm_subprocess.ShellProcessTerminatedError:
         get_results()
-        get_results_summary()
-        raise error.TestError("Timeout elapsed while waiting for job to "
-                              "complete")
+        raise error.TestError("Autotest job on guest failed "
+                              "(Remote session terminated during job)")
 
     results = get_results_summary()
     get_results()
@@ -589,21 +658,25 @@
         process.close()
         return status, output
     else:
-        session.sendline(command)
-        status, output = session.read_up_to_prompt(timeout=timeout,
-                                                   print_func=output_func)
-        if not status:
+        output = ""
+        try:
+            output = session.cmd_output(command, timeout=timeout,
+                                        print_func=output_func)
+        except kvm_subprocess.ShellTimeoutError:
             # Send ctrl+c (SIGINT) through ssh session
             session.send("\003")
-            status, output2 = session.read_up_to_prompt(print_func=output_func)
-            output += output2
-            if not status:
+            try:
+                output2 = session.read_up_to_prompt(print_func=output_func)
+                output += output2
+            except kvm_subprocess.ExpectTimeoutError, e:
+                output += e.output
                 # We also need to use this session to query the return value
                 session.send("\003")
 
         session.sendline(session.status_test_command)
-        s2, o2 = session.read_up_to_prompt()
-        if not s2:
+        try:
+            o2 = session.read_up_to_prompt()
+        except kvm_subprocess.ExpectError:
             status = -1
         else:
             try:
@@ -670,7 +743,7 @@
     @mac_address: the macaddress of nic
     """
 
-    output = session.get_command_output("ifconfig -a")
+    output = session.cmd_output("ifconfig -a")
 
     try:
         ethname = re.findall("(\w+)\s+Link.*%s" % mac_address, output,
diff --git a/client/tests/kvm/kvm_utils.py b/client/tests/kvm/kvm_utils.py
index b849b37..44ebb88 100644
--- a/client/tests/kvm/kvm_utils.py
+++ b/client/tests/kvm/kvm_utils.py
@@ -5,7 +5,7 @@
 """
 
 import time, string, random, socket, os, signal, re, logging, commands, cPickle
-import fcntl, shelve, ConfigParser
+import fcntl, shelve, ConfigParser, rss_file_transfer, threading, sys, UserDict
 from autotest_lib.client.bin import utils, os_dep
 from autotest_lib.client.common_lib import error, logging_config
 import kvm_subprocess
@@ -27,74 +27,152 @@
     f.close()
 
 
-def dump_env(obj, filename):
+def is_vm(obj):
     """
-    Dump KVM test environment to a file.
+    Tests whether a given object is a VM object.
 
-    @param filename: Path to a file where the environment will be dumped to.
+    @param obj: Python object.
     """
-    file = open(filename, "w")
-    cPickle.dump(obj, file)
-    file.close()
+    return obj.__class__.__name__ == "VM"
 
 
-def load_env(filename, version):
+class Env(UserDict.IterableUserDict):
     """
-    Load KVM test environment from an env file.
-    If the version recorded in the file is lower than version, return an empty
-    env.  If some other error occurs during unpickling, return an empty env.
-
-    @param filename: Path to an env file.
+    A dict-like object containing global objects used by tests.
     """
-    default = {"version": version}
-    try:
-        file = open(filename, "r")
-        env = cPickle.load(file)
-        file.close()
-        if env.get("version", 0) < version:
-            logging.warn("Incompatible env file found. Not using it.")
-            return default
-        return env
-    # Almost any exception can be raised during unpickling, so let's catch
-    # them all
-    except Exception, e:
-        logging.warn(e)
-        return default
+    def __init__(self, filename=None, version=0):
+        """
+        Create an empty Env object or load an existing one from a file.
+
+        If the version recorded in the file is lower than version, or if some
+        error occurs during unpickling, or if filename is not supplied,
+        create an empty Env object.
+
+        @param filename: Path to an env file.
+        @param version: Required env version (int).
+        """
+        UserDict.IterableUserDict.__init__(self)
+        empty = {"version": version}
+        if filename:
+            self._filename = filename
+            try:
+                f = open(filename, "r")
+                env = cPickle.load(f)
+                f.close()
+                if env.get("version", 0) >= version:
+                    self.data = env
+                else:
+                    logging.warn("Incompatible env file found. Not using it.")
+                    self.data = empty
+            # Almost any exception can be raised during unpickling, so let's
+            # catch them all
+            except Exception, e:
+                logging.warn(e)
+                self.data = empty
+        else:
+            self.data = empty
 
 
-def get_sub_dict(dict, name):
+    def save(self, filename=None):
+        """
+        Pickle the contents of the Env object into a file.
+
+        @param filename: Filename to pickle the dict into.  If not supplied,
+                use the filename from which the dict was loaded.
+        """
+        filename = filename or self._filename
+        f = open(filename, "w")
+        cPickle.dump(self.data, f)
+        f.close()
+
+
+    def get_all_vms(self):
+        """
+        Return a list of all VM objects in this Env object.
+        """
+        return [o for o in self.values() if is_vm(o)]
+
+
+    def get_vm(self, name):
+        """
+        Return a VM object by its name.
+
+        @param name: VM name.
+        """
+        return self.get("vm__%s" % name)
+
+
+    def register_vm(self, name, vm):
+        """
+        Register a VM in this Env object.
+
+        @param name: VM name.
+        @param vm: VM object.
+        """
+        self["vm__%s" % name] = vm
+
+
+    def unregister_vm(self, name):
+        """
+        Remove a given VM.
+
+        @param name: VM name.
+        """
+        del self["vm__%s" % name]
+
+
+    def register_installer(self, installer):
+        """
+        Register an installer that was just run.
+
+        The installer will be available for other tests, so that
+        information about the installed KVM modules and qemu-kvm can be used by
+        them.
+        """
+        self['last_installer'] = installer
+
+
+    def previous_installer(self):
+        """
+        Return the last installer that was registered
+        """
+        return self.get('last_installer')
+
+
+class Params(UserDict.IterableUserDict):
     """
-    Return a "sub-dict" corresponding to a specific object.
-
-    Operate on a copy of dict: for each key that ends with the suffix
-    "_" + name, strip the suffix from the key, and set the value of
-    the stripped key to that of the key. Return the resulting dict.
-
-    @param name: Suffix of the key we want to set the value.
+    A dict-like object passed to every test.
     """
-    suffix = "_" + name
-    new_dict = dict.copy()
-    for key in dict.keys():
-        if key.endswith(suffix):
-            new_key = key.split(suffix)[0]
-            new_dict[new_key] = dict[key]
-    return new_dict
+    def objects(self, key):
+        """
+        Return the names of objects defined using a given key.
+
+        @param key: The name of the key whose value lists the objects
+                (e.g. 'nics').
+        """
+        return self.get(key, "").split()
 
 
-def get_sub_dict_names(dict, keyword):
-    """
-    Return a list of "sub-dict" names that may be extracted with get_sub_dict.
+    def object_params(self, obj_name):
+        """
+        Return a dict-like object containing the parameters of an individual
+        object.
 
-    This function may be modified to change the behavior of all functions that
-    deal with multiple objects defined in dicts (e.g. VMs, images, NICs).
+        This method behaves as follows: the suffix '_' + obj_name is removed
+        from all key names that have it.  Other key names are left unchanged.
+        The values of keys with the suffix overwrite the values of their
+        suffixless versions.
 
-    @param keyword: A key in dict (e.g. "vms", "images", "nics").
-    """
-    names = dict.get(keyword)
-    if names:
-        return names.split()
-    else:
-        return []
+        @param obj_name: The name of the object (objects are listed by the
+                objects() method).
+        """
+        suffix = "_" + obj_name
+        new_dict = self.copy()
+        for key in self:
+            if key.endswith(suffix):
+                new_key = key.split(suffix)[0]
+                new_dict[new_key] = self[key]
+        return new_dict
 
 
 # Functions related to MAC/IP addresses
@@ -240,60 +318,6 @@
     return bool(regex.search(o))
 
 
-# Functions for working with the environment (a dict-like object)
-
-def is_vm(obj):
-    """
-    Tests whether a given object is a VM object.
-
-    @param obj: Python object (pretty much everything on python).
-    """
-    return obj.__class__.__name__ == "VM"
-
-
-def env_get_all_vms(env):
-    """
-    Return a list of all VM objects on a given environment.
-
-    @param env: Dictionary with environment items.
-    """
-    vms = []
-    for obj in env.values():
-        if is_vm(obj):
-            vms.append(obj)
-    return vms
-
-
-def env_get_vm(env, name):
-    """
-    Return a VM object by its name.
-
-    @param name: VM name.
-    """
-    return env.get("vm__%s" % name)
-
-
-def env_register_vm(env, name, vm):
-    """
-    Register a given VM in a given env.
-
-    @param env: Environment where we will register the VM.
-    @param name: VM name.
-    @param vm: VM object.
-    """
-    env["vm__%s" % name] = vm
-
-
-def env_unregister_vm(env, name):
-    """
-    Remove a given VM from a given env.
-
-    @param env: Environment where we will un-register the VM.
-    @param name: VM name.
-    """
-    del env["vm__%s" % name]
-
-
 # Utility functions for dealing with external processes
 
 def find_command(cmd):
@@ -403,8 +427,7 @@
     except error.CmdError:
         desc = "no tag found"
 
-    logging.info("Commit hash for %s is %s (%s)" % (repository, h.strip(),
-                                                    desc))
+    logging.info("Commit hash for %s is %s (%s)", repository, h.strip(), desc)
     return srcdir
 
 
@@ -421,7 +444,7 @@
     os.chdir(source_dir)
     has_qemu_dir = os.path.isdir('qemu')
     has_kvm_dir = os.path.isdir('kvm')
-    if has_qemu_dir and not has_kvm_dir:
+    if has_qemu_dir:
         logging.debug("qemu directory detected, source dir layout 1")
         return 1
     if has_kvm_dir and not has_qemu_dir:
@@ -431,8 +454,80 @@
         raise error.TestError("Unknown source dir layout, cannot proceed.")
 
 
-# The following are functions used for SSH, SCP and Telnet communication with
-# guests.
+# Functions and classes used for logging into guests and transferring files
+
+class LoginError(Exception):
+    def __init__(self, msg, output):
+        Exception.__init__(self, msg, output)
+        self.msg = msg
+        self.output = output
+
+    def __str__(self):
+        return "%s    (output: %r)" % (self.msg, self.output)
+
+
+class LoginAuthenticationError(LoginError):
+    pass
+
+
+class LoginTimeoutError(LoginError):
+    def __init__(self, output):
+        LoginError.__init__(self, "Login timeout expired", output)
+
+
+class LoginProcessTerminatedError(LoginError):
+    def __init__(self, status, output):
+        LoginError.__init__(self, None, output)
+        self.status = status
+
+    def __str__(self):
+        return ("Client process terminated    (status: %s,    output: %r)" %
+                (self.status, self.output))
+
+
+class LoginBadClientError(LoginError):
+    def __init__(self, client):
+        LoginError.__init__(self, None, None)
+        self.client = client
+
+    def __str__(self):
+        return "Unknown remote shell client: %r" % self.client
+
+
+class SCPError(Exception):
+    def __init__(self, msg, output):
+        Exception.__init__(self, msg, output)
+        self.msg = msg
+        self.output = output
+
+    def __str__(self):
+        return "%s    (output: %r)" % (self.msg, self.output)
+
+
+class SCPAuthenticationError(SCPError):
+    pass
+
+
+class SCPAuthenticationTimeoutError(SCPAuthenticationError):
+    def __init__(self, output):
+        SCPAuthenticationError.__init__(self, "Authentication timeout expired",
+                                        output)
+
+
+class SCPTransferTimeoutError(SCPError):
+    def __init__(self, output):
+        SCPError.__init__(self, "Transfer timeout expired", output)
+
+
+class SCPTransferFailedError(SCPError):
+    def __init__(self, status, output):
+        SCPError.__init__(self, None, output)
+        self.status = status
+
+    def __str__(self):
+        return ("SCP transfer failed    (status: %s,    output: %r)" %
+                (self.status, self.output))
+
 
 def _remote_login(session, username, password, prompt, timeout=10):
     """
@@ -442,116 +537,68 @@
 
     @brief: Log into a remote host (guest) using SSH or Telnet.
 
-    @param session: A kvm_expect or kvm_shell_session instance to operate on
+    @param session: An Expect or ShellSession instance to operate on
     @param username: The username to send in reply to a login prompt
     @param password: The password to send in reply to a password prompt
     @param prompt: The shell prompt that indicates a successful login
     @param timeout: The maximal time duration (in seconds) to wait for each
             step of the login procedure (i.e. the "Are you sure" prompt, the
             password prompt, the shell prompt, etc)
-
-    @return: True on success and False otherwise.
+    @raise LoginTimeoutError: If timeout expires
+    @raise LoginAuthenticationError: If authentication fails
+    @raise LoginProcessTerminatedError: If the client terminates during login
+    @raise LoginError: If some other error occurs
     """
     password_prompt_count = 0
     login_prompt_count = 0
 
     while True:
-        (match, text) = session.read_until_last_line_matches(
+        try:
+            match, text = session.read_until_last_line_matches(
                 [r"[Aa]re you sure", r"[Pp]assword:\s*$", r"[Ll]ogin:\s*$",
                  r"[Cc]onnection.*closed", r"[Cc]onnection.*refused",
                  r"[Pp]lease wait", prompt],
-                 timeout=timeout, internal_timeout=0.5)
-        if match == 0:  # "Are you sure you want to continue connecting"
-            logging.debug("Got 'Are you sure...'; sending 'yes'")
-            session.sendline("yes")
-            continue
-        elif match == 1:  # "password:"
-            if password_prompt_count == 0:
-                logging.debug("Got password prompt; sending '%s'" % password)
-                session.sendline(password)
-                password_prompt_count += 1
-                continue
-            else:
-                logging.debug("Got password prompt again")
-                return False
-        elif match == 2:  # "login:"
-            if login_prompt_count == 0:
-                logging.debug("Got username prompt; sending '%s'" % username)
-                session.sendline(username)
-                login_prompt_count += 1
-                continue
-            else:
-                logging.debug("Got username prompt again")
-                return False
-        elif match == 3:  # "Connection closed"
-            logging.debug("Got 'Connection closed'")
-            return False
-        elif match == 4:  # "Connection refused"
-            logging.debug("Got 'Connection refused'")
-            return False
-        elif match == 5:  # "Please wait"
-            logging.debug("Got 'Please wait'")
-            timeout = 30
-            continue
-        elif match == 6:  # prompt
-            logging.debug("Got shell prompt -- logged in")
-            return session
-        else:  # match == None
-            logging.debug("Timeout elapsed or process terminated")
-            return False
-
-
-def _remote_scp(session, password, transfer_timeout=600, login_timeout=10):
-    """
-    Transfer file(s) to a remote host (guest) using SCP.  Wait for questions
-    and provide answers.  If login_timeout expires while waiting for output
-    from the child (e.g. a password prompt), fail.  If transfer_timeout expires
-    while waiting for the transfer to complete, fail.
-
-    @brief: Transfer files using SCP, given a command line.
-
-    @param session: A kvm_expect or kvm_shell_session instance to operate on
-    @param password: The password to send in reply to a password prompt.
-    @param transfer_timeout: The time duration (in seconds) to wait for the
-            transfer to complete.
-    @param login_timeout: The maximal time duration (in seconds) to wait for
-            each step of the login procedure (i.e. the "Are you sure" prompt or
-            the password prompt)
-
-    @return: True if the transfer succeeds and False on failure.
-    """
-    password_prompt_count = 0
-    timeout = login_timeout
-
-    while True:
-        (match, text) = session.read_until_last_line_matches(
-                [r"[Aa]re you sure", r"[Pp]assword:\s*$", r"lost connection"],
                 timeout=timeout, internal_timeout=0.5)
-        if match == 0:  # "Are you sure you want to continue connecting"
-            logging.debug("Got 'Are you sure...'; sending 'yes'")
-            session.sendline("yes")
-            continue
-        elif match == 1:  # "password:"
-            if password_prompt_count == 0:
-                logging.debug("Got password prompt; sending '%s'" % password)
-                session.sendline(password)
-                password_prompt_count += 1
-                timeout = transfer_timeout
+            if match == 0:  # "Are you sure you want to continue connecting"
+                logging.debug("Got 'Are you sure...'; sending 'yes'")
+                session.sendline("yes")
                 continue
-            else:
-                logging.debug("Got password prompt again")
-                return False
-        elif match == 2:  # "lost connection"
-            logging.debug("Got 'lost connection'")
-            return False
-        else:  # match == None
-            if session.is_alive():
-                logging.debug("Timeout expired")
-                return False
-            else:
-                status = session.get_status()
-                logging.debug("SCP process terminated with status %s", status)
-                return status == 0
+            elif match == 1:  # "password:"
+                if password_prompt_count == 0:
+                    logging.debug("Got password prompt; sending '%s'", password)
+                    session.sendline(password)
+                    password_prompt_count += 1
+                    continue
+                else:
+                    raise LoginAuthenticationError("Got password prompt twice",
+                                                   text)
+            elif match == 2:  # "login:"
+                if login_prompt_count == 0 and password_prompt_count == 0:
+                    logging.debug("Got username prompt; sending '%s'", username)
+                    session.sendline(username)
+                    login_prompt_count += 1
+                    continue
+                else:
+                    if login_prompt_count > 0:
+                        msg = "Got username prompt twice"
+                    else:
+                        msg = "Got username prompt after password prompt"
+                    raise LoginAuthenticationError(msg, text)
+            elif match == 3:  # "Connection closed"
+                raise LoginError("Client said 'connection closed'", text)
+            elif match == 4:  # "Connection refused"
+                raise LoginError("Client said 'connection refused'", text)
+            elif match == 5:  # "Please wait"
+                logging.debug("Got 'Please wait'")
+                timeout = 30
+                continue
+            elif match == 6:  # prompt
+                logging.debug("Got shell prompt -- logged in")
+                break
+        except kvm_subprocess.ExpectTimeoutError, e:
+            raise LoginTimeoutError(e.output)
+        except kvm_subprocess.ExpectProcessTerminatedError, e:
+            raise LoginProcessTerminatedError(e.status, e.output)
 
 
 def remote_login(client, host, port, username, password, prompt, linesep="\n",
@@ -571,8 +618,9 @@
     @param timeout: The maximal time duration (in seconds) to wait for
             each step of the login procedure (i.e. the "Are you sure" prompt
             or the password prompt)
-
-    @return: kvm_shell_session object on success and None on failure.
+    @raise LoginBadClientError: If an unknown client is requested
+    @raise: Whatever _remote_login() raises
+    @return: A ShellSession object.
     """
     if client == "ssh":
         cmd = ("ssh -o UserKnownHostsFile=/dev/null "
@@ -583,19 +631,109 @@
     elif client == "nc":
         cmd = "nc %s %s" % (host, port)
     else:
-        logging.error("Unknown remote shell client: %s" % client)
-        return
+        raise LoginBadClientError(client)
 
-    logging.debug("Trying to login with command '%s'" % cmd)
-    session = kvm_subprocess.kvm_shell_session(cmd, linesep=linesep,
-                                               prompt=prompt)
-    if _remote_login(session, username, password, prompt, timeout):
-        if log_filename:
-            session.set_output_func(log_line)
-            session.set_output_params((log_filename,))
-        return session
-    else:
+    logging.debug("Trying to login with command '%s'", cmd)
+    session = kvm_subprocess.ShellSession(cmd, linesep=linesep, prompt=prompt)
+    try:
+        _remote_login(session, username, password, prompt, timeout)
+    except:
         session.close()
+        raise
+    if log_filename:
+        session.set_output_func(log_line)
+        session.set_output_params((log_filename,))
+    return session
+
+
+def wait_for_login(client, host, port, username, password, prompt, linesep="\n",
+                   log_filename=None, timeout=240, internal_timeout=10):
+    """
+    Make multiple attempts to log into a remote host (guest) until one succeeds
+    or timeout expires.
+
+    @param timeout: Total time duration to wait for a successful login
+    @param internal_timeout: The maximal time duration (in seconds) to wait for
+            each step of the login procedure (e.g. the "Are you sure" prompt
+            or the password prompt)
+    @see: remote_login()
+    @raise: Whatever remote_login() raises
+    @return: A ShellSession object.
+    """
+    logging.debug("Attempting to log into %s:%s using %s (timeout %ds)",
+                  host, port, client, timeout)
+    end_time = time.time() + timeout
+    while time.time() < end_time:
+        try:
+            return remote_login(client, host, port, username, password, prompt,
+                                linesep, log_filename, internal_timeout)
+        except LoginError, e:
+            logging.debug(e)
+        time.sleep(2)
+    # Timeout expired; try one more time but don't catch exceptions
+    return remote_login(client, host, port, username, password, prompt,
+                        linesep, log_filename, internal_timeout)
+
+
+def _remote_scp(session, password, transfer_timeout=600, login_timeout=10):
+    """
+    Transfer file(s) to a remote host (guest) using SCP.  Wait for questions
+    and provide answers.  If login_timeout expires while waiting for output
+    from the child (e.g. a password prompt), fail.  If transfer_timeout expires
+    while waiting for the transfer to complete, fail.
+
+    @brief: Transfer files using SCP, given a command line.
+
+    @param session: An Expect or ShellSession instance to operate on
+    @param password: The password to send in reply to a password prompt.
+    @param transfer_timeout: The time duration (in seconds) to wait for the
+            transfer to complete.
+    @param login_timeout: The maximal time duration (in seconds) to wait for
+            each step of the login procedure (i.e. the "Are you sure" prompt or
+            the password prompt)
+    @raise SCPAuthenticationError: If authentication fails
+    @raise SCPTransferTimeoutError: If the transfer fails to complete in time
+    @raise SCPTransferFailedError: If the process terminates with a nonzero
+            exit code
+    @raise SCPError: If some other error occurs
+    """
+    password_prompt_count = 0
+    timeout = login_timeout
+    authentication_done = False
+
+    while True:
+        try:
+            match, text = session.read_until_last_line_matches(
+                [r"[Aa]re you sure", r"[Pp]assword:\s*$", r"lost connection"],
+                timeout=timeout, internal_timeout=0.5)
+            if match == 0:  # "Are you sure you want to continue connecting"
+                logging.debug("Got 'Are you sure...'; sending 'yes'")
+                session.sendline("yes")
+                continue
+            elif match == 1:  # "password:"
+                if password_prompt_count == 0:
+                    logging.debug("Got password prompt; sending '%s'", password)
+                    session.sendline(password)
+                    password_prompt_count += 1
+                    timeout = transfer_timeout
+                    authentication_done = True
+                    continue
+                else:
+                    raise SCPAuthenticationError("Got password prompt twice",
+                                                 text)
+            elif match == 2:  # "lost connection"
+                raise SCPError("SCP client said 'lost connection'", text)
+        except kvm_subprocess.ExpectTimeoutError, e:
+            if authentication_done:
+                raise SCPTransferTimeoutError(e.output)
+            else:
+                raise SCPAuthenticationTimeoutError(e.output)
+        except kvm_subprocess.ExpectProcessTerminatedError, e:
+            if e.status == 0:
+                logging.debug("SCP process terminated with status 0")
+                break
+            else:
+                raise SCPTransferFailedError(e.status, e.output)
 
 
 def remote_scp(command, password, log_filename=None, transfer_timeout=600,
@@ -614,24 +752,21 @@
     @param login_timeout: The maximal time duration (in seconds) to wait for
             each step of the login procedure (i.e. the "Are you sure" prompt
             or the password prompt)
-
-    @return: True if the transfer succeeds and False on failure.
+    @raise: Whatever _remote_scp() raises
     """
     logging.debug("Trying to SCP with command '%s', timeout %ss",
                   command, transfer_timeout)
-
     if log_filename:
         output_func = log_line
         output_params = (log_filename,)
     else:
         output_func = None
         output_params = ()
-
-    session = kvm_subprocess.kvm_expect(command,
-                                        output_func=output_func,
-                                        output_params=output_params)
+    session = kvm_subprocess.Expect(command,
+                                    output_func=output_func,
+                                    output_params=output_params)
     try:
-        return _remote_scp(session, password, transfer_timeout, login_timeout)
+        _remote_scp(session, password, transfer_timeout, login_timeout)
     finally:
         session.close()
 
@@ -639,7 +774,7 @@
 def scp_to_remote(host, port, username, password, local_path, remote_path,
                   log_filename=None, timeout=600):
     """
-    Copy files to a remote host (guest).
+    Copy files to a remote host (guest) through scp.
 
     @param host: Hostname or IP address
     @param username: Username (if required)
@@ -649,13 +784,12 @@
     @param log_filename: If specified, log all output to this file
     @param timeout: The time duration (in seconds) to wait for the transfer
             to complete.
-
-    @return: True on success and False on failure.
+    @raise: Whatever remote_scp() raises
     """
     command = ("scp -v -o UserKnownHostsFile=/dev/null "
                "-o PreferredAuthentications=password -r -P %s %s %s@%s:%s" %
                (port, local_path, username, host, remote_path))
-    return remote_scp(command, password, log_filename, timeout)
+    remote_scp(command, password, log_filename, timeout)
 
 
 def scp_from_remote(host, port, username, password, remote_path, local_path,
@@ -671,13 +805,70 @@
     @param log_filename: If specified, log all output to this file
     @param timeout: The time duration (in seconds) to wait for the transfer
             to complete.
-
-    @return: True on success and False on failure.
+    @raise: Whatever remote_scp() raises
     """
     command = ("scp -v -o UserKnownHostsFile=/dev/null "
                "-o PreferredAuthentications=password -r -P %s %s@%s:%s %s" %
                (port, username, host, remote_path, local_path))
-    return remote_scp(command, password, log_filename, timeout)
+    remote_scp(command, password, log_filename, timeout)
+
+
+def copy_files_to(address, client, username, password, port, local_path,
+                  remote_path, log_filename=None, verbose=False, timeout=600):
+    """
+    Copy files to a remote host (guest) using the selected client.
+
+    @param client: Type of transfer client
+    @param username: Username (if required)
+    @param password: Password (if required)
+    @param local_path: Path on the local machine where we are copying from
+    @param remote_path: Path on the remote machine where we are copying to
+    @param address: Address of remote host (guest)
+    @param log_filename: If specified, log all output to this file (SCP only)
+    @param verbose: If True, log some stats using logging.debug (RSS only)
+    @param timeout: The time duration (in seconds) to wait for the transfer to
+            complete.
+    @raise: Whatever scp_to_remote() raises
+    """
+    if client == "scp":
+        scp_to_remote(address, port, username, password, local_path,
+                      remote_path, log_filename, timeout)
+    elif client == "rss":
+        log_func = None
+        if verbose:
+            log_func = logging.debug
+        c = rss_file_transfer.FileUploadClient(address, port, log_func)
+        c.upload(local_path, remote_path, timeout)
+        c.close()
+
+
+def copy_files_from(address, client, username, password, port, remote_path,
+                    local_path, log_filename=None, verbose=False, timeout=600):
+    """
+    Copy files from a remote host (guest) using the selected client.
+
+    @param client: Type of transfer client
+    @param username: Username (if required)
+    @param password: Password (if required)
+    @param remote_path: Path on the remote machine where we are copying from
+    @param local_path: Path on the local machine where we are copying to
+    @param address: Address of remote host (guest)
+    @param log_filename: If specified, log all output to this file (SCP only)
+    @param verbose: If True, log some stats using logging.debug (RSS only)
+    @param timeout: The time duration (in seconds) to wait for the transfer to
+            complete.
+    @raise: Whatever scp_from_remote() raises
+    """
+    if client == "scp":
+        scp_from_remote(address, port, username, password, remote_path,
+                        local_path, log_filename, timeout)
+    elif client == "rss":
+        log_func = None
+        if verbose:
+            log_func = logging.debug
+        c = rss_file_transfer.FileDownloadClient(address, port, log_func)
+        c.download(remote_path, local_path, timeout)
+        c.close()
 
 
 # The following are utility functions related to ports.
@@ -866,7 +1057,7 @@
 
     while time.time() < end_time:
         if text:
-            logging.debug("%s (%f secs)" % (text, time.time() - start_time))
+            logging.debug("%s (%f secs)", text, (time.time() - start_time))
 
         output = func()
         if output:
@@ -978,6 +1169,93 @@
     return re.sub(":", " ", commands.getoutput(cmd))
 
 
+class Thread(threading.Thread):
+    """
+    Run a function in a background thread.
+    """
+    def __init__(self, target, args=(), kwargs={}):
+        """
+        Initialize the instance.
+
+        @param target: Function to run in the thread.
+        @param args: Arguments to pass to target.
+        @param kwargs: Keyword arguments to pass to target.
+        """
+        threading.Thread.__init__(self)
+        self._target = target
+        self._args = args
+        self._kwargs = kwargs
+
+
+    def run(self):
+        """
+        Run target (passed to the constructor).  No point in calling this
+        function directly.  Call start() to make this function run in a new
+        thread.
+        """
+        self._e = None
+        self._retval = None
+        try:
+            try:
+                self._retval = self._target(*self._args, **self._kwargs)
+            except:
+                self._e = sys.exc_info()
+                raise
+        finally:
+            # Avoid circular references (start() may be called only once so
+            # it's OK to delete these)
+            del self._target, self._args, self._kwargs
+
+
+    def join(self, timeout=None, suppress_exception=False):
+        """
+        Join the thread.  If target raised an exception, re-raise it.
+        Otherwise, return the value returned by target.
+
+        @param timeout: Timeout value to pass to threading.Thread.join().
+        @param suppress_exception: If True, don't re-raise the exception.
+        """
+        threading.Thread.join(self, timeout)
+        try:
+            if self._e:
+                if not suppress_exception:
+                    # Because the exception was raised in another thread, we
+                    # need to explicitly insert the current context into it
+                    s = error.exception_context(self._e[1])
+                    s = error.join_contexts(error.get_context(), s)
+                    error.set_exception_context(self._e[1], s)
+                    raise self._e[0], self._e[1], self._e[2]
+            else:
+                return self._retval
+        finally:
+            # Avoid circular references (join() may be called multiple times
+            # so we can't delete these)
+            self._e = None
+            self._retval = None
+
+
+def parallel(targets):
+    """
+    Run multiple functions in parallel.
+
+    @param targets: A sequence of tuples or functions.  If it's a sequence of
+            tuples, each tuple will be interpreted as (target, args, kwargs) or
+            (target, args) or (target,) depending on its length.  If it's a
+            sequence of functions, the functions will be called without
+            arguments.
+    @return: A list of the values returned by the functions called.
+    """
+    threads = []
+    for target in targets:
+        if isinstance(target, tuple) or isinstance(target, list):
+            t = Thread(*target)
+        else:
+            t = Thread(target)
+        threads.append(t)
+        t.start()
+    return [t.join() for t in threads]
+
+
 class KvmLoggingConfig(logging_config.LoggingConfig):
     """
     Used with the sole purpose of providing convenient logging setup
@@ -1176,8 +1454,8 @@
         # Re-probe driver with proper number of VFs
         if re_probe:
             cmd = "modprobe %s %s" % (self.driver, self.driver_option)
-            logging.info("Loading the driver '%s' with option '%s'" %
-                                   (self.driver, self.driver_option))
+            logging.info("Loading the driver '%s' with option '%s'",
+                         self.driver, self.driver_option)
             s, o = commands.getstatusoutput(cmd)
             if s:
                 return False
@@ -1205,8 +1483,8 @@
             if not full_id:
                 continue
             drv_path = os.path.join(base_dir, "devices/%s/driver" % full_id)
-            dev_prev_driver= os.path.realpath(os.path.join(drv_path,
-                                              os.readlink(drv_path)))
+            dev_prev_driver = os.path.realpath(os.path.join(drv_path,
+                                               os.readlink(drv_path)))
             self.dev_drivers[pci_id] = dev_prev_driver
 
             # Judge whether the device driver has been binded to stub
@@ -1347,7 +1625,7 @@
                              "provide an appropriate tag or build name.")
 
         if not build:
-            builds = self.session.listTagged(tag, latest=True,
+            builds = self.session.listTagged(tag, latest=True, inherit=True,
                                              package=src_package)
             if not builds:
                 raise ValueError("Tag %s has no builds of %s" % (tag,
@@ -1390,3 +1668,58 @@
                 rpm_paths.append(r)
 
         return rpm_paths
+
+
+def umount(src, mount_point, type):
+    """
+    Umount the src mounted in mount_point.
+
+    @src: mount source
+    @mount_point: mount point
+    @type: file system type
+    """
+
+    mount_string = "%s %s %s" % (src, mount_point, type)
+    if mount_string in file("/etc/mtab").read():
+        umount_cmd = "umount %s" % mount_point
+        try:
+            utils.system(umount_cmd)
+            return True
+        except error.CmdError:
+            return False
+    else:
+        logging.debug("%s is not mounted under %s", src, mount_point)
+        return True
+
+
+def mount(src, mount_point, type, perm="rw"):
+    """
+    Mount the src into mount_point of the host.
+
+    @src: mount source
+    @mount_point: mount point
+    @type: file system type
+    @perm: mount permission
+    """
+    umount(src, mount_point, type)
+    mount_string = "%s %s %s %s" % (src, mount_point, type, perm)
+
+    if mount_string in file("/etc/mtab").read():
+        logging.debug("%s is already mounted in %s with %s",
+                      src, mount_point, perm)
+        return True
+
+    mount_cmd = "mount -t %s %s %s -o %s" % (type, src, mount_point, perm)
+    try:
+        utils.system(mount_cmd)
+    except error.CmdError:
+        return False
+
+    logging.debug("Verify the mount through /etc/mtab")
+    if mount_string in file("/etc/mtab").read():
+        logging.debug("%s is successfully mounted", src)
+        return True
+    else:
+        logging.error("Can't find mounted NFS share - /etc/mtab contents \n%s",
+                      file("/etc/mtab").read())
+        return False
diff --git a/client/tests/kvm/kvm_vm.py b/client/tests/kvm/kvm_vm.py
index a860437..969558b 100755
--- a/client/tests/kvm/kvm_vm.py
+++ b/client/tests/kvm/kvm_vm.py
@@ -5,12 +5,179 @@
 @copyright: 2008-2009 Red Hat Inc.
 """
 
-import time, socket, os, logging, fcntl, re, commands, shelve, glob
-import kvm_utils, kvm_subprocess, kvm_monitor, rss_file_transfer
+import time, os, logging, fcntl, re, commands, glob
+import kvm_utils, kvm_subprocess, kvm_monitor
 from autotest_lib.client.common_lib import error
 from autotest_lib.client.bin import utils
 
 
+class VMError(Exception):
+    pass
+
+
+class VMCreateError(VMError):
+    def __init__(self, cmd, status, output):
+        VMError.__init__(self, cmd, status, output)
+        self.cmd = cmd
+        self.status = status
+        self.output = output
+
+    def __str__(self):
+        return ("VM creation command failed:    %r    (status: %s,    "
+                "output: %r)" % (self.cmd, self.status, self.output))
+
+
+class VMHashMismatchError(VMError):
+    def __init__(self, actual, expected):
+        VMError.__init__(self, actual, expected)
+        self.actual_hash = actual
+        self.expected_hash = expected
+
+    def __str__(self):
+        return ("CD image hash (%s) differs from expected one (%s)" %
+                (self.actual_hash, self.expected_hash))
+
+
+class VMImageMissingError(VMError):
+    def __init__(self, filename):
+        VMError.__init__(self, filename)
+        self.filename = filename
+
+    def __str__(self):
+        return "CD image file not found: %r" % self.filename
+
+
+class VMImageCheckError(VMError):
+    def __init__(self, filename):
+        VMError.__init__(self, filename)
+        self.filename = filename
+
+    def __str__(self):
+        return "Errors found on image: %r" % self.filename
+
+
+class VMBadPATypeError(VMError):
+    def __init__(self, pa_type):
+        VMError.__init__(self, pa_type)
+        self.pa_type = pa_type
+
+    def __str__(self):
+        return "Unsupported PCI assignable type: %r" % self.pa_type
+
+
+class VMPAError(VMError):
+    def __init__(self, pa_type):
+        VMError.__init__(self, pa_type)
+        self.pa_type = pa_type
+
+    def __str__(self):
+        return ("No PCI assignable devices could be assigned "
+                "(pci_assignable=%r)" % self.pa_type)
+
+
+class VMPostCreateError(VMError):
+    def __init__(self, cmd, output):
+        VMError.__init__(self, cmd, output)
+        self.cmd = cmd
+        self.output = output
+
+
+class VMHugePageError(VMPostCreateError):
+    def __str__(self):
+        return ("Cannot allocate hugepage memory    (command: %r,    "
+                "output: %r)" % (self.cmd, self.output))
+
+
+class VMKVMInitError(VMPostCreateError):
+    def __str__(self):
+        return ("Cannot initialize KVM    (command: %r,    output: %r)" %
+                (self.cmd, self.output))
+
+
+class VMDeadError(VMError):
+    def __init__(self, status, output):
+        VMError.__init__(self, status, output)
+        self.status = status
+        self.output = output
+
+    def __str__(self):
+        return ("VM process is dead    (status: %s,    output: %r)" %
+                (self.status, self.output))
+
+
+class VMAddressError(VMError):
+    pass
+
+
+class VMPortNotRedirectedError(VMAddressError):
+    def __init__(self, port):
+        VMAddressError.__init__(self, port)
+        self.port = port
+
+    def __str__(self):
+        return "Port not redirected: %s" % self.port
+
+
+class VMAddressVerificationError(VMAddressError):
+    def __init__(self, mac, ip):
+        VMAddressError.__init__(self, mac, ip)
+        self.mac = mac
+        self.ip = ip
+
+    def __str__(self):
+        return ("Cannot verify MAC-IP address mapping using arping: "
+                "%s ---> %s" % (self.mac, self.ip))
+
+
+class VMMACAddressMissingError(VMAddressError):
+    def __init__(self, nic_index):
+        VMAddressError.__init__(self, nic_index)
+        self.nic_index = nic_index
+
+    def __str__(self):
+        return "No MAC address defined for NIC #%s" % self.nic_index
+
+
+class VMIPAddressMissingError(VMAddressError):
+    def __init__(self, mac):
+        VMAddressError.__init__(self, mac)
+        self.mac = mac
+
+    def __str__(self):
+        return "Cannot find IP address for MAC address %s" % self.mac
+
+
+class VMMigrateError(VMError):
+    pass
+
+
+class VMMigrateTimeoutError(VMMigrateError):
+    pass
+
+
+class VMMigrateCancelError(VMMigrateError):
+    pass
+
+
+class VMMigrateFailedError(VMMigrateError):
+    pass
+
+
+class VMMigrateStateMismatchError(VMMigrateError):
+    def __init__(self, src_hash, dst_hash):
+        VMMigrateError.__init__(self, src_hash, dst_hash)
+        self.src_hash = src_hash
+        self.dst_hash = dst_hash
+
+    def __str__(self):
+        return ("Mismatch of VM state before and after migration (%s != %s)" %
+                (self.src_hash, self.dst_hash))
+
+
+class VMRebootError(VMError):
+    pass
+
+
 def get_image_filename(params, root_dir):
     """
     Generate an image path from params and root_dir.
@@ -24,6 +191,8 @@
     """
     image_name = params.get("image_name", "image")
     image_format = params.get("image_format", "qcow2")
+    if params.get("image_raw_device") == "yes":
+        return image_name
     image_filename = "%s.%s" % (image_name, image_format)
     image_filename = kvm_utils.get_path(root_dir, image_filename)
     return image_filename
@@ -55,19 +224,8 @@
     size = params.get("image_size", "10G")
     qemu_img_cmd += " %s" % size
 
-    try:
-        utils.system(qemu_img_cmd)
-    except error.CmdError, e:
-        logging.error("Could not create image; qemu-img command failed:\n%s",
-                      str(e))
-        return None
-
-    if not os.path.exists(image_filename):
-        logging.error("Image could not be created for some reason; "
-                      "qemu-img command:\n%s" % qemu_img_cmd)
-        return None
-
-    logging.info("Image created in %s" % image_filename)
+    utils.system(qemu_img_cmd)
+    logging.info("Image created in %r", image_filename)
     return image_filename
 
 
@@ -83,19 +241,70 @@
            image_format -- the format of the image (qcow2, raw etc)
     """
     image_filename = get_image_filename(params, root_dir)
-    logging.debug("Removing image file %s..." % image_filename)
+    logging.debug("Removing image file %s...", image_filename)
     if os.path.exists(image_filename):
         os.unlink(image_filename)
     else:
         logging.debug("Image file %s not found")
 
 
+def check_image(params, root_dir):
+    """
+    Check an image using qemu-img.
+
+    @param params: Dictionary containing the test parameters.
+    @param root_dir: Base directory for relative filenames.
+
+    @note: params should contain:
+           image_name -- the name of the image file, without extension
+           image_format -- the format of the image (qcow2, raw etc)
+
+    @raise VMImageCheckError: In case qemu-img check fails on the image.
+    """
+    image_filename = get_image_filename(params, root_dir)
+    logging.debug("Checking image file %s...", image_filename)
+    qemu_img_cmd = kvm_utils.get_path(root_dir,
+                                      params.get("qemu_img_binary", "qemu-img"))
+    image_is_qcow2 = params.get("image_format") == 'qcow2'
+    if os.path.exists(image_filename) and image_is_qcow2:
+        # Verifying if qemu-img supports 'check'
+        q_result = utils.run(qemu_img_cmd, ignore_status=True)
+        q_output = q_result.stdout
+        check_img = True
+        if not "check" in q_output:
+            logging.error("qemu-img does not support 'check', "
+                          "skipping check...")
+            check_img = False
+        if not "info" in q_output:
+            logging.error("qemu-img does not support 'info', "
+                          "skipping check...")
+            check_img = False
+        if check_img:
+            try:
+                utils.system("%s info %s" % (qemu_img_cmd, image_filename))
+            except error.CmdError:
+                logging.error("Error getting info from image %s",
+                              image_filename)
+            try:
+                utils.system("%s check %s" % (qemu_img_cmd, image_filename))
+            except error.CmdError:
+                raise VMImageCheckError(image_filename)
+
+    else:
+        if not os.path.exists(image_filename):
+            logging.debug("Image file %s not found, skipping check...",
+                          image_filename)
+        elif not image_is_qcow2:
+            logging.debug("Image file %s not qcow2, skipping check...",
+                          image_filename)
+
+
 class VM:
     """
     This class handles all basic VM operations.
     """
 
-    def __init__(self, name, params, root_dir, address_cache):
+    def __init__(self, name, params, root_dir, address_cache, state=None):
         """
         Initialize the object and set a few attributes.
 
@@ -104,30 +313,35 @@
                 (see method make_qemu_command for a full description)
         @param root_dir: Base directory for relative filenames
         @param address_cache: A dict that maps MAC addresses to IP addresses
+        @param state: If provided, use this as self.__dict__
         """
-        self.process = None
-        self.serial_console = None
-        self.redirs = {}
-        self.vnc_port = 5900
-        self.monitors = []
-        self.pci_assignable = None
-        self.netdev_id = []
-        self.uuid = None
+        if state:
+            self.__dict__ = state
+        else:
+            self.process = None
+            self.serial_console = None
+            self.redirs = {}
+            self.vnc_port = 5900
+            self.monitors = []
+            self.pci_assignable = None
+            self.netdev_id = []
+            self.uuid = None
+
+            # Find a unique identifier for this VM
+            while True:
+                self.instance = (time.strftime("%Y%m%d-%H%M%S-") +
+                                 kvm_utils.generate_random_string(4))
+                if not glob.glob("/tmp/*%s" % self.instance):
+                    break
 
         self.name = name
         self.params = params
         self.root_dir = root_dir
         self.address_cache = address_cache
 
-        # Find a unique identifier for this VM
-        while True:
-            self.instance = (time.strftime("%Y%m%d-%H%M%S-") +
-                             kvm_utils.generate_random_string(4))
-            if not glob.glob("/tmp/*%s" % self.instance):
-                break
 
-
-    def clone(self, name=None, params=None, root_dir=None, address_cache=None):
+    def clone(self, name=None, params=None, root_dir=None, address_cache=None,
+              copy_state=False):
         """
         Return a clone of the VM object with optionally modified parameters.
         The clone is initially not alive and needs to be started using create().
@@ -138,6 +352,8 @@
         @param params: Optional new VM creation parameters
         @param root_dir: Optional new base directory for relative filenames
         @param address_cache: A dict that maps MAC addresses to IP addresses
+        @param copy_state: If True, copy the original VM's state to the clone.
+                Mainly useful for make_qemu_command().
         """
         if name is None:
             name = self.name
@@ -147,7 +363,11 @@
             root_dir = self.root_dir
         if address_cache is None:
             address_cache = self.address_cache
-        return VM(name, params, root_dir, address_cache)
+        if copy_state:
+            state = self.__dict__.copy()
+        else:
+            state = None
+        return VM(name, params, root_dir, address_cache, state)
 
 
     def make_qemu_command(self, name=None, params=None, root_dir=None):
@@ -225,36 +445,40 @@
         def add_drive(help, filename, index=None, format=None, cache=None,
                       werror=None, serial=None, snapshot=False, boot=False):
             cmd = " -drive file='%s'" % filename
-            if index is not None: cmd += ",index=%s" % index
-            if format: cmd += ",if=%s" % format
-            if cache: cmd += ",cache=%s" % cache
-            if werror: cmd += ",werror=%s" % werror
-            if serial: cmd += ",serial='%s'" % serial
-            if snapshot: cmd += ",snapshot=on"
-            if boot: cmd += ",boot=on"
+            if index is not None:
+                cmd += ",index=%s" % index
+            if format:
+                cmd += ",if=%s" % format
+            if cache:
+                cmd += ",cache=%s" % cache
+            if werror:
+                cmd += ",werror=%s" % werror
+            if serial:
+                cmd += ",serial='%s'" % serial
+            if snapshot:
+                cmd += ",snapshot=on"
+            if boot:
+                cmd += ",boot=on"
             return cmd
 
         def add_nic(help, vlan, model=None, mac=None, netdev_id=None,
                     nic_extra_params=None):
+            if has_option(help, "netdev"):
+                netdev_vlan_str = ",netdev=%s" % netdev_id
+            else:
+                netdev_vlan_str = ",vlan=%d" % vlan
             if has_option(help, "device"):
-                if model == "virtio":
-                    model="virtio-net-pci"
                 if not model:
-                    model= "rtl8139"
-                cmd = " -device %s" % model
+                    model = "rtl8139"
+                elif model == "virtio":
+                    model = "virtio-net-pci"
+                cmd = " -device %s" % model + netdev_vlan_str
                 if mac:
-                    cmd += ",mac=%s" % mac
-                if has_option(help, "netdev"):
-                    cmd += ",netdev=%s" % netdev_id
-                else:
-                    cmd += "vlan=%d,"  % vlan
+                    cmd += ",mac='%s'" % mac
                 if nic_extra_params:
                     cmd += ",%s" % nic_extra_params
             else:
-                if has_option(help, "netdev"):
-                    cmd = " -net nic,netdev=%s" % netdev_id
-                else:
-                    cmd = " -net nic,vlan=%d" % vlan
+                cmd = " -net nic" + netdev_vlan_str
                 if model:
                     cmd += ",model=%s" % model
                 if mac:
@@ -263,11 +487,11 @@
 
         def add_net(help, vlan, mode, ifname=None, script=None,
                     downscript=None, tftp=None, bootfile=None, hostfwd=[],
-                    netdev_id=None, vhost=False):
+                    netdev_id=None, netdev_extra_params=None):
             if has_option(help, "netdev"):
                 cmd = " -netdev %s,id=%s" % (mode, netdev_id)
-                if vhost:
-                    cmd +=",vhost=on"
+                if netdev_extra_params:
+                    cmd += ",%s" % netdev_extra_params
             else:
                 cmd = " -net %s,vlan=%d" % (mode, vlan)
             if mode == "tap":
@@ -347,9 +571,15 @@
 
         # End of command line option wrappers
 
-        if name is None: name = self.name
-        if params is None: params = self.params
-        if root_dir is None: root_dir = self.root_dir
+        if name is None:
+            name = self.name
+        if params is None:
+            params = self.params
+        if root_dir is None:
+            root_dir = self.root_dir
+
+        # Clone this VM using the new params
+        vm = self.clone(name, params, root_dir, copy_state=True)
 
         qemu_binary = kvm_utils.get_path(root_dir, params.get("qemu_binary",
                                                               "qemu"))
@@ -368,19 +598,19 @@
         # Add the VM's name
         qemu_cmd += add_name(help, name)
         # Add monitors
-        for monitor_name in kvm_utils.get_sub_dict_names(params, "monitors"):
-            monitor_params = kvm_utils.get_sub_dict(params, monitor_name)
-            monitor_filename = self.get_monitor_filename(monitor_name)
+        for monitor_name in params.objects("monitors"):
+            monitor_params = params.object_params(monitor_name)
+            monitor_filename = vm.get_monitor_filename(monitor_name)
             if monitor_params.get("monitor_type") == "qmp":
                 qemu_cmd += add_qmp_monitor(help, monitor_filename)
             else:
                 qemu_cmd += add_human_monitor(help, monitor_filename)
 
         # Add serial console redirection
-        qemu_cmd += add_serial(help, self.get_serial_console_filename())
+        qemu_cmd += add_serial(help, vm.get_serial_console_filename())
 
-        for image_name in kvm_utils.get_sub_dict_names(params, "images"):
-            image_params = kvm_utils.get_sub_dict(params, image_name)
+        for image_name in params.objects("images"):
+            image_params = params.object_params(image_name)
             if image_params.get("boot_drive") == "no":
                 continue
             qemu_cmd += add_drive(help,
@@ -394,20 +624,23 @@
                                   image_params.get("image_boot") == "yes")
 
         redirs = []
-        for redir_name in kvm_utils.get_sub_dict_names(params, "redirs"):
-            redir_params = kvm_utils.get_sub_dict(params, redir_name)
+        for redir_name in params.objects("redirs"):
+            redir_params = params.object_params(redir_name)
             guest_port = int(redir_params.get("guest_port"))
-            host_port = self.redirs.get(guest_port)
+            host_port = vm.redirs.get(guest_port)
             redirs += [(host_port, guest_port)]
 
         vlan = 0
-        for nic_name in kvm_utils.get_sub_dict_names(params, "nics"):
-            nic_params = kvm_utils.get_sub_dict(params, nic_name)
+        for nic_name in params.objects("nics"):
+            nic_params = params.object_params(nic_name)
+            try:
+                netdev_id = vm.netdev_id[vlan]
+            except IndexError:
+                netdev_id = None
             # Handle the '-net nic' part
-            mac = self.get_mac_address(vlan)
+            mac = vm.get_mac_address(vlan)
             qemu_cmd += add_nic(help, vlan, nic_params.get("nic_model"), mac,
-                                self.netdev_id[vlan],
-                                nic_params.get("nic_extra_params"))
+                                netdev_id, nic_params.get("nic_extra_params"))
             # Handle the '-net tap' or '-net user' part
             script = nic_params.get("nic_script")
             downscript = nic_params.get("nic_downscript")
@@ -419,11 +652,10 @@
             if tftp:
                 tftp = kvm_utils.get_path(root_dir, tftp)
             qemu_cmd += add_net(help, vlan, nic_params.get("nic_mode", "user"),
-                                self.get_ifname(vlan),
+                                vm.get_ifname(vlan),
                                 script, downscript, tftp,
-                                nic_params.get("bootp"), redirs,
-                                self.netdev_id[vlan],
-                                nic_params.get("vhost")=="yes")
+                                nic_params.get("bootp"), redirs, netdev_id,
+                                nic_params.get("netdev_extra_params"))
             # Proceed to next NIC
             vlan += 1
 
@@ -435,9 +667,8 @@
         if smp:
             qemu_cmd += add_smp(help, smp)
 
-        cdroms = kvm_utils.get_sub_dict_names(params, "cdroms")
-        for cdrom in cdroms:
-            cdrom_params = kvm_utils.get_sub_dict(params, cdrom)
+        for cdrom in params.objects("cdroms"):
+            cdrom_params = params.object_params(cdrom)
             iso = cdrom_params.get("cdrom")
             if iso:
                 qemu_cmd += add_cdrom(help, kvm_utils.get_path(root_dir, iso),
@@ -477,27 +708,27 @@
             qemu_cmd += add_tcp_redir(help, host_port, guest_port)
 
         if params.get("display") == "vnc":
-            qemu_cmd += add_vnc(help, self.vnc_port)
+            qemu_cmd += add_vnc(help, vm.vnc_port)
         elif params.get("display") == "sdl":
             qemu_cmd += add_sdl(help)
         elif params.get("display") == "nographic":
             qemu_cmd += add_nographic(help)
 
         if params.get("uuid") == "random":
-            qemu_cmd += add_uuid(help, self.uuid)
+            qemu_cmd += add_uuid(help, vm.uuid)
         elif params.get("uuid"):
             qemu_cmd += add_uuid(help, params.get("uuid"))
 
         if params.get("testdev") == "yes":
-            qemu_cmd += add_testdev(help, self.get_testlog_filename())
+            qemu_cmd += add_testdev(help, vm.get_testlog_filename())
 
         if params.get("disable_hpet") == "yes":
             qemu_cmd += add_no_hpet(help)
 
         # If the PCI assignment step went OK, add each one of the PCI assigned
         # devices to the qemu command line.
-        if self.pci_assignable:
-            for pci_id in self.pa_pci_ids:
+        if vm.pci_assignable:
+            for pci_id in vm.pa_pci_ids:
                 qemu_cmd += add_pcidevice(help, pci_id)
 
         extra_params = params.get("extra_params")
@@ -507,8 +738,9 @@
         return qemu_cmd
 
 
+    @error.context_aware
     def create(self, name=None, params=None, root_dir=None, timeout=5.0,
-               migration_mode=None, migration_exec_cmd=None, mac_source=None):
+               migration_mode=None, mac_source=None):
         """
         Start the VM by running a qemu command.
         All parameters are optional. If name, params or root_dir are not
@@ -523,8 +755,19 @@
                 (e.g. 'gzip -c -d filename') if migration_mode is 'exec'
         @param mac_source: A VM object from which to copy MAC addresses. If not
                 specified, new addresses will be generated.
+
+        @raise VMCreateError: If qemu terminates unexpectedly
+        @raise VMKVMInitError: If KVM initialization fails
+        @raise VMHugePageError: If hugepage initialization fails
+        @raise VMImageMissingError: If a CD image is missing
+        @raise VMHashMismatchError: If a CD image hash doesn't match the
+                expected hash
+        @raise VMBadPATypeError: If an unsupported PCI assignment type is
+                requested
+        @raise VMPAError: If no PCI assignable devices could be assigned
         """
-        self.destroy()
+        error.context("creating '%s'" % self.name)
+        self.destroy(free_mac_addresses=False)
 
         if name is not None:
             self.name = name
@@ -536,38 +779,38 @@
         params = self.params
         root_dir = self.root_dir
 
-        # Verify the md5sum of the ISO image
-        iso = params.get("cdrom")
-        if iso:
-            iso = kvm_utils.get_path(root_dir, iso)
-            if not os.path.exists(iso):
-                logging.error("ISO file not found: %s" % iso)
-                return False
-            compare = False
-            if params.get("md5sum_1m"):
-                logging.debug("Comparing expected MD5 sum with MD5 sum of "
-                              "first MB of ISO file...")
-                actual_hash = utils.hash_file(iso, 1048576, method="md5")
-                expected_hash = params.get("md5sum_1m")
-                compare = True
-            elif params.get("md5sum"):
-                logging.debug("Comparing expected MD5 sum with MD5 sum of ISO "
-                              "file...")
-                actual_hash = utils.hash_file(iso, method="md5")
-                expected_hash = params.get("md5sum")
-                compare = True
-            elif params.get("sha1sum"):
-                logging.debug("Comparing expected SHA1 sum with SHA1 sum of "
-                              "ISO file...")
-                actual_hash = utils.hash_file(iso, method="sha1")
-                expected_hash = params.get("sha1sum")
-                compare = True
-            if compare:
-                if actual_hash == expected_hash:
-                    logging.debug("Hashes match")
-                else:
-                    logging.error("Actual hash differs from expected one")
-                    return False
+        # Verify the md5sum of the ISO images
+        for cdrom in params.objects("cdroms"):
+            cdrom_params = params.object_params(cdrom)
+            iso = cdrom_params.get("cdrom")
+            if iso:
+                iso = kvm_utils.get_path(root_dir, iso)
+                if not os.path.exists(iso):
+                    raise VMImageMissingError(iso)
+                compare = False
+                if cdrom_params.get("md5sum_1m"):
+                    logging.debug("Comparing expected MD5 sum with MD5 sum of "
+                                  "first MB of ISO file...")
+                    actual_hash = utils.hash_file(iso, 1048576, method="md5")
+                    expected_hash = cdrom_params.get("md5sum_1m")
+                    compare = True
+                elif cdrom_params.get("md5sum"):
+                    logging.debug("Comparing expected MD5 sum with MD5 sum of "
+                                  "ISO file...")
+                    actual_hash = utils.hash_file(iso, method="md5")
+                    expected_hash = cdrom_params.get("md5sum")
+                    compare = True
+                elif cdrom_params.get("sha1sum"):
+                    logging.debug("Comparing expected SHA1 sum with SHA1 sum "
+                                  "of ISO file...")
+                    actual_hash = utils.hash_file(iso, method="sha1")
+                    expected_hash = cdrom_params.get("sha1sum")
+                    compare = True
+                if compare:
+                    if actual_hash == expected_hash:
+                        logging.debug("Hashes match")
+                    else:
+                        raise VMHashMismatchError(actual_hash, expected_hash)
 
         # Make sure the following code is not executed by more than one thread
         # at the same time
@@ -576,15 +819,17 @@
 
         try:
             # Handle port redirections
-            redir_names = kvm_utils.get_sub_dict_names(params, "redirs")
+            redir_names = params.objects("redirs")
             host_ports = kvm_utils.find_free_ports(5000, 6000, len(redir_names))
             self.redirs = {}
             for i in range(len(redir_names)):
-                redir_params = kvm_utils.get_sub_dict(params, redir_names[i])
+                redir_params = params.object_params(redir_names[i])
                 guest_port = int(redir_params.get("guest_port"))
                 self.redirs[guest_port] = host_ports[i]
 
-            for nic in kvm_utils.get_sub_dict_names(params, "nics"):
+            # Generate netdev IDs for all NICs
+            self.netdev_id = []
+            for nic in params.objects("nics"):
                 self.netdev_id.append(kvm_utils.generate_random_id())
 
             # Find available VNC port, if needed
@@ -598,18 +843,24 @@
                 f.close()
 
             # Generate or copy MAC addresses for all NICs
-            num_nics = len(kvm_utils.get_sub_dict_names(params, "nics"))
+            num_nics = len(params.objects("nics"))
             for vlan in range(num_nics):
-                mac = mac_source and mac_source.get_mac_address(vlan)
-                if mac:
+                nic_name = params.objects("nics")[vlan]
+                nic_params = params.object_params(nic_name)
+                if nic_params.get("nic_mac", None):
+                    mac = nic_params.get("nic_mac")
                     kvm_utils.set_mac_address(self.instance, vlan, mac)
                 else:
-                    kvm_utils.generate_mac_address(self.instance, vlan)
+                    mac = mac_source and mac_source.get_mac_address(vlan)
+                    if mac:
+                        kvm_utils.set_mac_address(self.instance, vlan, mac)
+                    else:
+                        kvm_utils.generate_mac_address(self.instance, vlan)
 
             # Assign a PCI assignable device
             self.pci_assignable = None
             pa_type = params.get("pci_assignable")
-            if pa_type in ["vf", "pf", "mixed"]:
+            if pa_type and pa_type != "no":
                 pa_devices_requested = params.get("devices_requested")
 
                 # Virtual Functions (VF) assignable devices
@@ -633,6 +884,8 @@
                         driver_option=params.get("driver_option"),
                         names=params.get("device_names"),
                         devices_requested=pa_devices_requested)
+                else:
+                    raise VMBadPATypeError(pa_type)
 
                 self.pa_pci_ids = self.pci_assignable.request_devs()
 
@@ -640,14 +893,7 @@
                     logging.debug("Successfuly assigned devices: %s",
                                   self.pa_pci_ids)
                 else:
-                    logging.error("No PCI assignable devices were assigned "
-                                  "and 'pci_assignable' is defined to %s "
-                                  "on your config file. Aborting VM creation.",
-                                  pa_type)
-                    return False
-
-            elif pa_type and pa_type != "no":
-                logging.warn("Unsupported pci_assignable type: %s", pa_type)
+                    raise VMPAError(pa_type)
 
             # Make qemu command
             qemu_command = self.make_qemu_command()
@@ -660,27 +906,26 @@
                 self.migration_file = "/tmp/migration-unix-%s" % self.instance
                 qemu_command += " -incoming unix:%s" % self.migration_file
             elif migration_mode == "exec":
-                qemu_command += ' -incoming "exec:%s"' % migration_exec_cmd
+                self.migration_port = kvm_utils.find_free_port(5200, 6000)
+                qemu_command += (' -incoming "exec:nc -l %s"' %
+                                 self.migration_port)
 
-            logging.debug("Running qemu command:\n%s", qemu_command)
+            logging.info("Running qemu command:\n%s", qemu_command)
             self.process = kvm_subprocess.run_bg(qemu_command, None,
-                                                 logging.debug, "(qemu) ")
+                                                 logging.info, "(qemu) ")
 
             # Make sure the process was started successfully
             if not self.process.is_alive():
-                logging.error("VM could not be created; "
-                              "qemu command failed:\n%s" % qemu_command)
-                logging.error("Status: %s" % self.process.get_status())
-                logging.error("Output:" + kvm_utils.format_str_for_message(
-                    self.process.get_output()))
+                e = VMCreateError(qemu_command,
+                                  self.process.get_status(),
+                                  self.process.get_output())
                 self.destroy()
-                return False
+                raise e
 
             # Establish monitor connections
             self.monitors = []
-            for monitor_name in kvm_utils.get_sub_dict_names(params,
-                                                             "monitors"):
-                monitor_params = kvm_utils.get_sub_dict(params, monitor_name)
+            for monitor_name in params.objects("monitors"):
+                monitor_params = params.object_params(monitor_name)
                 # Wait for monitor connection to succeed
                 end_time = time.time() + timeout
                 while time.time() < end_time:
@@ -695,17 +940,14 @@
                             monitor = kvm_monitor.HumanMonitor(
                                 monitor_name,
                                 self.get_monitor_filename(monitor_name))
+                        monitor.verify_responsive()
+                        break
                     except kvm_monitor.MonitorError, e:
                         logging.warn(e)
-                    else:
-                        if monitor.is_responsive():
-                            break
-                    time.sleep(1)
+                        time.sleep(1)
                 else:
-                    logging.error("Could not connect to monitor '%s'" %
-                                  monitor_name)
                     self.destroy()
-                    return False
+                    raise e
                 # Add this monitor to the list
                 self.monitors += [monitor]
 
@@ -714,39 +956,31 @@
             output = self.process.get_output()
 
             if re.search("Could not initialize KVM", output, re.IGNORECASE):
-                logging.error("Could not initialize KVM; "
-                              "qemu command:\n%s" % qemu_command)
-                logging.error("Output:" + kvm_utils.format_str_for_message(
-                              self.process.get_output()))
+                e = VMKVMInitError(qemu_command, self.process.get_output())
                 self.destroy()
-                return False
+                raise e
 
             if "alloc_mem_area" in output:
-                logging.error("Could not allocate hugepage memory; "
-                              "qemu command:\n%s" % qemu_command)
-                logging.error("Output:" + kvm_utils.format_str_for_message(
-                              self.process.get_output()))
+                e = VMHugePageError(qemu_command, self.process.get_output())
                 self.destroy()
-                return False
+                raise e
 
             logging.debug("VM appears to be alive with PID %s", self.get_pid())
 
             # Establish a session with the serial console -- requires a version
             # of netcat that supports -U
-            self.serial_console = kvm_subprocess.kvm_shell_session(
+            self.serial_console = kvm_subprocess.ShellSession(
                 "nc -U %s" % self.get_serial_console_filename(),
                 auto_close=False,
                 output_func=kvm_utils.log_line,
                 output_params=("serial-%s.log" % name,))
 
-            return True
-
         finally:
             fcntl.lockf(lockfile, fcntl.LOCK_UN)
             lockfile.close()
 
 
-    def destroy(self, gracefully=True):
+    def destroy(self, gracefully=True, free_mac_addresses=True):
         """
         Destroy the VM.
 
@@ -754,14 +988,15 @@
         command.  Then, attempt to destroy the VM via the monitor with a 'quit'
         command.  If that fails, send SIGKILL to the qemu process.
 
-        @param gracefully: Whether an attempt will be made to end the VM
+        @param gracefully: If True, an attempt will be made to end the VM
                 using a shell command before trying to end the qemu process
                 with a 'quit' or a kill signal.
+        @param free_mac_addresses: If True, the MAC addresses used by the VM
+                will be freed.
         """
         try:
             # Is it already dead?
             if self.is_dead():
-                logging.debug("VM is already down")
                 return
 
             logging.debug("Destroying VM with PID %s...", self.get_pid())
@@ -769,15 +1004,18 @@
             if gracefully and self.params.get("shutdown_command"):
                 # Try to destroy with shell command
                 logging.debug("Trying to shutdown VM with shell command...")
-                session = self.remote_login()
-                if session:
+                try:
+                    session = self.login()
+                except (kvm_utils.LoginError, VMError), e:
+                    logging.debug(e)
+                else:
                     try:
                         # Send the shutdown command
                         session.sendline(self.params.get("shutdown_command"))
                         logging.debug("Shutdown command sent; waiting for VM "
                                       "to go down...")
                         if kvm_utils.wait_for(self.is_dead, 60, 1, 1):
-                            logging.debug("VM is down, freeing mac address.")
+                            logging.debug("VM is down")
                             return
                     finally:
                         session.close()
@@ -804,7 +1042,7 @@
                 logging.debug("VM is down")
                 return
 
-            logging.error("Process %s is a zombie!" % self.process.get_pid())
+            logging.error("Process %s is a zombie!", self.process.get_pid())
 
         finally:
             self.monitors = []
@@ -826,9 +1064,10 @@
                     os.unlink(self.migration_file)
                 except OSError:
                     pass
-            num_nics = len(kvm_utils.get_sub_dict_names(self.params, "nics"))
-            for vlan in range(num_nics):
-                self.free_mac_address(vlan)
+            if free_mac_addresses:
+                num_nics = len(self.params.objects("nics"))
+                for vlan in range(num_nics):
+                    self.free_mac_address(vlan)
 
 
     @property
@@ -846,15 +1085,26 @@
             return self.monitors[0]
 
 
+    def verify_alive(self):
+        """
+        Make sure the VM is alive and that the main monitor is responsive.
+
+        @raise VMDeadError: If the VM is dead
+        @raise: Various monitor exceptions if the monitor is unresponsive
+        """
+        if self.is_dead():
+            raise VMDeadError(self.process.get_status(),
+                              self.process.get_output())
+        if self.monitors:
+            self.monitor.verify_responsive()
+
+
     def is_alive(self):
         """
         Return True if the VM is alive and its monitor is responsive.
         """
-        # Check if the process is running
-        if self.is_dead():
-            return False
-        # Try sending a monitor command
-        return bool(self.monitor) and self.monitor.is_responsive()
+        return not self.is_dead() and (not self.monitors or
+                                       self.monitor.is_responsive())
 
 
     def is_dead(self):
@@ -885,7 +1135,7 @@
         params).
         """
         return [self.get_monitor_filename(m) for m in
-                kvm_utils.get_sub_dict_names(self.params, "monitors")]
+                self.params.objects("monitors")]
 
 
     def get_serial_console_filename(self):
@@ -910,28 +1160,26 @@
         address of its own).  Otherwise return the NIC's IP address.
 
         @param index: Index of the NIC whose address is requested.
+        @raise VMMACAddressMissingError: If no MAC address is defined for the
+                requested NIC
+        @raise VMIPAddressMissingError: If no IP address is found for the
+                NIC's MAC address
+        @raise VMAddressVerificationError: If the MAC-IP address mapping cannot
+                be verified (using arping)
         """
-        nics = kvm_utils.get_sub_dict_names(self.params, "nics")
+        nics = self.params.objects("nics")
         nic_name = nics[index]
-        nic_params = kvm_utils.get_sub_dict(self.params, nic_name)
+        nic_params = self.params.object_params(nic_name)
         if nic_params.get("nic_mode") == "tap":
-            mac = self.get_mac_address(index)
-            if not mac:
-                logging.debug("MAC address unavailable")
-                return None
-            mac = mac.lower()
+            mac = self.get_mac_address(index).lower()
             # Get the IP address from the cache
             ip = self.address_cache.get(mac)
             if not ip:
-                logging.debug("Could not find IP address for MAC address: %s" %
-                              mac)
-                return None
+                raise VMIPAddressMissingError(mac)
             # Make sure the IP address is assigned to this guest
             macs = [self.get_mac_address(i) for i in range(len(nics))]
             if not kvm_utils.verify_ip_address_ownership(ip, macs):
-                logging.debug("Could not verify MAC-IP address mapping: "
-                              "%s ---> %s" % (mac, ip))
-                return None
+                raise VMAddressVerificationError(mac, ip)
             return ip
         else:
             return "localhost"
@@ -945,16 +1193,18 @@
         @param nic_index: Index of the NIC.
         @return: If port redirection is used, return the host port redirected
                 to guest port port. Otherwise return port.
+        @raise VMPortNotRedirectedError: If an unredirected port is requested
+                in user mode
         """
-        nic_name = kvm_utils.get_sub_dict_names(self.params, "nics")[nic_index]
-        nic_params = kvm_utils.get_sub_dict(self.params, nic_name)
+        nic_name = self.params.objects("nics")[nic_index]
+        nic_params = self.params.object_params(nic_name)
         if nic_params.get("nic_mode") == "tap":
             return port
         else:
-            if not self.redirs.has_key(port):
-                logging.warn("Warning: guest port %s requested but not "
-                             "redirected" % port)
-            return self.redirs.get(port)
+            try:
+                return self.redirs[port]
+            except KeyError:
+                raise VMPortNotRedirectedError(port)
 
 
     def get_ifname(self, nic_index=0):
@@ -963,9 +1213,9 @@
 
         @param nic_index: Index of the NIC
         """
-        nics = kvm_utils.get_sub_dict_names(self.params, "nics")
+        nics = self.params.objects("nics")
         nic_name = nics[nic_index]
-        nic_params = kvm_utils.get_sub_dict(self.params, nic_name)
+        nic_params = self.params.object_params(nic_name)
         if nic_params.get("nic_ifname"):
             return nic_params.get("nic_ifname")
         else:
@@ -977,8 +1227,13 @@
         Return the MAC address of a NIC.
 
         @param nic_index: Index of the NIC
+        @raise VMMACAddressMissingError: If no MAC address is defined for the
+                requested NIC
         """
-        return kvm_utils.get_mac_address(self.instance, nic_index)
+        mac = kvm_utils.get_mac_address(self.instance, nic_index)
+        if not mac:
+            raise VMMACAddressMissingError(nic_index)
+        return mac
 
 
     def free_mac_address(self, nic_index=0):
@@ -1031,7 +1286,8 @@
         return shm * 4.0 / 1024
 
 
-    def remote_login(self, nic_index=0, timeout=10):
+    @error.context_aware
+    def login(self, nic_index=0, timeout=10):
         """
         Log into the guest via SSH/Telnet/Netcat.
         If timeout expires while waiting for output from the guest (e.g. a
@@ -1040,8 +1296,9 @@
         @param nic_index: The index of the NIC to connect to.
         @param timeout: Time (seconds) before giving up logging into the
                 guest.
-        @return: kvm_spawn object on success and None on failure.
+        @return: A ShellSession object.
         """
+        error.context("logging into '%s'" % self.name)
         username = self.params.get("username", "")
         password = self.params.get("password", "")
         prompt = self.params.get("shell_prompt", "[\#\$]")
@@ -1051,87 +1308,98 @@
         port = self.get_port(int(self.params.get("shell_port")))
         log_filename = ("session-%s-%s.log" %
                         (self.name, kvm_utils.generate_random_string(4)))
-
-        if not address or not port:
-            logging.debug("IP address or port unavailable")
-            return None
-
         session = kvm_utils.remote_login(client, address, port, username,
                                          password, prompt, linesep,
                                          log_filename, timeout)
-
-        if session:
-            session.set_status_test_command(self.params.get("status_test_"
-                                                            "command", ""))
+        session.set_status_test_command(self.params.get("status_test_command",
+                                                        ""))
         return session
 
 
-    def copy_files_to(self, local_path, remote_path, nic_index=0, timeout=600):
+    def remote_login(self, nic_index=0, timeout=10):
         """
-        Transfer files to the guest.
+        Alias for login() for backward compatibility.
+        """
+        return self.login(nic_index, timeout)
 
-        @param local_path: Host path
-        @param remote_path: Guest path
+
+    def wait_for_login(self, nic_index=0, timeout=240, internal_timeout=10):
+        """
+        Make multiple attempts to log into the guest via SSH/Telnet/Netcat.
+
         @param nic_index: The index of the NIC to connect to.
+        @param timeout: Time (seconds) to keep trying to log in.
+        @param internal_timeout: Timeout to pass to login().
+        @return: A ShellSession object.
+        """
+        logging.debug("Attempting to log into '%s' (timeout %ds)", self.name,
+                      timeout)
+        end_time = time.time() + timeout
+        while time.time() < end_time:
+            try:
+                return self.login(nic_index, internal_timeout)
+            except (kvm_utils.LoginError, VMError), e:
+                logging.debug(e)
+            time.sleep(2)
+        # Timeout expired; try one more time but don't catch exceptions
+        return self.login(nic_index, internal_timeout)
+
+
+    @error.context_aware
+    def copy_files_to(self, host_path, guest_path, nic_index=0, verbose=False,
+                      timeout=600):
+        """
+        Transfer files to the remote host(guest).
+
+        @param host_path: Host path
+        @param guest_path: Guest path
+        @param nic_index: The index of the NIC to connect to.
+        @param verbose: If True, log some stats using logging.debug (RSS only)
         @param timeout: Time (seconds) before giving up on doing the remote
                 copy.
         """
+        error.context("sending file(s) to '%s'" % self.name)
         username = self.params.get("username", "")
         password = self.params.get("password", "")
         client = self.params.get("file_transfer_client")
         address = self.get_address(nic_index)
         port = self.get_port(int(self.params.get("file_transfer_port")))
-
-        if not address or not port:
-            logging.debug("IP address or port unavailable")
-            return None
-
-        if client == "scp":
-            log_filename = ("scp-%s-%s.log" %
-                            (self.name, kvm_utils.generate_random_string(4)))
-            return kvm_utils.scp_to_remote(address, port, username, password,
-                                           local_path, remote_path,
-                                           log_filename, timeout)
-        elif client == "rss":
-            c = rss_file_transfer.FileUploadClient(address, port)
-            c.upload(local_path, remote_path, timeout)
-            c.close()
-            return True
+        log_filename = ("transfer-%s-to-%s-%s.log" %
+                        (self.name, address,
+                        kvm_utils.generate_random_string(4)))
+        kvm_utils.copy_files_to(address, client, username, password, port,
+                                host_path, guest_path, log_filename, verbose,
+                                timeout)
 
 
-    def copy_files_from(self, remote_path, local_path, nic_index=0, timeout=600):
+    @error.context_aware
+    def copy_files_from(self, guest_path, host_path, nic_index=0,
+                        verbose=False, timeout=600):
         """
         Transfer files from the guest.
 
-        @param local_path: Guest path
-        @param remote_path: Host path
+        @param guest_path: Guest path
+        @param host_path: Host path
         @param nic_index: The index of the NIC to connect to.
+        @param verbose: If True, log some stats using logging.debug (RSS only)
         @param timeout: Time (seconds) before giving up on doing the remote
                 copy.
         """
+        error.context("receiving file(s) from '%s'" % self.name)
         username = self.params.get("username", "")
         password = self.params.get("password", "")
         client = self.params.get("file_transfer_client")
         address = self.get_address(nic_index)
         port = self.get_port(int(self.params.get("file_transfer_port")))
-
-        if not address or not port:
-            logging.debug("IP address or port unavailable")
-            return None
-
-        if client == "scp":
-            log_filename = ("scp-%s-%s.log" %
-                            (self.name, kvm_utils.generate_random_string(4)))
-            return kvm_utils.scp_from_remote(address, port, username, password,
-                                             remote_path, local_path,
-                                             log_filename, timeout)
-        elif client == "rss":
-            c = rss_file_transfer.FileDownloadClient(address, port)
-            c.download(remote_path, local_path, timeout)
-            c.close()
-            return True
+        log_filename = ("transfer-%s-from-%s-%s.log" %
+                        (self.name, address,
+                        kvm_utils.generate_random_string(4)))
+        kvm_utils.copy_files_from(address, client, username, password, port,
+                                  guest_path, host_path, log_filename,
+                                  verbose, timeout)
 
 
+    @error.context_aware
     def serial_login(self, timeout=10):
         """
         Log into the guest via the serial console.
@@ -1139,26 +1407,247 @@
         password prompt or a shell prompt) -- fail.
 
         @param timeout: Time (seconds) before giving up logging into the guest.
-        @return: kvm_spawn object on success and None on failure.
+        @return: ShellSession object on success and None on failure.
         """
+        error.context("logging into '%s' via serial console" % self.name)
         username = self.params.get("username", "")
         password = self.params.get("password", "")
         prompt = self.params.get("shell_prompt", "[\#\$]")
         linesep = eval("'%s'" % self.params.get("shell_linesep", r"\n"))
         status_test_command = self.params.get("status_test_command", "")
 
-        if self.serial_console:
-            self.serial_console.set_linesep(linesep)
-            self.serial_console.set_status_test_command(status_test_command)
-        else:
-            return None
+        self.serial_console.set_linesep(linesep)
+        self.serial_console.set_status_test_command(status_test_command)
 
-        # Make sure we get a login prompt
+        # Try to get a login prompt
         self.serial_console.sendline()
 
-        if kvm_utils._remote_login(self.serial_console, username, password,
-                                   prompt, timeout):
-            return self.serial_console
+        kvm_utils._remote_login(self.serial_console, username, password,
+                                prompt, timeout)
+        return self.serial_console
+
+
+    def wait_for_serial_login(self, timeout=240, internal_timeout=10):
+        """
+        Make multiple attempts to log into the guest via serial console.
+
+        @param timeout: Time (seconds) to keep trying to log in.
+        @param internal_timeout: Timeout to pass to serial_login().
+        @return: A ShellSession object.
+        """
+        logging.debug("Attempting to log into '%s' via serial console "
+                      "(timeout %ds)", self.name, timeout)
+        end_time = time.time() + timeout
+        while time.time() < end_time:
+            try:
+                return self.serial_login(internal_timeout)
+            except kvm_utils.LoginError, e:
+                logging.debug(e)
+            time.sleep(2)
+        # Timeout expired; try one more time but don't catch exceptions
+        return self.serial_login(internal_timeout)
+
+
+    @error.context_aware
+    def migrate(self, timeout=3600, protocol="tcp", cancel_delay=None,
+                offline=False, stable_check=False, clean=True,
+                save_path="/tmp", dest_host="localhost", remote_port=None):
+        """
+        Migrate the VM.
+
+        If the migration is local, the VM object's state is switched with that
+        of the destination VM.  Otherwise, the state is switched with that of
+        a dead VM (returned by self.clone()).
+
+        @param timeout: Time to wait for migration to complete.
+        @param protocol: Migration protocol ('tcp', 'unix' or 'exec').
+        @param cancel_delay: If provided, specifies a time duration after which
+                migration will be canceled.  Used for testing migrate_cancel.
+        @param offline: If True, pause the source VM before migration.
+        @param stable_check: If True, compare the VM's state after migration to
+                its state before migration and raise an exception if they
+                differ.
+        @param clean: If True, delete the saved state files (relevant only if
+                stable_check is also True).
+        @param save_path: The path for state files.
+        @param dest_host: Destination host (defaults to 'localhost').
+        @param remote_port: Port to use for remote migration.
+        """
+        error.base_context("migrating '%s'" % self.name)
+
+        def mig_finished():
+            o = self.monitor.info("migrate")
+            if isinstance(o, str):
+                return "status: active" not in o
+            else:
+                return o.get("status") != "active"
+
+        def mig_succeeded():
+            o = self.monitor.info("migrate")
+            if isinstance(o, str):
+                return "status: completed" in o
+            else:
+                return o.get("status") == "completed"
+
+        def mig_failed():
+            o = self.monitor.info("migrate")
+            if isinstance(o, str):
+                return "status: failed" in o
+            else:
+                return o.get("status") == "failed"
+
+        def mig_cancelled():
+            o = self.monitor.info("migrate")
+            if isinstance(o, str):
+                return ("Migration status: cancelled" in o or
+                        "Migration status: canceled" in o)
+            else:
+                return (o.get("status") == "cancelled" or
+                        o.get("status") == "canceled")
+
+        def wait_for_migration():
+            if not kvm_utils.wait_for(mig_finished, timeout, 2, 2,
+                                      "Waiting for migration to complete"):
+                raise VMMigrateTimeoutError("Timeout expired while waiting "
+                                            "for migration to finish")
+
+        local = dest_host == "localhost"
+
+        clone = self.clone()
+        if local:
+            error.context("creating destination VM")
+            if stable_check:
+                # Pause the dest vm after creation
+                extra_params = clone.params.get("extra_params", "") + " -S"
+                clone.params["extra_params"] = extra_params
+            clone.create(migration_mode=protocol, mac_source=self)
+            error.context()
+
+        try:
+            if protocol == "tcp":
+                if local:
+                    uri = "tcp:localhost:%d" % clone.migration_port
+                else:
+                    uri = "tcp:%s:%d" % (dest_host, remote_port)
+            elif protocol == "unix":
+                uri = "unix:%s" % clone.migration_file
+            elif protocol == "exec":
+                uri = '"exec:nc localhost %s"' % clone.migration_port
+
+            if offline:
+                self.monitor.cmd("stop")
+
+            logging.info("Migrating to %s", uri)
+            self.monitor.migrate(uri)
+
+            if cancel_delay:
+                time.sleep(cancel_delay)
+                self.monitor.cmd("migrate_cancel")
+                if not kvm_utils.wait_for(mig_cancelled, 60, 2, 2,
+                                          "Waiting for migration "
+                                          "cancellation"):
+                    raise VMMigrateCancelError("Cannot cancel migration")
+                return
+
+            wait_for_migration()
+
+            # Report migration status
+            if mig_succeeded():
+                logging.info("Migration completed successfully")
+            elif mig_failed():
+                raise VMMigrateFailedError("Migration failed")
+            else:
+                raise VMMigrateFailedError("Migration ended with unknown "
+                                           "status")
+
+            # Switch self <-> clone
+            temp = self.clone(copy_state=True)
+            self.__dict__ = clone.__dict__
+            clone = temp
+
+            # From now on, clone is the source VM that will soon be destroyed
+            # and self is the destination VM that will remain alive.  If this
+            # is remote migration, self is a dead VM object.
+
+            error.context("after migration")
+            if local:
+                time.sleep(1)
+                self.verify_alive()
+
+            if local and stable_check:
+                try:
+                    save1 = os.path.join(save_path, "src-" + clone.instance)
+                    save2 = os.path.join(save_path, "dst-" + self.instance)
+                    clone.save_to_file(save1)
+                    self.save_to_file(save2)
+                    # Fail if we see deltas
+                    md5_save1 = utils.hash_file(save1)
+                    md5_save2 = utils.hash_file(save2)
+                    if md5_save1 != md5_save2:
+                        raise VMMigrateStateMismatchError(md5_save1, md5_save2)
+                finally:
+                    if clean:
+                        if os.path.isfile(save1):
+                            os.remove(save1)
+                        if os.path.isfile(save2):
+                            os.remove(save2)
+
+        finally:
+            # If we're doing remote migration and it's completed successfully,
+            # self points to a dead VM object
+            if self.is_alive():
+                self.monitor.cmd("cont")
+            clone.destroy(gracefully=False)
+
+
+    @error.context_aware
+    def reboot(self, session=None, method="shell", nic_index=0, timeout=240):
+        """
+        Reboot the VM and wait for it to come back up by trying to log in until
+        timeout expires.
+
+        @param session: A shell session object or None.
+        @param method: Reboot method.  Can be "shell" (send a shell reboot
+                command) or "system_reset" (send a system_reset monitor command).
+        @param nic_index: Index of NIC to access in the VM, when logging in
+                after rebooting.
+        @param timeout: Time to wait for login to succeed (after rebooting).
+        @return: A new shell session object.
+        """
+        error.base_context("rebooting '%s'" % self.name, logging.info)
+        error.context("before reboot")
+        session = session or self.login()
+        error.context()
+
+        if method == "shell":
+            session.sendline(self.params.get("reboot_command"))
+        elif method == "system_reset":
+            # Clear the event list of all QMP monitors
+            qmp_monitors = [m for m in self.monitors if m.protocol == "qmp"]
+            for m in qmp_monitors:
+                m.clear_events()
+            # Send a system_reset monitor command
+            self.monitor.cmd("system_reset")
+            # Look for RESET QMP events
+            time.sleep(1)
+            for m in qmp_monitors:
+                if m.get_event("RESET"):
+                    logging.info("RESET QMP event received")
+                else:
+                    raise VMRebootError("RESET QMP event not received after "
+                                        "system_reset (monitor '%s')" % m.name)
+        else:
+            raise VMRebootError("Unknown reboot method: %s" % method)
+
+        error.context("waiting for guest to go down", logging.info)
+        if not kvm_utils.wait_for(lambda:
+                                  not session.is_responsive(timeout=30),
+                                  120, 0, 1):
+            raise VMRebootError("Guest refuses to go down")
+        session.close()
+
+        error.context("logging in after reboot", logging.info)
+        return self.wait_for_login(nic_index, timeout=timeout)
 
 
     def send_key(self, keystr):
@@ -1209,15 +1698,9 @@
         """
         Get the cpu count of the VM.
         """
-        session = self.remote_login()
-        if not session:
-            return None
+        session = self.login()
         try:
-            cmd = self.params.get("cpu_chk_cmd")
-            s, count = session.get_command_status_output(cmd)
-            if s == 0:
-                return int(count)
-            return None
+            return int(session.cmd(self.params.get("cpu_chk_cmd")))
         finally:
             session.close()
 
@@ -1229,15 +1712,11 @@
         @param check_cmd: Command used to check memory. If not provided,
                 self.params.get("mem_chk_cmd") will be used.
         """
-        session = self.remote_login()
-        if not session:
-            return None
+        session = self.login()
         try:
             if not cmd:
                 cmd = self.params.get("mem_chk_cmd")
-            s, mem_str = session.get_command_status_output(cmd)
-            if s != 0:
-                return None
+            mem_str = session.cmd(cmd)
             mem = re.findall("([0-9]+)", mem_str)
             mem_size = 0
             for m in mem:
@@ -1259,3 +1738,17 @@
         """
         cmd = self.params.get("mem_chk_cur_cmd")
         return self.get_memory_size(cmd)
+
+
+    def save_to_file(self, path):
+        """
+        Save the state of virtual machine to a file through migrate to
+        exec
+        """
+        # Make sure we only get one iteration
+        self.monitor.cmd("migrate_set_speed 1000g")
+        self.monitor.cmd("migrate_set_downtime 100000000")
+        self.monitor.migrate('"exec:cat>%s"' % path)
+        # Restore the speed and downtime of migration
+        self.monitor.cmd("migrate_set_speed %d" % (32<<20))
+        self.monitor.cmd("migrate_set_downtime 0.03")
diff --git a/client/tests/kvm/migration_control.srv b/client/tests/kvm/migration_control.srv
new file mode 100644
index 0000000..16ada36
--- /dev/null
+++ b/client/tests/kvm/migration_control.srv
@@ -0,0 +1,122 @@
+AUTHOR = "Yolkfull Chow <yzhou@redhat.com>"
+TIME = "SHORT"
+NAME = "Migration across multiple hosts"
+TEST_CATEGORY = "Functional"
+TEST_CLASS = "Virtualization"
+TEST_TYPE = "Server"
+DOC = """
+Migrate KVM guest between two hosts. It parses the base config file, restricts
+it with appropriate parameters, generates the test dicts and modifies them
+so there's a distinction between the migration roles ('dest' or 'source').
+"""
+
+import sys, os, commands, glob, shutil, logging, random
+from autotest_lib.server import utils
+
+# Specify the directory of autotest before you start this test
+AUTOTEST_DIR = '/usr/local/autotest'
+
+# Specify the root directory used on the client machines
+rootdir = '/tmp/kvm_autotest_root'
+
+# Make possible to import the KVM test APIs
+KVM_DIR = os.path.join(AUTOTEST_DIR, 'client/tests/kvm')
+sys.path.append(KVM_DIR)
+
+import common, kvm_config
+
+def generate_mac_address():
+    r = random.SystemRandom()
+    mac = "9a:%02x:%02x:%02x:%02x:%02x" % (r.randint(0x00, 0xff),
+                                           r.randint(0x00, 0xff),
+                                           r.randint(0x00, 0xff),
+                                           r.randint(0x00, 0xff),
+                                           r.randint(0x00, 0xff))
+    return mac
+
+
+def run(pair):
+    logging.info("KVM migration running on source host [%s] and destination "
+                 "host [%s]\n", pair[0], pair[1])
+
+    source = hosts.create_host(pair[0])
+    dest = hosts.create_host(pair[1])
+    source_at = autotest.Autotest(source)
+    dest_at = autotest.Autotest(dest)
+
+    cfg_file = os.path.join(KVM_DIR, "tests_base.cfg")
+
+    if not os.path.exists(cfg_file):
+        raise error.JobError("Config file %s was not found", cfg_file)
+
+    # Get test set (dictionary list) from the configuration file
+    cfg = kvm_config.config()
+    test_variants = """
+image_name(_.*)? ?<= /tmp/kvm_autotest_root/images/
+cdrom(_.*)? ?<= /tmp/kvm_autotest_root/
+floppy ?<= /tmp/kvm_autotest_root/
+Linux:
+    unattended_install:
+        kernel ?<= /tmp/kvm_autotest_root/
+        initrd ?<= /tmp/kvm_autotest_root/
+qemu_binary = /usr/libexec/qemu-kvm
+qemu_img_binary = /usr/bin/qemu-img
+only qcow2
+only virtio_net
+only virtio_blk
+only smp2
+only no_pci_assignable
+only smallpages
+only Fedora.13.64
+only migrate_multi_host
+nic_mode = tap
+nic_mac_nic1 = %s
+""" % (generate_mac_address())
+    cfg.fork_and_parse(cfg_file, test_variants)
+    test_dicts = cfg.get_list()
+
+    source_control_file = dest_control_file = """
+kvm_test_dir = os.path.join(os.environ['AUTODIR'],'tests/kvm')
+sys.path.append(kvm_test_dir)\n
+"""
+    for params in test_dicts:
+        params['srchost'] = source.ip
+        params['dsthost'] = dest.ip
+        params['rootdir'] = rootdir
+
+        source_params = params.copy()
+        source_params['role'] = "source"
+
+        dest_params = params.copy()
+        dest_params['role'] = "destination"
+        dest_params['migration_mode'] = "tcp"
+
+        # Report the parameters we've received
+        print "Test parameters:"
+        keys = params.keys()
+        keys.sort()
+        for key in keys:
+            logging.debug("    %s = %s", key, params[key])
+
+        source_control_file += "job.run_test('kvm', tag='%s', params=%s)" % (source_params['shortname'], source_params)
+        dest_control_file += "job.run_test('kvm', tag='%s', params=%s)" % (dest_params['shortname'], dest_params)
+
+        logging.info('Source control file:\n%s', source_control_file)
+        logging.info('Destination control file:\n%s', dest_control_file)
+        dest_command = subcommand(dest_at.run,
+                                  [dest_control_file, dest.hostname])
+
+        source_command = subcommand(source_at.run,
+                                    [source_control_file, source.hostname])
+
+        parallel([dest_command, source_command])
+
+# Grab the pairs (and failures)
+(pairs, failures) = utils.form_ntuples_from_machines(machines, 2)
+
+# Log the failures
+for failure in failures:
+    job.record("FAIL", failure[0], "kvm", failure[1])
+
+# Now run through each pair and run
+job.parallel_simple(run, pairs, log=False)
diff --git a/client/tests/kvm/rss_file_transfer.py b/client/tests/kvm/rss_file_transfer.py
index 3de8259..4d00d17 100755
--- a/client/tests/kvm/rss_file_transfer.py
+++ b/client/tests/kvm/rss_file_transfer.py
@@ -27,7 +27,21 @@
 
 
 class FileTransferError(Exception):
-    pass
+    def __init__(self, msg, e=None, filename=None):
+        Exception.__init__(self, msg, e, filename)
+        self.msg = msg
+        self.e = e
+        self.filename = filename
+
+    def __str__(self):
+        s = self.msg
+        if self.e and self.filename:
+            s += "    (error: %s,    filename: %s)" % (self.e, self.filename)
+        elif self.e:
+            s += "    (%s)" % self.e
+        elif self.filename:
+            s += "    (filename: %s)" % self.filename
+        return s
 
 
 class FileTransferConnectError(FileTransferError):
@@ -42,12 +56,19 @@
     pass
 
 
-class FileTransferSendError(FileTransferError):
+class FileTransferSocketError(FileTransferError):
     pass
 
 
 class FileTransferServerError(FileTransferError):
-    pass
+    def __init__(self, errmsg):
+        FileTransferError.__init__(self, None, errmsg)
+
+    def __str__(self):
+        s = "Server said: %r" % self.e
+        if self.filename:
+            s += "    (filename: %s)" % self.filename
+        return s
 
 
 class FileTransferNotFoundError(FileTransferError):
@@ -59,23 +80,24 @@
     Connect to a RSS (remote shell server) and transfer files.
     """
 
-    def __init__(self, address, port, timeout=10):
+    def __init__(self, address, port, log_func=None, timeout=20):
         """
         Connect to a server.
 
         @param address: The server's address
         @param port: The server's port
+        @param log_func: If provided, transfer stats will be passed to this
+                function during the transfer
         @param timeout: Time duration to wait for connection to succeed
         @raise FileTransferConnectError: Raised if the connection fails
-        @raise FileTransferProtocolError: Raised if an incorrect magic number
-                is received
         """
         self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
         self._socket.settimeout(timeout)
         try:
             self._socket.connect((address, port))
-        except socket.error:
-            raise FileTransferConnectError("Could not connect to server")
+        except socket.error, e:
+            raise FileTransferConnectError("Cannot connect to server at "
+                                           "%s:%s" % (address, port), e)
         try:
             if self._receive_msg(timeout) != RSS_MAGIC:
                 raise FileTransferConnectError("Received wrong magic number")
@@ -83,6 +105,10 @@
             raise FileTransferConnectError("Timeout expired while waiting to "
                                            "receive magic number")
         self._send(struct.pack("=i", CHUNKSIZE))
+        self._log_func = log_func
+        self._last_time = time.time()
+        self._last_transferred = 0
+        self.transferred = 0
 
 
     def __del__(self):
@@ -96,86 +122,116 @@
         self._socket.close()
 
 
-    def _send(self, str):
+    def _send(self, str, timeout=60):
         try:
+            if timeout <= 0:
+                raise socket.timeout
+            self._socket.settimeout(timeout)
             self._socket.sendall(str)
-        except socket.error:
-            raise FileTransferSendError("Could not send data to server")
+        except socket.timeout:
+            raise FileTransferTimeoutError("Timeout expired while sending "
+                                           "data to server")
+        except socket.error, e:
+            raise FileTransferSocketError("Could not send data to server", e)
 
 
-    def _receive(self, size, timeout=10):
+    def _receive(self, size, timeout=60):
         strs = []
         end_time = time.time() + timeout
-        while size > 0:
-            try:
-                self._socket.settimeout(max(0.0001, end_time - time.time()))
+        try:
+            while size > 0:
+                timeout = end_time - time.time()
+                if timeout <= 0:
+                    raise socket.timeout
+                self._socket.settimeout(timeout)
                 data = self._socket.recv(size)
-            except socket.timeout:
-                raise FileTransferTimeoutError("Timeout expired while "
-                                               "receiving data from server")
-            except socket.error:
-                raise FileTransferProtocolError("Error receiving data from "
-                                                "server")
-            if not data:
-                raise FileTransferProtocolError("Connection closed "
-                                                "unexpectedly")
-            strs.append(data)
-            size -= len(data)
+                if not data:
+                    raise FileTransferProtocolError("Connection closed "
+                                                    "unexpectedly while "
+                                                    "receiving data from "
+                                                    "server")
+                strs.append(data)
+                size -= len(data)
+        except socket.timeout:
+            raise FileTransferTimeoutError("Timeout expired while receiving "
+                                           "data from server")
+        except socket.error, e:
+            raise FileTransferSocketError("Error receiving data from server",
+                                          e)
         return "".join(strs)
 
 
-    def _send_packet(self, str):
+    def _report_stats(self, str):
+        if self._log_func:
+            dt = time.time() - self._last_time
+            if dt >= 1:
+                transferred = self.transferred / 1048576.
+                speed = (self.transferred - self._last_transferred) / dt
+                speed /= 1048576.
+                self._log_func("%s %.3f MB (%.3f MB/sec)" %
+                               (str, transferred, speed))
+                self._last_time = time.time()
+                self._last_transferred = self.transferred
+
+
+    def _send_packet(self, str, timeout=60):
         self._send(struct.pack("=I", len(str)))
-        self._send(str)
+        self._send(str, timeout)
+        self.transferred += len(str) + 4
+        self._report_stats("Sent")
 
 
-    def _receive_packet(self, timeout=10):
+    def _receive_packet(self, timeout=60):
         size = struct.unpack("=I", self._receive(4))[0]
-        return self._receive(size, timeout)
+        str = self._receive(size, timeout)
+        self.transferred += len(str) + 4
+        self._report_stats("Received")
+        return str
 
 
-    def _send_file_chunks(self, filename, timeout=30):
+    def _send_file_chunks(self, filename, timeout=60):
+        if self._log_func:
+            self._log_func("Sending file %s" % filename)
         f = open(filename, "rb")
         try:
-            end_time = time.time() + timeout
-            while time.time() < end_time:
-                data = f.read(CHUNKSIZE)
-                self._send_packet(data)
-                if len(data) < CHUNKSIZE:
-                    break
-            else:
-                raise FileTransferTimeoutError("Timeout expired while sending "
-                                               "file %s" % filename)
+            try:
+                end_time = time.time() + timeout
+                while True:
+                    data = f.read(CHUNKSIZE)
+                    self._send_packet(data, end_time - time.time())
+                    if len(data) < CHUNKSIZE:
+                        break
+            except FileTransferError, e:
+                e.filename = filename
+                raise
         finally:
             f.close()
 
 
-    def _receive_file_chunks(self, filename, timeout=30):
+    def _receive_file_chunks(self, filename, timeout=60):
+        if self._log_func:
+            self._log_func("Receiving file %s" % filename)
         f = open(filename, "wb")
         try:
-            end_time = time.time() + timeout
-            while True:
-                try:
+            try:
+                end_time = time.time() + timeout
+                while True:
                     data = self._receive_packet(end_time - time.time())
-                except FileTransferTimeoutError:
-                    raise FileTransferTimeoutError("Timeout expired while "
-                                                   "receiving file %s" %
-                                                   filename)
-                except FileTransferProtocolError:
-                    raise FileTransferProtocolError("Error receiving file %s" %
-                                                    filename)
-                f.write(data)
-                if len(data) < CHUNKSIZE:
-                    break
+                    f.write(data)
+                    if len(data) < CHUNKSIZE:
+                        break
+            except FileTransferError, e:
+                e.filename = filename
+                raise
         finally:
             f.close()
 
 
-    def _send_msg(self, msg, timeout=10):
+    def _send_msg(self, msg, timeout=60):
         self._send(struct.pack("=I", msg))
 
 
-    def _receive_msg(self, timeout=10):
+    def _receive_msg(self, timeout=60):
         s = self._receive(4, timeout)
         return struct.unpack("=I", s)[0]
 
@@ -191,7 +247,7 @@
             raise e[0], e[1], e[2]
         if msg == RSS_ERROR:
             errmsg = self._receive_packet()
-            raise FileTransferServerError("Server said: %s" % errmsg)
+            raise FileTransferServerError(errmsg)
         raise e[0], e[1], e[2]
 
 
@@ -200,20 +256,22 @@
     Connect to a RSS (remote shell server) and upload files or directory trees.
     """
 
-    def __init__(self, address, port, timeout=10):
+    def __init__(self, address, port, log_func=None, timeout=20):
         """
         Connect to a server.
 
         @param address: The server's address
         @param port: The server's port
+        @param log_func: If provided, transfer stats will be passed to this
+                function during the transfer
         @param timeout: Time duration to wait for connection to succeed
         @raise FileTransferConnectError: Raised if the connection fails
         @raise FileTransferProtocolError: Raised if an incorrect magic number
                 is received
-        @raise FileTransferSendError: Raised if the RSS_UPLOAD message cannot
+        @raise FileTransferSocketError: Raised if the RSS_UPLOAD message cannot
                 be sent to the server
         """
-        super(FileUploadClient, self).__init__(address, port, timeout)
+        super(FileUploadClient, self).__init__(address, port, log_func, timeout)
         self._send_msg(RSS_UPLOAD)
 
 
@@ -221,7 +279,7 @@
         if os.path.isfile(path):
             self._send_msg(RSS_CREATE_FILE)
             self._send_packet(os.path.basename(path))
-            self._send_file_chunks(path, max(0, end_time - time.time()))
+            self._send_file_chunks(path, end_time - time.time())
         elif os.path.isdir(path):
             self._send_msg(RSS_CREATE_DIR)
             self._send_packet(os.path.basename(path))
@@ -277,12 +335,12 @@
                                                     "directories" %
                                                     src_pattern)
                 # Look for RSS_OK or RSS_ERROR
-                msg = self._receive_msg(max(0, end_time - time.time()))
+                msg = self._receive_msg(end_time - time.time())
                 if msg == RSS_OK:
                     return
                 elif msg == RSS_ERROR:
                     errmsg = self._receive_packet()
-                    raise FileTransferServerError("Server said: %s" % errmsg)
+                    raise FileTransferServerError(errmsg)
                 else:
                     # Neither RSS_OK nor RSS_ERROR found
                     raise FileTransferProtocolError("Received unexpected msg")
@@ -297,12 +355,14 @@
     Connect to a RSS (remote shell server) and download files or directory trees.
     """
 
-    def __init__(self, address, port, timeout=10):
+    def __init__(self, address, port, log_func=None, timeout=20):
         """
         Connect to a server.
 
         @param address: The server's address
         @param port: The server's port
+        @param log_func: If provided, transfer stats will be passed to this
+                function during the transfer
         @param timeout: Time duration to wait for connection to succeed
         @raise FileTransferConnectError: Raised if the connection fails
         @raise FileTransferProtocolError: Raised if an incorrect magic number
@@ -310,7 +370,7 @@
         @raise FileTransferSendError: Raised if the RSS_UPLOAD message cannot
                 be sent to the server
         """
-        super(FileDownloadClient, self).__init__(address, port, timeout)
+        super(FileDownloadClient, self).__init__(address, port, log_func, timeout)
         self._send_msg(RSS_DOWNLOAD)
 
 
@@ -358,8 +418,7 @@
                     filename = self._receive_packet()
                     if os.path.isdir(dst_path):
                         dst_path = os.path.join(dst_path, filename)
-                    self._receive_file_chunks(
-                            dst_path, max(0, end_time - time.time()))
+                    self._receive_file_chunks(dst_path, end_time - time.time())
                     dst_path = os.path.dirname(dst_path)
                     file_count += 1
                 elif msg == RSS_CREATE_DIR:
@@ -385,7 +444,7 @@
                 elif msg == RSS_ERROR:
                     # Receive error message and abort
                     errmsg = self._receive_packet()
-                    raise FileTransferServerError("Server said: %s" % errmsg)
+                    raise FileTransferServerError(errmsg)
                 else:
                     # Unexpected msg
                     raise FileTransferProtocolError("Received unexpected msg")
@@ -395,26 +454,26 @@
             raise
 
 
-def upload(address, port, src_pattern, dst_path, timeout=60,
-           connect_timeout=10):
+def upload(address, port, src_pattern, dst_path, log_func=None, timeout=60,
+           connect_timeout=20):
     """
     Connect to server and upload files.
 
     @see: FileUploadClient
     """
-    client = FileUploadClient(address, port, connect_timeout)
+    client = FileUploadClient(address, port, log_func, connect_timeout)
     client.upload(src_pattern, dst_path, timeout)
     client.close()
 
 
-def download(address, port, src_pattern, dst_path, timeout=60,
-             connect_timeout=10):
+def download(address, port, src_pattern, dst_path, log_func=None, timeout=60,
+             connect_timeout=20):
     """
     Connect to server and upload files.
 
     @see: FileDownloadClient
     """
-    client = FileDownloadClient(address, port, connect_timeout)
+    client = FileDownloadClient(address, port, log_func, connect_timeout)
     client.download(src_pattern, dst_path, timeout)
     client.close()
 
@@ -430,6 +489,9 @@
     parser.add_option("-u", "--upload",
                       action="store_true", dest="upload",
                       help="upload files to server")
+    parser.add_option("-v", "--verbose",
+                      action="store_true", dest="verbose",
+                      help="be verbose")
     parser.add_option("-t", "--timeout",
                       type="int", dest="timeout", default=3600,
                       help="transfer timeout")
@@ -441,10 +503,16 @@
     address, port, src_pattern, dst_path = args
     port = int(port)
 
+    logger = None
+    if options.verbose:
+        def p(s):
+            print s
+        logger = p
+
     if options.download:
-        download(address, port, src_pattern, dst_path, options.timeout)
+        download(address, port, src_pattern, dst_path, logger, options.timeout)
     elif options.upload:
-        upload(address, port, src_pattern, dst_path, options.timeout)
+        upload(address, port, src_pattern, dst_path, logger, options.timeout)
 
 
 if __name__ == "__main__":
diff --git a/client/tests/kvm/scan_results.py b/client/tests/kvm/scan_results.py
index a339a85..be825f6 100755
--- a/client/tests/kvm/scan_results.py
+++ b/client/tests/kvm/scan_results.py
@@ -38,7 +38,7 @@
             test_status = parts[0].split()[1]
             # Remove "kvm." prefix
             if test_name.startswith("kvm."):
-                test_name = test_name.split("kvm.")[1]
+                test_name = test_name[4:]
             result_list.append((test_name, test_status,
                                 int(end_time - start_time), info))
 
@@ -86,7 +86,7 @@
 
 
 if __name__ == "__main__":
-    import sys, os, glob
+    import sys, glob
 
     resfiles = glob.glob("../../results/default/status*")
     if len(sys.argv) > 1:
diff --git a/client/tests/kvm/scripts/check_image.py b/client/tests/kvm/scripts/check_image.py
deleted file mode 100644
index 2b5c227..0000000
--- a/client/tests/kvm/scripts/check_image.py
+++ /dev/null
@@ -1,99 +0,0 @@
-import os, sys, commands
-
-
-class ImageCheckError(Exception):
-    """
-    Simple wrapper for the builtin Exception class.
-    """
-    pass
-
-
-class ImageCheck(object):
-    """
-    Check qcow2 image by qemu-img info/check command.
-    """
-    def __init__(self):
-        """
-        Gets params from environment variables and sets class attributes.
-        """
-        self.image_path_list = []
-        client_dir =  os.environ['AUTODIR']
-        self.kvm_dir = os.path.join(client_dir, 'tests/kvm')
-        img_to_check = os.environ['KVM_TEST_images'].split()
-
-        for img in img_to_check:
-            img_name_str = "KVM_TEST_image_name_%s" % img
-            if not os.environ.has_key(img_name_str):
-                img_name_str = "KVM_TEST_image_name"
-            img_format_str = "KVM_TEST_image_format_%s" % img
-            if os.environ.has_key(img_format_str):
-                image_format = os.environ[img_format_str]
-            else:
-                image_format = os.environ['KVM_TEST_image_format']
-            if image_format != "qcow2":
-                continue
-            image_name = os.environ[img_name_str]
-            image_filename = "%s.%s" % (image_name, image_format)
-            image_filename = os.path.join(self.kvm_dir, image_filename)
-            self.image_path_list.append(image_filename)
-        if os.environ.has_key('KVM_TEST_qemu_img_binary'):
-            self.qemu_img_path = os.environ['KVM_TEST_qemu_img_binary']
-        else:
-            self.qemu_img_path = os.path.join(self.kvm_dir, 'qemu-img')
-        self.qemu_img_check = True
-        cmd = "%s |grep check" % self.qemu_img_path
-        (s1, output) = commands.getstatusoutput(cmd)
-        if s1:
-            self.qemu_img_check = False
-            print "Command qemu-img check not available, not checking..."
-        cmd = "%s |grep info" % self.qemu_img_path
-        (s2, output) = commands.getstatusoutput(cmd)
-        if s2:
-            self.qemu_img_check = False
-            print "Command qemu-img info not available, not checking..."
-
-    def exec_img_cmd(self, cmd_type, image_path):
-        """
-        Run qemu-img info/check on given image.
-
-        @param cmd_type: Sub command used together with qemu.
-        @param image_path: Real path of the image.
-        """
-        cmd = ' '.join([self.qemu_img_path, cmd_type, image_path])
-        print "Checking image with command %s" % cmd
-        (status, output) = commands.getstatusoutput(cmd)
-        print output
-        if status or (cmd_type == "check" and not "No errors" in output):
-            msg = "Command %s failed" % cmd
-            return False, msg
-        else:
-            return True, ''
-
-
-    def check_image(self):
-        """
-        Run qemu-img info/check to check the image in list.
-
-        If the image checking is failed, raise an exception.
-        """
-        # Check all the image in list.
-        errmsg = []
-        for image_path in self.image_path_list:
-            if not os.path.exists(image_path):
-                print "Image %s does not exist!" % image_path
-                continue
-            s, o = self.exec_img_cmd('info', image_path)
-            if not s:
-                errmsg.append(o)
-            s, o = self.exec_img_cmd('check', image_path)
-            if not s:
-                errmsg.append(o)
-
-        if len(errmsg) > 0:
-            raise ImageCheckError('Errors were found, please check log!')
-
-
-if __name__ == "__main__":
-    image_check = ImageCheck()
-    if image_check.qemu_img_check:
-        image_check.check_image()
diff --git a/client/tests/kvm/scripts/hugepage.py b/client/tests/kvm/scripts/hugepage.py
deleted file mode 100755
index 8a1b0f6..0000000
--- a/client/tests/kvm/scripts/hugepage.py
+++ /dev/null
@@ -1,118 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-import os, sys, time
-
-"""
-Simple script to allocate enough hugepages for KVM testing purposes.
-"""
-
-class HugePageError(Exception):
-    """
-    Simple wrapper for the builtin Exception class.
-    """
-    pass
-
-
-class HugePage:
-    def __init__(self, hugepage_path=None):
-        """
-        Gets environment variable values and calculates the target number
-        of huge memory pages.
-
-        @param hugepage_path: Path where to mount hugetlbfs path, if not
-                yet configured.
-        """
-        self.vms = len(os.environ['KVM_TEST_vms'].split())
-        self.mem = int(os.environ['KVM_TEST_mem'])
-        try:
-            self.max_vms = int(os.environ['KVM_TEST_max_vms'])
-        except KeyError:
-            self.max_vms = 0
-
-        if hugepage_path:
-            self.hugepage_path = hugepage_path
-        else:
-            self.hugepage_path = '/mnt/kvm_hugepage'
-
-        self.hugepage_size = self.get_hugepage_size()
-        self.target_hugepages = self.get_target_hugepages()
-        print "Number of VMs this test will use: %d" % self.vms
-        print "Amount of memory used by each vm: %s" % self.mem
-        print ("System setting for large memory page size: %s" %
-               self.hugepage_size)
-        print ("Number of large memory pages needed for this test: %s" %
-               self.target_hugepages)
-
-
-    def get_hugepage_size(self):
-        """
-        Get the current system setting for huge memory page size.
-        """
-        meminfo = open('/proc/meminfo', 'r').readlines()
-        huge_line_list = [h for h in meminfo if h.startswith("Hugepagesize")]
-        try:
-            return int(huge_line_list[0].split()[1])
-        except ValueError, e:
-            raise HugePageError("Could not get huge page size setting from "
-                                "/proc/meminfo: %s" % e)
-
-
-    def get_target_hugepages(self):
-        """
-        Calculate the target number of hugepages for testing purposes.
-        """
-        if self.vms < self.max_vms:
-            self.vms = self.max_vms
-        # memory of all VMs plus qemu overhead of 64MB per guest
-        vmsm = (self.vms * self.mem) + (self.vms * 64)
-        return int(vmsm * 1024 / self.hugepage_size)
-
-
-    def set_hugepages(self):
-        """
-        Sets the hugepage limit to the target hugepage value calculated.
-        """
-        hugepage_cfg = open("/proc/sys/vm/nr_hugepages", "r+")
-        hp = hugepage_cfg.readline()
-        while int(hp) < self.target_hugepages:
-            loop_hp = hp
-            hugepage_cfg.write(str(self.target_hugepages))
-            hugepage_cfg.flush()
-            hugepage_cfg.seek(0)
-            hp = int(hugepage_cfg.readline())
-            if loop_hp == hp:
-                raise HugePageError("Cannot set the kernel hugepage setting "
-                                    "to the target value of %d hugepages." %
-                                    self.target_hugepages)
-        hugepage_cfg.close()
-        print ("Successfuly set %s large memory pages on host " %
-               self.target_hugepages)
-
-
-    def mount_hugepage_fs(self):
-        """
-        Verify if there's a hugetlbfs mount set. If there's none, will set up
-        a hugetlbfs mount using the class attribute that defines the mount
-        point.
-        """
-        if not os.path.ismount(self.hugepage_path):
-            if not os.path.isdir(self.hugepage_path):
-                os.makedirs(self.hugepage_path)
-            cmd = "mount -t hugetlbfs none %s" % self.hugepage_path
-            if os.system(cmd):
-                raise HugePageError("Cannot mount hugetlbfs path %s" %
-                                    self.hugepage_path)
-
-
-    def setup(self):
-        self.set_hugepages()
-        self.mount_hugepage_fs()
-
-
-if __name__ == "__main__":
-    if len(sys.argv) < 2:
-        huge_page = HugePage()
-    else:
-        huge_page = HugePage(sys.argv[1])
-
-    huge_page.setup()
diff --git a/client/tests/kvm/scripts/allocator.py b/client/tests/kvm/scripts/ksm_overcommit_guest.py
similarity index 97%
rename from client/tests/kvm/scripts/allocator.py
rename to client/tests/kvm/scripts/ksm_overcommit_guest.py
index 227745a..d52be5b 100755
--- a/client/tests/kvm/scripts/allocator.py
+++ b/client/tests/kvm/scripts/ksm_overcommit_guest.py
@@ -8,11 +8,11 @@
 """
 
 
-import os, array, sys, struct, random, copy, inspect, tempfile, datetime, math
+import os, array, sys, random, copy, tempfile, datetime, math
 
 PAGE_SIZE = 4096 # machine page size
 
-TMPFS_OVERHEAD = 0.0022 # overhead on 1MB of write data 
+TMPFS_OVERHEAD = 0.0022 # overhead on 1MB of write data
 
 
 class MemFill(object):
@@ -34,7 +34,7 @@
 
         self.tmpdp = tempfile.mkdtemp()
         ret_code = os.system("mount -o size=%dM tmpfs %s -t tmpfs" %
-                             ((mem+math.ceil(mem*TMPFS_OVERHEAD)), 
+                             ((mem+math.ceil(mem*TMPFS_OVERHEAD)),
                              self.tmpdp))
         if ret_code != 0:
             if os.getuid() != 0:
diff --git a/client/tests/kvm/scripts/multicast_guest.py b/client/tests/kvm/scripts/multicast_guest.py
new file mode 100755
index 0000000..350cd5f
--- /dev/null
+++ b/client/tests/kvm/scripts/multicast_guest.py
@@ -0,0 +1,37 @@
+#!/usr/bin/python
+import socket, struct, os, signal, sys
+# -*- coding: utf-8 -*-
+
+"""
+Script used to join machine into multicast groups.
+
+@author Amos Kong <akong@redhat.com>
+"""
+
+if __name__ == "__main__":
+    if len(sys.argv) < 4:
+        print """%s [mgroup_count] [prefix] [suffix]
+        mgroup_count: count of multicast addresses
+        prefix: multicast address prefix
+        suffix: multicast address suffix""" % sys.argv[0]
+        sys.exit()
+
+    mgroup_count = int(sys.argv[1])
+    prefix = sys.argv[2]
+    suffix = int(sys.argv[3])
+
+    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+    for i in range(mgroup_count):
+        mcast = prefix + "." + str(suffix + i)
+        try:
+            mreq = struct.pack("4sl", socket.inet_aton(mcast),
+                               socket.INADDR_ANY)
+            s.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
+        except:
+            s.close()
+            print "Could not join multicast: %s" % mcast
+            raise
+
+    print "join_mcast_pid:%s" % os.getpid()
+    os.kill(os.getpid(), signal.SIGSTOP)
+    s.close()
diff --git a/client/tests/kvm/scripts/nic_bonding_guest.py b/client/tests/kvm/scripts/nic_bonding_guest.py
new file mode 100644
index 0000000..f2d4be9
--- /dev/null
+++ b/client/tests/kvm/scripts/nic_bonding_guest.py
@@ -0,0 +1,37 @@
+import os, re, commands, sys
+"""This script is used to set up bonding; the MAC address of bond0 should be
+given as the first command-line argument."""
+
+if len(sys.argv) != 2:
+    sys.exit(1)
+mac = sys.argv[1]
+eth_nums = 0
+ifconfig_output = commands.getoutput("ifconfig")
+re_eth = "eth[0-9]*"
+for ename in re.findall(re_eth, ifconfig_output):
+    eth_config_file = "/etc/sysconfig/network-scripts/ifcfg-%s" % ename
+    eth_config = """DEVICE=%s
+USERCTL=no
+ONBOOT=yes
+MASTER=bond0
+SLAVE=yes
+BOOTPROTO=none
+""" % ename
+    f = file(eth_config_file,'w')
+    f.write(eth_config)
+    f.close()
+
+bonding_config_file = "/etc/sysconfig/network-scripts/ifcfg-bond0"
+bond_config = """DEVICE=bond0
+BOOTPROTO=dhcp
+NETWORKING_IPV6=no
+ONBOOT=yes
+USERCTL=no
+MACADDR=%s
+""" % mac
+f = file(bonding_config_file, "w")
+f.write(bond_config)
+f.close()
+os.system("modprobe bonding")
+os.system("service NetworkManager stop")
+os.system("service network restart")
diff --git a/client/tests/kvm/scripts/virtio_console_guest.py b/client/tests/kvm/scripts/virtio_console_guest.py
new file mode 100755
index 0000000..c407231
--- /dev/null
+++ b/client/tests/kvm/scripts/virtio_console_guest.py
@@ -0,0 +1,715 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+"""
+Auxiliary script used to send data between ports on guests.
+
+@copyright: 2010 Red Hat, Inc.
+@author: Jiri Zupka (jzupka@redhat.com)
+@author: Lukas Doktor (ldoktor@redhat.com)
+"""
+import threading
+from threading import Thread
+import os, select, re, random, sys, array
+import fcntl, traceback, signal
+
+DEBUGPATH = "/sys/kernel/debug"
+SYSFSPATH = "/sys/class/virtio-ports/"
+
+exiting = False
+
+class VirtioGuest:
+    """
+    Test tools of virtio_ports.
+    """
+    LOOP_NONE = 0
+    LOOP_POLL = 1
+    LOOP_SELECT = 2
+
+    def __init__(self):
+        self.files = {}
+        self.exit_thread = threading.Event()
+        self.threads = []
+        self.ports = {}
+        self.poll_fds = {}
+        self.catch_signal = None
+        self.use_config = threading.Event()
+
+
+    def _readfile(self, name):
+        """
+        Read file and return content as string
+
+        @param name: Name of file
+        @return: Content of file as string
+        """
+        out = ""
+        try:
+            f = open(name, "r")
+            out = f.read()
+            f.close()
+        except:
+            print "FAIL: Cannot open file %s" % (name)
+
+        return out
+
+
+    def _get_port_status(self):
+        """
+        Get info about ports from kernel debugfs.
+
+        @return: Ports dictionary of port properties
+        """
+        ports = {}
+        not_present_msg = "FAIL: There's no virtio-ports dir in debugfs"
+        if (not os.path.ismount(DEBUGPATH)):
+            os.system('mount -t debugfs none %s' % (DEBUGPATH))
+        try:
+            if not os.path.isdir('%s/virtio-ports' % (DEBUGPATH)):
+                print not_present_msg
+        except:
+            print not_present_msg
+        else:
+            viop_names = os.listdir('%s/virtio-ports' % (DEBUGPATH))
+            for name in viop_names:
+                open_db_file = "%s/virtio-ports/%s" % (DEBUGPATH, name)
+                f = open(open_db_file, 'r')
+                port = {}
+                file = []
+                for line in iter(f):
+                    file.append(line)
+                try:
+                    for line in file:
+                        m = re.match("(\S+): (\S+)", line)
+                        port[m.group(1)] = m.group(2)
+
+                    if (port['is_console'] == "yes"):
+                        port["path"] = "/dev/hvc%s" % (port["console_vtermno"])
+                        # Console works like a serialport
+                    else:
+                        port["path"] = "/dev/%s" % name
+
+                    if (not os.path.exists(port['path'])):
+                        print "FAIL: %s not exist" % port['path']
+
+                    sysfspath = SYSFSPATH + name
+                    if (not os.path.isdir(sysfspath)):
+                        print "FAIL: %s not exist" % (sysfspath)
+
+                    info_name = sysfspath + "/name"
+                    port_name = self._readfile(info_name).strip()
+                    if (port_name != port["name"]):
+                        print ("FAIL: Port info not match \n%s - %s\n%s - %s" %
+                               (info_name , port_name,
+                                "%s/virtio-ports/%s" % (DEBUGPATH, name),
+                                port["name"]))
+                except AttributeError:
+                    print ("In file " + open_db_file +
+                           " are bad data\n"+ "".join(file).strip())
+                    print ("FAIL: Fail file data.")
+                    return
+
+                ports[port['name']] = port
+                f.close()
+
+        return ports
+
+
+    def init(self, in_files):
+        """
+        Init and check port properties.
+        """
+        self.ports = self._get_port_status()
+
+        if self.ports == None:
+            return
+        for item in in_files:
+            if (item[1] != self.ports[item[0]]["is_console"]):
+                print self.ports
+                print "FAIL: Host console is not like console on guest side\n"
+        print "PASS: Init and check virtioconsole files in system."
+
+
+    class Switch(Thread):
+        """
+        Thread that sends data between ports.
+        """
+        def __init__ (self, in_files, out_files, event,
+                      cachesize=1024, method=0):
+            """
+            @param in_files: Array of input files.
+            @param out_files: Array of output files.
+            @param method: Method of read/write access.
+            @param cachesize: Block to receive and send.
+            """
+            Thread.__init__(self, name="Switch")
+
+            self.in_files = in_files
+            self.out_files = out_files
+            self.exit_thread = event
+            self.method = method
+
+            self.cachesize = cachesize
+
+
+        def _none_mode(self):
+            """
+            Read and write to device in blocking mode
+            """
+            data = ""
+            while not self.exit_thread.isSet():
+                data = ""
+                for desc in self.in_files:
+                    data += os.read(desc, self.cachesize)
+                if data != "":
+                    for desc in self.out_files:
+                        os.write(desc, data)
+
+
+        def _poll_mode(self):
+            """
+            Read and write to device in polling mode.
+            """
+
+            pi = select.poll()
+            po = select.poll()
+
+            for fd in self.in_files:
+                pi.register(fd, select.POLLIN)
+
+            for fd in self.out_files:
+                po.register(fd, select.POLLOUT)
+
+            while not self.exit_thread.isSet():
+                data = ""
+                t_out = self.out_files
+
+                readyf = pi.poll(1.0)
+                for i in readyf:
+                    data += os.read(i[0], self.cachesize)
+
+                if data != "":
+                    while ((len(t_out) != len(readyf)) and not
+                           self.exit_thread.isSet()):
+                        readyf = po.poll(1.0)
+                    for desc in t_out:
+                        os.write(desc, data)
+
+
+        def _select_mode(self):
+            """
+            Read and write to device in selecting mode.
+            """
+            while not self.exit_thread.isSet():
+                ret = select.select(self.in_files, [], [], 1.0)
+                data = ""
+                if ret[0] != []:
+                    for desc in ret[0]:
+                        data += os.read(desc, self.cachesize)
+                if data != "":
+                    ret = select.select([], self.out_files, [], 1.0)
+                    while ((len(self.out_files) != len(ret[1])) and not
+                           self.exit_thread.isSet()):
+                        ret = select.select([], self.out_files, [], 1.0)
+                    for desc in ret[1]:
+                        os.write(desc, data)
+
+
+        def run(self):
+            if (self.method == VirtioGuest.LOOP_POLL):
+                self._poll_mode()
+            elif (self.method == VirtioGuest.LOOP_SELECT):
+                self._select_mode()
+            else:
+                self._none_mode()
+
+
+    class Sender(Thread):
+        """
+        Creates a thread which sends random blocks of data to dst port.
+        """
+        def __init__(self, port, event, length):
+            """
+            @param port: Destination port
+            @param length: Length of the random data block
+            """
+            Thread.__init__(self, name="Sender")
+            self.port = port
+            self.exit_thread = event
+            self.data = array.array('L')
+            for i in range(max(length / self.data.itemsize, 1)):
+                self.data.append(random.randrange(sys.maxint))
+
+        def run(self):
+            while not self.exit_thread.isSet():
+                os.write(self.port, self.data)
+
+
+    def _open(self, in_files):
+        """
+        Open devices and return array of descriptors
+
+        @param in_files: Files array
+        @return: Array of descriptor
+        """
+        f = []
+
+        for item in in_files:
+            name = self.ports[item]["path"]
+            if (name in self.files):
+                f.append(self.files[name])
+            else:
+                try:
+                    self.files[name] = os.open(name, os.O_RDWR)
+                    if (self.ports[item]["is_console"] == "yes"):
+                        print os.system("stty -F %s raw -echo" % (name))
+                        print os.system("stty -F %s -a" % (name))
+                    f.append(self.files[name])
+                except Exception, inst:
+                    print "FAIL: Failed to open file %s" % (name)
+                    raise inst
+        return f
+
+    @staticmethod
+    def pollmask_to_str(mask):
+        """
+        Convert a poll mask to a human-readable string.
+
+        @param mask: poll return mask
+        """
+        str = ""
+        if (mask & select.POLLIN):
+            str += "IN "
+        if (mask & select.POLLPRI):
+            str += "PRI IN "
+        if (mask & select.POLLOUT):
+            str += "OUT "
+        if (mask & select.POLLERR):
+            str += "ERR "
+        if (mask & select.POLLHUP):
+            str += "HUP "
+        if (mask & select.POLLMSG):
+            str += "MSG "
+        return str
+
+
+    def poll(self, port, expected, timeout=500):
+        """
+        Poll events from a device and print them as text.
+
+        @param file: Device.
+        """
+        in_f = self._open([port])
+
+        p = select.poll()
+        p.register(in_f[0])
+
+        mask = p.poll(timeout)
+
+        maskstr = VirtioGuest.pollmask_to_str(mask[0][1])
+        if (mask[0][1] & expected) == expected:
+            print "PASS: Events: " + maskstr
+        else:
+            emaskstr = VirtioGuest.pollmask_to_str(expected)
+            print "FAIL: Events: " + maskstr + "  Expected: " + emaskstr
+
+
+    def lseek(self, port, pos, how):
+        """
+        Use lseek on the device. The device is unseekable so PASS is returned
+        when lseek command fails and vice versa.
+
+        @param port: Name of the port
+        @param pos: Offset
+        @param how: Relative offset os.SEEK_{SET,CUR,END}
+        """
+        fd = self._open([port])[0]
+
+        try:
+            os.lseek(fd, pos, how)
+        except Exception, inst:
+            if inst.errno == 29:
+                print "PASS: the lseek failed as expected"
+            else:
+                print inst
+                print "FAIL: unknown error"
+        else:
+            print "FAIL: the lseek unexpectedly passed"
+
+
+    def blocking(self, port, mode=False):
+        """
+        Set port function mode blocking/nonblocking
+
+        @param port: port to set mode
+        @param mode: False to set nonblock mode, True for block mode
+        """
+        fd = self._open([port])[0]
+
+        try:
+            fl = fcntl.fcntl(fd, fcntl.F_GETFL)
+            if not mode:
+                fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
+            else:
+                fcntl.fcntl(fd, fcntl.F_SETFL, fl & ~os.O_NONBLOCK)
+
+        except Exception, inst:
+            print "FAIL: Setting (non)blocking mode: " + str(inst)
+            return
+
+        if mode:
+            print "PASS: set to blocking mode"
+        else:
+            print "PASS: set to nonblocking mode"
+
+
+    def __call__(self, sig, frame):
+        """
+        Call function. Used for signal handle.
+        """
+        if (sig == signal.SIGIO):
+            self.sigio_handler(sig, frame)
+
+
+    def sigio_handler(self, sig, frame):
+        """
+        Handler for sigio operation.
+
+        @param sig: signal which call handler.
+        @param frame: frame of caller
+        """
+        if self.poll_fds:
+            p = select.poll()
+            map(p.register, self.poll_fds.keys())
+
+            masks = p.poll(1)
+            print masks
+            for mask in masks:
+                self.poll_fds[mask[0]][1] |= mask[1]
+
+
+    def get_sigio_poll_return(self, port):
+        """
+        Return PASS, FAIL and poll value in string format.
+
+        @param port: Port to check poll information.
+        """
+        fd = self._open([port])[0]
+
+        maskstr = VirtioGuest.pollmask_to_str(self.poll_fds[fd][1])
+        if (self.poll_fds[fd][0] ^ self.poll_fds[fd][1]):
+            emaskstr = VirtioGuest.pollmask_to_str(self.poll_fds[fd][0])
+            print "FAIL: Events: " + maskstr + "  Expected: " + emaskstr
+        else:
+            print "PASS: Events: " + maskstr
+        self.poll_fds[fd][1] = 0
+
+
+    def set_pool_want_return(self, port, poll_value):
+        """
+        Set value to static variable.
+
+        @param port: Port whose expected event mask should be set
+        @param poll_value: Value to check sigio signal.
+        """
+        fd = self._open([port])[0]
+        self.poll_fds[fd] = [poll_value, 0]
+        print "PASS: Events: " + VirtioGuest.pollmask_to_str(poll_value)
+
+
+    def catching_signal(self):
+        """
+        return: True if the signal should be caught, False if it should be
+                ignored, and None when the configuration is unchanged.
+        """
+        ret = self.catch_signal
+        self.catch_signal = None
+        return ret
+
+
+    def async(self, port, mode=True, exp_val = 0):
+        """
+        Set port function mode async/sync.
+
+        @param port: port which should be polled.
+        @param mode: False to set sync mode, True for async mode.
+        @param exp_val: Value which should be polled.
+        """
+        fd = self._open([port])[0]
+
+        try:
+            fcntl.fcntl(fd, fcntl.F_SETOWN, os.getpid())
+            fl = fcntl.fcntl(fd, fcntl.F_GETFL)
+
+            self.use_config.clear()
+            if mode:
+                fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_ASYNC)
+                self.poll_fds[fd] = [exp_val, 0]
+                self.catch_signal = True
+            else:
+                del self.poll_fds[fd]
+                fcntl.fcntl(fd, fcntl.F_SETFL, fl & ~os.O_ASYNC)
+                self.catch_signal = False
+
+            os.kill(os.getpid(), signal.SIGUSR1)
+            self.use_config.wait()
+
+        except Exception, inst:
+            print "FAIL: Setting (a)sync mode: " + str(inst)
+            return
+
+        if mode:
+            print "PASS: Set to async mode"
+        else:
+            print "PASS: Set to sync mode"
+
+
+    def close(self, file):
+        """
+        Close open port.
+
+        @param file: File to close.
+        """
+        descriptor = None
+        path = self.ports[file]["path"]
+        if path != None:
+            if path in self.files.keys():
+                descriptor = self.files[path]
+                del self.files[path]
+            if descriptor != None:
+                try:
+                    os.close(descriptor)
+                except Exception, inst:
+                    print "FAIL: Closing the file: " + str(inst)
+                    return
+        print "PASS: Close"
+
+
+    def open(self, in_file):
+        """
+        Direct open devices.
+
+        @param in_file: Array of files.
+        @return: Array of descriptors.
+        """
+        name = self.ports[in_file]["path"]
+        try:
+            self.files[name] = os.open(name, os.O_RDWR)
+            if (self.ports[in_file]["is_console"] == "yes"):
+                print os.system("stty -F %s raw -echo" % (name))
+            print "PASS: Open all filles correctly."
+        except Exception, inst:
+            print "%s\nFAIL: Failed open file %s" % (str(inst), name)
+
+
+    def loopback(self, in_files, out_files, cachesize=1024, mode=LOOP_NONE):
+        """
+        Start a switch thread.
+
+        (There is a problem with multiple opens of a single file).
+
+        @param in_files: Array of input files.
+        @param out_files: Array of output files.
+        @param cachesize: Cachesize.
+        """
+        self.ports = self._get_port_status()
+
+        in_f = self._open(in_files)
+        out_f = self._open(out_files)
+
+        s = self.Switch(in_f, out_f, self.exit_thread, cachesize, mode)
+        s.start()
+        self.threads.append(s)
+        print "PASS: Start switch"
+
+
+    def exit_threads(self):
+        """
+        End all running data switch threads.
+        """
+        self.exit_thread.set()
+        for th in self.threads:
+            print "join"
+            th.join()
+        self.exit_thread.clear()
+
+        del self.threads[:]
+        for desc in self.files.itervalues():
+            os.close(desc)
+        self.files.clear()
+        print "PASS: All threads finished."
+
+
+    def die(self):
+        """
+        Quit consoleswitch.
+        """
+        self.exit_threads()
+        exit()
+
+
+    def send_loop_init(self, port, length):
+        """
+        Prepares the sender thread. Requires clean thread structure.
+        """
+        self.ports = self._get_port_status()
+        in_f = self._open([port])
+
+        self.threads.append(self.Sender(in_f[0], self.exit_thread, length))
+        print "PASS: Sender prepare"
+
+
+    def send_loop(self):
+        """
+        Start sender data transfer. Requires send_loop_init to be run first.
+        """
+        self.threads[0].start()
+        print "PASS: Sender start"
+
+
+    def send(self, port, length=1, mode=True):
+        """
+        Send data of a given length.
+
+        @param port: Port to write data
+        @param length: Length of data
+        @param mode: True = loop mode, False = one shot mode
+        """
+        in_f = self._open([port])
+
+        data = ""
+        while len(data) < length:
+            data += "%c" % random.randrange(255)
+        try:
+            writes = os.write(in_f[0], data)
+        except Exception, inst:
+            print inst
+        if not writes:
+            writes = 0
+        if mode:
+            while (writes < length):
+                try:
+                    writes += os.write(in_f[0], data)
+                except Exception, inst:
+                    print inst
+        if writes >= length:
+            print "PASS: Send data length %d" % writes
+        else:
+            print ("FAIL: Partial send: desired %d, transfered %d" %
+                   (length, writes))
+
+
+    def recv(self, port, length=1, buffer=1024, mode=True):
+        """
+        Receive data of a given length.
+
+        @param port: Port to write data
+        @param length: Length of data
+        @param mode: True = loop mode, False = one shot mode
+        """
+        in_f = self._open([port])
+
+        recvs = ""
+        try:
+            recvs = os.read(in_f[0], buffer)
+        except Exception, inst:
+            print inst
+        if mode:
+            while (len(recvs) < length):
+                try:
+                    recvs += os.read(in_f[0], buffer)
+                except Exception, inst:
+                    print inst
+        if len(recvs) >= length:
+            print "PASS: Recv data length %d" % len(recvs)
+        else:
+            print ("FAIL: Partial recv: desired %d, transfered %d" %
+                   (length, len(recvs)))
+
+
+    def clean_port(self, port, buffer=1024):
+        in_f = self._open([port])
+        ret = select.select([in_f[0]], [], [], 1.0)
+        buf = ""
+        if ret[0]:
+            buf = os.read(in_f[0], buffer)
+        print ("PASS: Rest in socket: ") + str(buf[10])
+
+
+def is_alive():
+    """
+    Check that only the main thread is alive and that the guest reacts.
+    """
+    if threading.activeCount() == 2:
+        print ("PASS: Guest is ok no thread alive")
+    else:
+        threads = ""
+        for thread in threading.enumerate():
+            threads += thread.name + ", "
+        print ("FAIL: On guest run thread. Active thread:" + threads)
+
+
+def compile():
+    """
+    Compile virtio_console_guest.py to speed up.
+    """
+    import py_compile
+    py_compile.compile(sys.path[0] + "/virtio_console_guest.py")
+    print "PASS: compile"
+    sys.exit()
+
+
+def guest_exit():
+    global exiting
+    exiting = True
+    os.kill(os.getpid(), signal.SIGUSR1)
+
+
+def worker(virt):
+    """
+    Worker thread (infinite) loop of virtio_guest.
+    """
+    global exiting
+    print "PASS: Start"
+
+    while not exiting:
+        str = raw_input()
+        try:
+            exec str
+        except:
+            exc_type, exc_value, exc_traceback = sys.exc_info()
+            print "On Guest exception from: \n" + "".join(
+                            traceback.format_exception(exc_type,
+                                                       exc_value,
+                                                       exc_traceback))
+
+
+def sigusr_handler(sig, frame):
+    pass
+
+
+def main():
+    """
+    Main function with infinite loop to catch signal from system.
+    """
+    if (len(sys.argv) > 1) and (sys.argv[1] == "-c"):
+        compile()
+
+    global exiting
+    virt = VirtioGuest()
+    slave = Thread(target=worker, args=(virt, ))
+    slave.start()
+    signal.signal(signal.SIGUSR1, sigusr_handler)
+    while not exiting:
+        signal.pause()
+        catch = virt.catching_signal()
+        if catch:
+            signal.signal(signal.SIGIO, virt)
+        elif catch == False:
+            signal.signal(signal.SIGIO, signal.SIG_DFL)
+        if (catch != None):
+            virt.use_config.set()
+    print "PASS: guest_exit"
+
+
+if __name__ == "__main__":
+    main()
diff --git a/client/tests/kvm/stepeditor.py b/client/tests/kvm/stepeditor.py
index 43e189f..bcdf572 100755
--- a/client/tests/kvm/stepeditor.py
+++ b/client/tests/kvm/stepeditor.py
@@ -17,14 +17,22 @@
 def corner_and_size_clipped(startpoint, endpoint, limits):
     c0 = startpoint[:]
     c1 = endpoint[:]
-    if c0[0] < 0: c0[0] = 0
-    if c0[1] < 0: c0[1] = 0
-    if c1[0] < 0: c1[0] = 0
-    if c1[1] < 0: c1[1] = 0
-    if c0[0] > limits[0] - 1: c0[0] = limits[0] - 1
-    if c0[1] > limits[1] - 1: c0[1] = limits[1] - 1
-    if c1[0] > limits[0] - 1: c1[0] = limits[0] - 1
-    if c1[1] > limits[1] - 1: c1[1] = limits[1] - 1
+    if c0[0] < 0:
+        c0[0] = 0
+    if c0[1] < 0:
+        c0[1] = 0
+    if c1[0] < 0:
+        c1[0] = 0
+    if c1[1] < 0:
+        c1[1] = 0
+    if c0[0] > limits[0] - 1:
+        c0[0] = limits[0] - 1
+    if c0[1] > limits[1] - 1:
+        c0[1] = limits[1] - 1
+    if c1[0] > limits[0] - 1:
+        c1[0] = limits[0] - 1
+    if c1[1] > limits[1] - 1:
+        c1[1] = limits[1] - 1
     return ([min(c0[0], c1[0]),
              min(c0[1], c1[1])],
             [abs(c1[0] - c0[0]) + 1,
@@ -88,9 +96,12 @@
     else:
         return ""
 
-    if event.state & gtk.gdk.CONTROL_MASK: str = "ctrl-" + str
-    if event.state & gtk.gdk.MOD1_MASK: str = "alt-" + str
-    if event.state & gtk.gdk.SHIFT_MASK: str = "shift-" + str
+    if event.state & gtk.gdk.CONTROL_MASK:
+        str = "ctrl-" + str
+    if event.state & gtk.gdk.MOD1_MASK:
+        str = "alt-" + str
+    if event.state & gtk.gdk.SHIFT_MASK:
+        str = "shift-" + str
 
     return str
 
@@ -259,7 +270,7 @@
         box.pack_start(frame)
         frame.show()
 
-        self.text_buffer = gtk.TextBuffer() ;
+        self.text_buffer = gtk.TextBuffer()
         self.entry_keys = gtk.TextView(self.text_buffer)
         self.entry_keys.set_wrap_mode(gtk.WRAP_WORD)
         self.entry_keys.connect("key-press-event", self.event_key_press)
diff --git a/client/tests/kvm/test_setup.py b/client/tests/kvm/test_setup.py
new file mode 100644
index 0000000..eebe0c3
--- /dev/null
+++ b/client/tests/kvm/test_setup.py
@@ -0,0 +1,695 @@
+"""
+Library to perform pre/post test setup for KVM autotest.
+"""
+import os, shutil, tempfile, re, ConfigParser, glob, inspect
+import logging, time
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.bin import utils
+
+
+@error.context_aware
+def cleanup(dir):
+    """
+    If dir is a mountpoint, do what is possible to unmount it. Afterwards,
+    try to remove it.
+
+    @param dir: Directory to be cleaned up.
+    """
+    error.context("cleaning up unattended install directory %s" % dir)
+    if os.path.ismount(dir):
+        utils.run('fuser -k %s' % dir, ignore_status=True)
+        utils.run('umount %s' % dir)
+    if os.path.isdir(dir):
+        shutil.rmtree(dir)
+
+
+@error.context_aware
+def clean_old_image(image):
+    """
+    Clean a leftover image file from previous processes. If it contains a
+    mounted file system, do the proper cleanup procedures.
+
+    @param image: Path to image to be cleaned up.
+    """
+    error.context("cleaning up old leftover image %s" % image)
+    if os.path.exists(image):
+        mtab = open('/etc/mtab', 'r')
+        mtab_contents = mtab.read()
+        mtab.close()
+        if image in mtab_contents:
+            utils.run('fuser -k %s' % image, ignore_status=True)
+            utils.run('umount %s' % image)
+        os.remove(image)
+
+
+def display_attributes(instance):
+    """
+    Inspects a given class instance attributes and displays them, convenient
+    for debugging.
+    """
+    logging.debug("Attributes set:")
+    for member in inspect.getmembers(instance):
+        name, value = member
+        attribute = getattr(instance, name)
+        if not (name.startswith("__") or callable(attribute) or not value):
+            logging.debug("    %s: %s", name, value)
+
+
+class Disk(object):
+    """
+    Abstract class for Disk objects, with the common methods implemented.
+    """
+    def __init__(self):
+        self.path = None
+
+
+    def setup_answer_file(self, filename, contents):
+        utils.open_write_close(os.path.join(self.mount, filename), contents)
+
+
+    def copy_to(self, src):
+        dst = os.path.join(self.mount, os.path.basename(src))
+        if os.path.isdir(src):
+            shutil.copytree(src, dst)
+        elif os.path.isfile(src):
+            shutil.copyfile(src, dst)
+
+
+    def close(self):
+        os.chmod(self.path, 0755)
+        cleanup(self.mount)
+        logging.debug("Disk %s successfuly set", self.path)
+
+
+class FloppyDisk(Disk):
+    """
+    Represents a 1.44 MB floppy disk. We can copy files to it, and setup it in
+    convenient ways.
+    """
+    @error.context_aware
+    def __init__(self, path, qemu_img_binary, tmpdir):
+        error.context("Creating unattended install floppy image %s" % path)
+        self.tmpdir = tmpdir
+        self.mount = tempfile.mkdtemp(prefix='floppy_', dir=self.tmpdir)
+        self.virtio_mount = None
+        self.path = path
+        clean_old_image(path)
+        if not os.path.isdir(os.path.dirname(path)):
+            os.makedirs(os.path.dirname(path))
+
+        try:
+            c_cmd = '%s create -f raw %s 1440k' % (qemu_img_binary, path)
+            utils.run(c_cmd)
+            f_cmd = 'mkfs.msdos -s 1 %s' % path
+            utils.run(f_cmd)
+            m_cmd = 'mount -o loop,rw %s %s' % (path, self.mount)
+            utils.run(m_cmd)
+        except error.CmdError, e:
+            cleanup(self.mount)
+            raise
+
+
+    def _copy_virtio_drivers(self, virtio_floppy):
+        """
+        Copy the virtio drivers on the virtio floppy to the install floppy.
+
+        1) Mount the floppy containing the viostor drivers
+        2) Copy its contents to the root of the install floppy
+        """
+        virtio_mount = tempfile.mkdtemp(prefix='virtio_floppy_',
+                                        dir=self.tmpdir)
+
+        pwd = os.getcwd()
+        try:
+            m_cmd = 'mount -o loop %s %s' % (virtio_floppy, virtio_mount)
+            utils.run(m_cmd)
+            os.chdir(virtio_mount)
+            path_list = glob.glob('*')
+            for path in path_list:
+                self.copy_to(path)
+        finally:
+            os.chdir(pwd)
+            cleanup(virtio_mount)
+
+
+    def setup_virtio_win2003(self, virtio_floppy, virtio_oemsetup_id):
+        """
+        Setup the install floppy with the virtio storage drivers, win2003 style.
+
+        Win2003 and WinXP depend on the file txtsetup.oem file to install
+        the virtio drivers from the floppy, which is a .ini file.
+        Process:
+
+        1) Copy the virtio drivers on the virtio floppy to the install floppy
+        2) Parse the ini file with config parser
+        3) Modify the identifier of the default session that is going to be
+           executed on the config parser object
+        4) Re-write the config file to the disk
+        """
+        self._copy_virtio_drivers(virtio_floppy)
+        txtsetup_oem = os.path.join(self.mount, 'txtsetup.oem')
+        if not os.path.isfile(txtsetup_oem):
+            raise IOError('File txtsetup.oem not found on the install '
+                          'floppy. Please verify if your floppy virtio '
+                          'driver image has this file')
+        parser = ConfigParser.ConfigParser()
+        parser.read(txtsetup_oem)
+        if not parser.has_section('Defaults'):
+            raise ValueError('File txtsetup.oem does not have the session '
+                             '"Defaults". Please check txtsetup.oem')
+        default_driver = parser.get('Defaults', 'SCSI')
+        if default_driver != virtio_oemsetup_id:
+            parser.set('Defaults', 'SCSI', virtio_oemsetup_id)
+            fp = open(txtsetup_oem, 'w')
+            parser.write(fp)
+            fp.close()
+
+
+    def setup_virtio_win2008(self, virtio_floppy):
+        """
+        Setup the install floppy with the virtio storage drivers, win2008 style.
+
+        Win2008, Vista and 7 require people to point out the path to the drivers
+        on the unattended file, so we just need to copy the drivers to the
+        driver floppy disk.
+        Process:
+
+        1) Copy the virtio drivers on the virtio floppy to the install floppy
+        """
+        self._copy_virtio_drivers(virtio_floppy)
+
+
+class CdromDisk(Disk):
+    """
+    Represents a CDROM disk that we can master according to our needs.
+    """
+    def __init__(self, path, tmpdir):
+        self.mount = tempfile.mkdtemp(prefix='cdrom_unattended_', dir=tmpdir)
+        self.path = path
+        clean_old_image(path)
+        if not os.path.isdir(os.path.dirname(path)):
+            os.makedirs(os.path.dirname(path))
+
+
+    @error.context_aware
+    def close(self):
+        error.context("Creating unattended install CD image %s" % self.path)
+        g_cmd = ('mkisofs -o %s -max-iso9660-filenames '
+                 '-relaxed-filenames -D --input-charset iso8859-1 '
+                 '%s' % (self.path, self.mount))
+        utils.run(g_cmd)
+
+        os.chmod(self.path, 0755)
+        cleanup(self.mount)
+        logging.debug("unattended install CD image %s successfuly created",
+                      self.path)
+
+
+class UnattendedInstallConfig(object):
+    """
+    Creates a floppy disk image that will contain a config file for unattended
+    OS install. The parameters to the script are retrieved from environment
+    variables.
+    """
+    def __init__(self, test, params):
+        """
+        Sets class attributes from test parameters.
+
+        @param test: KVM test object.
+        @param params: Dictionary with test parameters.
+        """
+        root_dir = test.bindir
+        images_dir = os.path.join(root_dir, 'images')
+        self.deps_dir = os.path.join(root_dir, 'deps')
+        self.unattended_dir = os.path.join(root_dir, 'unattended')
+
+        attributes = ['kernel_args', 'finish_program', 'cdrom_cd1',
+                      'unattended_file', 'medium', 'url', 'kernel', 'initrd',
+                      'nfs_server', 'nfs_dir', 'install_virtio', 'floppy',
+                      'cdrom_unattended', 'boot_path', 'extra_params',
+                      'qemu_img_binary']
+
+        for a in attributes:
+            setattr(self, a, params.get(a, ''))
+
+        if self.install_virtio == 'yes':
+            v_attributes = ['virtio_floppy', 'virtio_storage_path',
+                            'virtio_network_path', 'virtio_oemsetup_id',
+                            'virtio_network_installer']
+            for va in v_attributes:
+                setattr(self, va, params.get(va, ''))
+
+        self.tmpdir = test.tmpdir
+
+        if getattr(self, 'unattended_file'):
+            self.unattended_file = os.path.join(root_dir, self.unattended_file)
+
+        if getattr(self, 'qemu_img_binary'):
+            if not os.path.isfile(getattr(self, 'qemu_img_binary')):
+                self.qemu_img_binary = os.path.join(root_dir,
+                                                    self.qemu_img_binary)
+
+        if getattr(self, 'cdrom_cd1'):
+            self.cdrom_cd1 = os.path.join(root_dir, self.cdrom_cd1)
+        self.cdrom_cd1_mount = tempfile.mkdtemp(prefix='cdrom_cd1_',
+                                                dir=self.tmpdir)
+        if self.medium == 'nfs':
+            self.nfs_mount = tempfile.mkdtemp(prefix='nfs_',
+                                              dir=self.tmpdir)
+
+        if getattr(self, 'floppy'):
+            self.floppy = os.path.join(root_dir, self.floppy)
+            if not os.path.isdir(os.path.dirname(self.floppy)):
+                os.makedirs(os.path.dirname(self.floppy))
+
+        self.image_path = os.path.dirname(self.kernel)
+
+
+    @error.context_aware
+    def render_answer_file(self):
+        """
+        Replace KVM_TEST_CDKEY (in the unattended file) with the cdkey
+        provided for this test and replace the KVM_TEST_MEDIUM with
+        the tree url or nfs address provided for this test.
+
+        @return: Answer file contents
+        """
+        error.base_context('Rendering final answer file')
+        error.context('Reading answer file %s' % self.unattended_file)
+        unattended_contents = open(self.unattended_file).read()
+        dummy_cdkey_re = r'\bKVM_TEST_CDKEY\b'
+        real_cdkey = os.environ.get('KVM_TEST_cdkey')
+        if re.search(dummy_cdkey_re, unattended_contents):
+            if real_cdkey:
+                unattended_contents = re.sub(dummy_cdkey_re, real_cdkey,
+                                             unattended_contents)
+            else:
+                print ("WARNING: 'cdkey' required but not specified for "
+                       "this unattended installation")
+
+        dummy_medium_re = r'\bKVM_TEST_MEDIUM\b'
+        if self.medium == "cdrom":
+            content = "cdrom"
+        elif self.medium == "url":
+            content = "url --url %s" % self.url
+        elif self.medium == "nfs":
+            content = "nfs --server=%s --dir=%s" % (self.nfs_server,
+                                                    self.nfs_dir)
+        else:
+            raise ValueError("Unexpected installation medium %s" % self.url)
+
+        unattended_contents = re.sub(dummy_medium_re, content,
+                                     unattended_contents)
+
+        def replace_virtio_key(contents, dummy_re, env):
+            """
+            Replace a virtio dummy string with contents.
+
+            If install_virtio is not set, replace it with a dummy string.
+
+            @param contents: Contents of the unattended file
+            @param dummy_re: Regular expression used to search on the.
+                    unattended file contents.
+            @param env: Name of the environment variable.
+            """
+            dummy_path = "C:"
+            driver = os.environ.get(env, '')
+
+            if re.search(dummy_re, contents):
+                if self.install_virtio == "yes":
+                    if driver.endswith("msi"):
+                        driver = 'msiexec /passive /package ' + driver
+                    else:
+                        try:
+                            # Let's escape windows style paths properly
+                            drive, path = driver.split(":")
+                            driver = drive + ":" + re.escape(path)
+                        except:
+                            pass
+                    contents = re.sub(dummy_re, driver, contents)
+                else:
+                    contents = re.sub(dummy_re, dummy_path, contents)
+            return contents
+
+        vdict = {r'\bKVM_TEST_STORAGE_DRIVER_PATH\b':
+                 'KVM_TEST_virtio_storage_path',
+                 r'\bKVM_TEST_NETWORK_DRIVER_PATH\b':
+                 'KVM_TEST_virtio_network_path',
+                 r'\bKVM_TEST_VIRTIO_NETWORK_INSTALLER\b':
+                 'KVM_TEST_virtio_network_installer_path'}
+
+        for vkey in vdict:
+            unattended_contents = replace_virtio_key(unattended_contents,
+                                                     vkey, vdict[vkey])
+
+        logging.debug("Unattended install contents:")
+        for line in unattended_contents.splitlines():
+            logging.debug(line)
+        return unattended_contents
+
+
+    def setup_boot_disk(self):
+        answer_contents = self.render_answer_file()
+
+        if self.unattended_file.endswith('.sif'):
+            dest_fname = 'winnt.sif'
+            setup_file = 'winnt.bat'
+            boot_disk = FloppyDisk(self.floppy, self.qemu_img_binary,
+                                   self.tmpdir)
+            boot_disk.setup_answer_file(dest_fname, answer_contents)
+            setup_file_path = os.path.join(self.unattended_dir, setup_file)
+            boot_disk.copy_to(setup_file_path)
+            if self.install_virtio == "yes":
+                boot_disk.setup_virtio_win2003(self.virtio_floppy,
+                                               self.virtio_oemsetup_id)
+            boot_disk.copy_to(self.finish_program)
+
+        elif self.unattended_file.endswith('.ks'):
+            # Red Hat kickstart install
+            dest_fname = 'ks.cfg'
+            if self.cdrom_unattended:
+                boot_disk = CdromDisk(self.cdrom_unattended, self.tmpdir)
+            elif self.floppy:
+                boot_disk = FloppyDisk(self.floppy, self.qemu_img_binary,
+                                       self.tmpdir)
+            else:
+                raise ValueError("Neither cdrom_unattended nor floppy set "
+                                 "on the config file, please verify")
+            boot_disk.setup_answer_file(dest_fname, answer_contents)
+
+        elif self.unattended_file.endswith('.xml'):
+            if "autoyast" in self.extra_params:
+                # SUSE autoyast install
+                dest_fname = "autoinst.xml"
+                if self.cdrom_unattended:
+                    boot_disk = CdromDisk(self.cdrom_unattended)
+                elif self.floppy:
+                    boot_disk = FloppyDisk(self.floppy, self.qemu_img_binary,
+                                           self.tmpdir)
+                else:
+                    raise ValueError("Neither cdrom_unattended nor floppy set "
+                                     "on the config file, please verify")
+                boot_disk.setup_answer_file(dest_fname, answer_contents)
+
+            else:
+                # Windows unattended install
+                dest_fname = "autounattend.xml"
+                boot_disk = FloppyDisk(self.floppy, self.qemu_img_binary,
+                                       self.tmpdir)
+                boot_disk.setup_answer_file(dest_fname, answer_contents)
+                if self.install_virtio == "yes":
+                    boot_disk.setup_virtio_win2008(self.virtio_floppy)
+                boot_disk.copy_to(self.finish_program)
+
+        else:
+            raise ValueError('Unknown answer file type: %s' %
+                             self.unattended_file)
+
+        boot_disk.close()
+
+
+    @error.context_aware
+    def setup_cdrom(self):
+        """
+        Mount cdrom and copy vmlinuz and initrd.img.
+        """
+        error.context("Copying vmlinuz and initrd.img from install cdrom %s" %
+                      self.cdrom_cd1)
+        m_cmd = ('mount -t iso9660 -v -o loop,ro %s %s' %
+                 (self.cdrom_cd1, self.cdrom_cd1_mount))
+        utils.run(m_cmd)
+
+        try:
+            if not os.path.isdir(self.image_path):
+                os.makedirs(self.image_path)
+            kernel_fetch_cmd = ("cp %s/%s/%s %s" %
+                                (self.cdrom_cd1_mount, self.boot_path,
+                                 os.path.basename(self.kernel), self.kernel))
+            utils.run(kernel_fetch_cmd)
+            initrd_fetch_cmd = ("cp %s/%s/%s %s" %
+                                (self.cdrom_cd1_mount, self.boot_path,
+                                 os.path.basename(self.initrd), self.initrd))
+            utils.run(initrd_fetch_cmd)
+        finally:
+            cleanup(self.cdrom_cd1_mount)
+
+
+    @error.context_aware
+    def setup_url(self):
+        """
+        Download the vmlinuz and initrd.img from URL.
+        """
+        error.context("downloading vmlinuz and initrd.img from %s" % self.url)
+        os.chdir(self.image_path)
+        kernel_fetch_cmd = "wget -q %s/%s/%s" % (self.url, self.boot_path,
+                                                 os.path.basename(self.kernel))
+        initrd_fetch_cmd = "wget -q %s/%s/%s" % (self.url, self.boot_path,
+                                                 os.path.basename(self.initrd))
+
+        if os.path.exists(self.kernel):
+            os.remove(self.kernel)
+        if os.path.exists(self.initrd):
+            os.remove(self.initrd)
+
+        utils.run(kernel_fetch_cmd)
+        utils.run(initrd_fetch_cmd)
+
+
+    def setup_nfs(self):
+        """
+        Copy the vmlinuz and initrd.img from nfs.
+        """
+        error.context("copying the vmlinuz and initrd.img from NFS share")
+
+        m_cmd = ("mount %s:%s %s -o ro" %
+                 (self.nfs_server, self.nfs_dir, self.nfs_mount))
+        utils.run(m_cmd)
+
+        try:
+            kernel_fetch_cmd = ("cp %s/%s/%s %s" %
+                                (self.nfs_mount, self.boot_path,
+                                os.path.basename(self.kernel), self.image_path))
+            utils.run(kernel_fetch_cmd)
+            initrd_fetch_cmd = ("cp %s/%s/%s %s" %
+                                (self.nfs_mount, self.boot_path,
+                                os.path.basename(self.initrd), self.image_path))
+            utils.run(initrd_fetch_cmd)
+        finally:
+            cleanup(self.nfs_mount)
+
+
+    def setup(self):
+        """
+        Configure the environment for unattended install.
+
+        Uses an appropriate strategy according to each install model.
+        """
+        logging.info("Starting unattended install setup")
+        display_attributes(self)
+
+        if self.unattended_file and (self.floppy or self.cdrom_unattended):
+            self.setup_boot_disk()
+        if self.medium == "cdrom":
+            if self.kernel and self.initrd:
+                self.setup_cdrom()
+        elif self.medium == "url":
+            self.setup_url()
+        elif self.medium == "nfs":
+            self.setup_nfs()
+        else:
+            raise ValueError("Unexpected installation method %s" %
+                             self.medium)
+
+
+class HugePageConfig(object):
+    def __init__(self, params):
+        """
+        Gets environment variable values and calculates the target number
+        of huge memory pages.
+
+        @param params: Dict like object containing parameters for the test.
+        """
+        self.vms = len(params.objects("vms"))
+        self.mem = int(params.get("mem"))
+        self.max_vms = int(params.get("max_vms", 0))
+        self.hugepage_path = '/mnt/kvm_hugepage'
+        self.hugepage_size = self.get_hugepage_size()
+        self.target_hugepages = self.get_target_hugepages()
+        self.kernel_hp_file = '/proc/sys/vm/nr_hugepages'
+
+
+    def get_hugepage_size(self):
+        """
+        Get the current system setting for huge memory page size.
+        """
+        meminfo = open('/proc/meminfo', 'r').readlines()
+        huge_line_list = [h for h in meminfo if h.startswith("Hugepagesize")]
+        try:
+            return int(huge_line_list[0].split()[1])
+        except ValueError, e:
+            raise ValueError("Could not get huge page size setting from "
+                             "/proc/meminfo: %s" % e)
+
+
+    def get_target_hugepages(self):
+        """
+        Calculate the target number of hugepages for testing purposes.
+        """
+        if self.vms < self.max_vms:
+            self.vms = self.max_vms
+        # memory of all VMs plus qemu overhead of 64MB per guest
+        vmsm = (self.vms * self.mem) + (self.vms * 64)
+        return int(vmsm * 1024 / self.hugepage_size)
+
+
+    @error.context_aware
+    def set_hugepages(self):
+        """
+        Sets the hugepage limit to the target hugepage value calculated.
+        """
+        error.context("setting hugepages limit to %s" % self.target_hugepages)
+        hugepage_cfg = open(self.kernel_hp_file, "r+")
+        hp = hugepage_cfg.readline()
+        while int(hp) < self.target_hugepages:
+            loop_hp = hp
+            hugepage_cfg.write(str(self.target_hugepages))
+            hugepage_cfg.flush()
+            hugepage_cfg.seek(0)
+            hp = int(hugepage_cfg.readline())
+            if loop_hp == hp:
+                raise ValueError("Cannot set the kernel hugepage setting "
+                                 "to the target value of %d hugepages." %
+                                 self.target_hugepages)
+        hugepage_cfg.close()
+        logging.debug("Successfuly set %s large memory pages on host ",
+                      self.target_hugepages)
+
+
+    @error.context_aware
+    def mount_hugepage_fs(self):
+        """
+        Verify if there's a hugetlbfs mount set. If there's none, will set up
+        a hugetlbfs mount using the class attribute that defines the mount
+        point.
+        """
+        error.context("mounting hugepages path")
+        if not os.path.ismount(self.hugepage_path):
+            if not os.path.isdir(self.hugepage_path):
+                os.makedirs(self.hugepage_path)
+            cmd = "mount -t hugetlbfs none %s" % self.hugepage_path
+            utils.system(cmd)
+
+
+    def setup(self):
+        logging.debug("Number of VMs this test will use: %d", self.vms)
+        logging.debug("Amount of memory used by each vm: %s", self.mem)
+        logging.debug("System setting for large memory page size: %s",
+                      self.hugepage_size)
+        logging.debug("Number of large memory pages needed for this test: %s",
+                      self.target_hugepages)
+        self.set_hugepages()
+        self.mount_hugepage_fs()
+
+
+    @error.context_aware
+    def cleanup(self):
+        error.context("trying to dealocate hugepage memory")
+        try:
+            utils.system("umount %s" % self.hugepage_path)
+        except error.CmdError:
+            return
+        utils.system("echo 0 > %s" % self.kernel_hp_file)
+        logging.debug("Hugepage memory successfuly dealocated")
+
+
+class EnospcConfig(object):
+    """
+    Performs setup for the test enospc. This is a borg class, similar to a
+    singleton. The idea is to keep state in memory for when we call cleanup()
+    on postprocessing.
+    """
+    __shared_state = {}
+    def __init__(self, test, params):
+        self.__dict__ = self.__shared_state
+        root_dir = test.bindir
+        self.tmpdir = test.tmpdir
+        self.qemu_img_binary = params.get('qemu_img_binary')
+        if not os.path.isfile(self.qemu_img_binary):
+            self.qemu_img_binary = os.path.join(root_dir,
+                                                self.qemu_img_binary)
+        self.raw_file_path = os.path.join(self.tmpdir, 'enospc.raw')
+        # Here we're trying to choose fairly explanatory names so it's less
+        # likely that we run in conflict with other devices in the system
+        self.vgtest_name = params.get("vgtest_name")
+        self.lvtest_name = params.get("lvtest_name")
+        self.lvtest_device = "/dev/%s/%s" % (self.vgtest_name, self.lvtest_name)
+        image_dir = os.path.dirname(params.get("image_name"))
+        self.qcow_file_path = os.path.join(image_dir, 'enospc.qcow2')
+        try:
+            getattr(self, 'loopback')
+        except AttributeError:
+            self.loopback = ''
+
+
+    @error.context_aware
+    def setup(self):
+        logging.debug("Starting enospc setup")
+        error.context("performing enospc setup")
+        display_attributes(self)
+        # Double check if there aren't any leftovers
+        self.cleanup()
+        try:
+            utils.run("%s create -f raw %s 10G" %
+                      (self.qemu_img_binary, self.raw_file_path))
+            # Associate a loopback device with the raw file.
+            # Subject to race conditions, that's why try here to associate
+            # it with the raw file as quickly as possible
+            l_result = utils.run("losetup -f")
+            utils.run("losetup -f %s" % self.raw_file_path)
+            self.loopback = l_result.stdout.strip()
+            # Add the loopback device configured to the list of pvs
+            # recognized by LVM
+            utils.run("pvcreate %s" % self.loopback)
+            utils.run("vgcreate %s %s" % (self.vgtest_name, self.loopback))
+            # Create an lv inside the vg with starting size of 200M
+            utils.run("lvcreate -L 200M -n %s %s" %
+                      (self.lvtest_name, self.vgtest_name))
+            # Create a 10GB qcow2 image in the logical volume
+            utils.run("%s create -f qcow2 %s 10G" %
+                      (self.qemu_img_binary, self.lvtest_device))
+            # Let's symlink the logical volume with the image name that autotest
+            # expects this device to have
+            os.symlink(self.lvtest_device, self.qcow_file_path)
+        except Exception, e:
+            self.cleanup()
+            raise
+
+    @error.context_aware
+    def cleanup(self):
+        error.context("performing enospc cleanup")
+        if os.path.isfile(self.lvtest_device):
+            utils.run("fuser -k %s" % self.lvtest_device)
+            time.sleep(2)
+        l_result = utils.run("lvdisplay")
+        # Let's remove all volumes inside the volume group created
+        if self.lvtest_name in l_result.stdout:
+            utils.run("lvremove -f %s" % self.lvtest_device)
+        # Now, removing the volume group itself
+        v_result = utils.run("vgdisplay")
+        if self.vgtest_name in v_result.stdout:
+            utils.run("vgremove -f %s" % self.vgtest_name)
+        # Now, if we can, let's remove the physical volume from lvm list
+        if self.loopback:
+            p_result = utils.run("pvdisplay")
+            if self.loopback in p_result.stdout:
+                utils.run("pvremove -f %s" % self.loopback)
+        l_result = utils.run('losetup -a')
+        if self.loopback and (self.loopback in l_result.stdout):
+            try:
+                utils.run("losetup -d %s" % self.loopback)
+            except error.CmdError:
+                logging.error("Failed to liberate loopback %s", self.loopback)
+        if os.path.islink(self.qcow_file_path):
+            os.remove(self.qcow_file_path)
+        if os.path.isfile(self.raw_file_path):
+            os.remove(self.raw_file_path)
diff --git a/client/tests/kvm/tests.cfg.sample b/client/tests/kvm/tests.cfg.sample
index ce3e307..bde7aba 100644
--- a/client/tests/kvm/tests.cfg.sample
+++ b/client/tests/kvm/tests.cfg.sample
@@ -11,10 +11,17 @@
 # * qemu and qemu-img are expected to be found under /usr/bin/qemu-kvm and
 #   /usr/bin/qemu-img respectively.
 # * All image files are expected under /tmp/kvm_autotest_root/images/
-# * All iso files are expected under /tmp/kvm_autotest_root/isos/
-qemu_img_binary = /usr/bin/qemu-img
+# * All install iso files are expected under /tmp/kvm_autotest_root/isos/
+# * The parameters cdrom_unattended, floppy, kernel and initrd are generated
+#   by KVM autotest, so remember to put them under a writable location
+#   (for example, the cdrom share can be read only)
 image_name(_.*)? ?<= /tmp/kvm_autotest_root/images/
-cdrom(_.*)? ?<= /tmp/kvm_autotest_root/isos/
+cdrom(_.*)? ?<= /tmp/kvm_autotest_root/
+floppy ?<= /tmp/kvm_autotest_root/
+Linux:
+    unattended_install:
+        kernel ?<= /tmp/kvm_autotest_root/
+        initrd ?<= /tmp/kvm_autotest_root/
 
 # Here are the test sets variants. The variant 'qemu_kvm_windows_quick' is
 # fully commented, the following ones have comments only on noteworthy points
@@ -26,6 +33,7 @@
     - @qemu_kvm_windows_quick:
         # We want qemu-kvm for this run
         qemu_binary = /usr/bin/qemu-kvm
+        qemu_img_binary = /usr/bin/qemu-img
         # Only qcow2 file format
         only qcow2
         # Only rtl8139 for nw card (default on qemu-kvm)
@@ -43,10 +51,11 @@
         # Subtest choice. You can modify that line to add more subtests
         only unattended_install.cdrom boot shutdown
 
-    # Runs qemu, f13 64 bit guest OS, install, boot, shutdown
-    - @qemu_f13_quick:
+    # Runs qemu, f14 64 bit guest OS, install, boot, shutdown
+    - @qemu_f14_quick:
         # We want qemu for this run
         qemu_binary = /usr/bin/qemu
+        qemu_img_binary = /usr/bin/qemu-img
         only qcow2
         # The default nw card for qemu is e1000
         only e1000
@@ -55,22 +64,23 @@
         only up
         only no_pci_assignable
         only smallpages
-        only Fedora.13.64
+        only Fedora.14.64
         only unattended_install.cdrom boot shutdown
         # qemu needs -enable-kvm on the cmdline
         extra_params += ' -enable-kvm'
 
-    # Runs qemu-kvm, f13 64 bit guest OS, install, boot, shutdown
-    - @qemu_kvm_f13_quick:
+    # Runs qemu-kvm, f14 64 bit guest OS, install, boot, shutdown
+    - @qemu_kvm_f14_quick:
         # We want qemu-kvm for this run
         qemu_binary = /usr/bin/qemu-kvm
+        qemu_img_binary = /usr/bin/qemu-img
         only qcow2
         only virtio_net
         only virtio_blk
         only smp2
         only no_pci_assignable
         only smallpages
-        only Fedora.13.64
+        only Fedora.14.64
         only unattended_install.cdrom boot shutdown
 
 # You may provide information about the DTM server for WHQL tests here:
@@ -87,4 +97,4 @@
 #kill_unresponsive_vms.* ?= no
 
 # Choose your test list from the testsets defined
-only qemu_kvm_f13_quick
+only qemu_kvm_f14_quick
diff --git a/client/tests/kvm/tests/autotest.py b/client/tests/kvm/tests/autotest.py
index 2916ebd..afc2e3b 100644
--- a/client/tests/kvm/tests/autotest.py
+++ b/client/tests/kvm/tests/autotest.py
@@ -1,7 +1,5 @@
-import os, logging
-from autotest_lib.client.common_lib import error
-from autotest_lib.client.bin import utils
-import kvm_subprocess, kvm_utils, kvm_test_utils
+import os
+import kvm_test_utils
 
 
 def run_autotest(test, params, env):
@@ -12,9 +10,10 @@
     @param params: Dictionary with test parameters.
     @param env: Dictionary with the test environment.
     """
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
     timeout = int(params.get("login_timeout", 360))
-    session = kvm_test_utils.wait_for_login(vm, timeout=timeout)
+    session = vm.wait_for_login(timeout=timeout)
 
     # Collect test parameters
     timeout = int(params.get("test_timeout", 300))
@@ -22,4 +21,5 @@
                                 params.get("test_control_file"))
     outputdir = test.outputdir
 
-    kvm_test_utils.run_autotest(vm, session, control_path, timeout, outputdir)
+    kvm_test_utils.run_autotest(vm, session, control_path, timeout, outputdir,
+                                params)
diff --git a/client/tests/kvm/tests/balloon_check.py b/client/tests/kvm/tests/balloon_check.py
index 1ee05bf..0c2a367 100644
--- a/client/tests/kvm/tests/balloon_check.py
+++ b/client/tests/kvm/tests/balloon_check.py
@@ -1,6 +1,7 @@
-import re, string, logging, random, time
+import re, logging, random, time
 from autotest_lib.client.common_lib import error
-import kvm_test_utils, kvm_utils, kvm_monitor
+import kvm_monitor
+
 
 def run_balloon_check(test, params, env):
     """
@@ -65,9 +66,10 @@
 
 
     fail = 0
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
     timeout = int(params.get("login_timeout", 360))
-    session = kvm_test_utils.wait_for_login(vm, timeout=timeout)
+    session = vm.wait_for_login(timeout=timeout)
 
     # Upper limit that we can raise the memory
     vm_assigned_mem = int(params.get("mem"))
diff --git a/client/tests/kvm/tests/boot.py b/client/tests/kvm/tests/boot.py
index 8cc0218..4fabcd5 100644
--- a/client/tests/kvm/tests/boot.py
+++ b/client/tests/kvm/tests/boot.py
@@ -1,6 +1,4 @@
-import logging, time
-from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils
+import time
 
 
 def run_boot(test, params, env):
@@ -15,19 +13,14 @@
     @param params: Dictionary with the test parameters
     @param env: Dictionary with test environment.
     """
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
     timeout = float(params.get("login_timeout", 240))
-    session = kvm_test_utils.wait_for_login(vm, 0, timeout, 0, 2)
+    session = vm.wait_for_login(timeout=timeout)
 
-    try:
-        if not params.get("reboot_method"):
-            return
+    if params.get("reboot_method"):
+        if params["reboot_method"] == "system_reset":
+            time.sleep(int(params.get("sleep_before_reset", 10)))
+        session = vm.reboot(session, params["reboot_method"], 0, timeout)
 
-        # Reboot the VM
-        session = kvm_test_utils.reboot(vm, session,
-                                    params.get("reboot_method"),
-                                    float(params.get("sleep_before_reset", 10)),
-                                    0, timeout)
-
-    finally:
-        session.close()
+    session.close()
diff --git a/client/tests/kvm/tests/boot_savevm.py b/client/tests/kvm/tests/boot_savevm.py
index 3305695..6af4132 100644
--- a/client/tests/kvm/tests/boot_savevm.py
+++ b/client/tests/kvm/tests/boot_savevm.py
@@ -1,6 +1,7 @@
 import logging, time
 from autotest_lib.client.common_lib import error
-import kvm_test_utils, kvm_utils, kvm_monitor
+import kvm_monitor
+
 
 def run_boot_savevm(test, params, env):
     """
@@ -13,10 +14,11 @@
     @param params: Dictionary with the test parameters
     @param env: Dictionary with test environment.
     """
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
     savevm_delay = float(params.get("savevm_delay"))
     savevm_login_delay = float(params.get("savevm_login_delay"))
-    logging.info("savevm_delay = %f" % savevm_delay)
+    logging.info("savevm_delay = %f", savevm_delay)
     login_expire = time.time() + savevm_login_delay
     end_time = time.time() + float(params.get("savevm_timeout"))
 
@@ -50,12 +52,9 @@
         if (time.time() > login_expire):
             login_expire = time.time() + savevm_login_delay
             logging.info("Logging in after loadvm...")
-            session = kvm_utils.wait_for(vm.remote_login, 1, 0, 1)
-            if not session:
-                logging.info("Failed to login")
-            else:
-                logging.info("Logged in to guest!")
-                break
+            session = vm.login()
+            logging.info("Logged in to guest!")
+            break
 
     if (time.time() > end_time):
         raise error.TestFail("fail: timeout")
diff --git a/client/tests/kvm/tests/build.py b/client/tests/kvm/tests/build.py
index c4f0b18..cbf4aed 100644
--- a/client/tests/kvm/tests/build.py
+++ b/client/tests/kvm/tests/build.py
@@ -1,589 +1,4 @@
-import time, os, sys, urllib, re, signal, logging, datetime, glob, ConfigParser
-import shutil
-from autotest_lib.client.bin import utils, test, os_dep
-from autotest_lib.client.common_lib import error
-import kvm_utils
-
-
-def check_configure_options(script_path):
-    """
-    Return the list of available options (flags) of a given kvm configure build
-    script.
-
-    @param script: Path to the configure script
-    """
-    abspath = os.path.abspath(script_path)
-    help_raw = utils.system_output('%s --help' % abspath, ignore_status=True)
-    help_output = help_raw.split("\n")
-    option_list = []
-    for line in help_output:
-        cleaned_line = line.lstrip()
-        if cleaned_line.startswith("--"):
-            option = cleaned_line.split()[0]
-            option = option.split("=")[0]
-            option_list.append(option)
-
-    return option_list
-
-
-def kill_qemu_processes():
-    """
-    Kills all qemu processes, also kills all processes holding /dev/kvm down.
-    """
-    logging.debug("Killing any qemu processes that might be left behind")
-    utils.system("pkill qemu", ignore_status=True)
-    # Let's double check to see if some other process is holding /dev/kvm
-    if os.path.isfile("/dev/kvm"):
-        utils.system("fuser -k /dev/kvm", ignore_status=True)
-
-
-def load_kvm_modules(module_dir=None, load_stock=False, extra_modules=None):
-    """
-    Unload previously loaded kvm modules, then load modules present on any
-    sub directory of module_dir. Function will walk through module_dir until
-    it finds the modules.
-
-    @param module_dir: Directory where the KVM modules are located.
-    @param load_stock: Whether we are going to load system kernel modules.
-    @param extra_modules: List of extra modules to load.
-    """
-    vendor = "intel"
-    if os.system("grep vmx /proc/cpuinfo 1>/dev/null") != 0:
-        vendor = "amd"
-    logging.debug("Detected CPU vendor as '%s'" %(vendor))
-
-    kill_qemu_processes()
-
-    logging.info("Unloading previously loaded KVM modules")
-    utils.unload_module("kvm")
-    if extra_modules:
-        for module in extra_modules:
-            utils.unload_module(module)
-
-    if module_dir:
-        logging.info("Loading the built KVM modules...")
-        kvm_module_path = None
-        kvm_vendor_module_path = None
-        abort = False
-
-        list_modules = ['kvm.ko', 'kvm-%s.ko' % vendor]
-        if extra_modules:
-            for extra_module in extra_modules:
-                list_modules.append('%s.ko' % extra_module)
-
-        list_module_paths = []
-        for folder, subdirs, files in os.walk(module_dir):
-            for module in list_modules:
-                if module in files:
-                    module_path = os.path.join(folder, module)
-                    list_module_paths.append(module_path)
-
-        # We might need to arrange the modules in the correct order
-        # to avoid module load problems
-        list_modules_load = []
-        for module in list_modules:
-            for module_path in list_module_paths:
-                if os.path.basename(module_path) == module:
-                    list_modules_load.append(module_path)
-
-        if len(list_module_paths) != len(list_modules):
-            logging.error("KVM modules not found. If you don't want to use the "
-                          "modules built by this test, make sure the option "
-                          "load_modules: 'no' is marked on the test control "
-                          "file.")
-            raise error.TestError("The modules %s were requested to be loaded, "
-                                  "but the only modules found were %s" %
-                                  (list_modules, list_module_paths))
-
-        for module_path in list_modules_load:
-            try:
-                utils.system("insmod %s" % module_path)
-            except Exception, e:
-                raise error.TestFail("Failed to load KVM modules: %s" % e)
-
-    if load_stock:
-        logging.info("Loading current system KVM modules...")
-        utils.system("modprobe kvm")
-        utils.system("modprobe kvm-%s" % vendor)
-        if extra_modules:
-            for module in extra_modules:
-                utils.system("modprobe %s" % module)
-
-
-def create_symlinks(test_bindir, prefix=None, bin_list=None, unittest=None):
-    """
-    Create symbolic links for the appropriate qemu and qemu-img commands on
-    the kvm test bindir.
-
-    @param test_bindir: KVM test bindir
-    @param prefix: KVM prefix path
-    @param bin_list: List of qemu binaries to link
-    @param unittest: Path to configuration file unittests.cfg
-    """
-    qemu_path = os.path.join(test_bindir, "qemu")
-    qemu_img_path = os.path.join(test_bindir, "qemu-img")
-    qemu_unittest_path = os.path.join(test_bindir, "unittests")
-    if os.path.lexists(qemu_path):
-        os.unlink(qemu_path)
-    if os.path.lexists(qemu_img_path):
-        os.unlink(qemu_img_path)
-    if unittest and os.path.lexists(qemu_unittest_path):
-        os.unlink(qemu_unittest_path)
-
-    logging.debug("Linking qemu binaries")
-
-    if bin_list:
-        for bin in bin_list:
-            if os.path.basename(bin) == 'qemu-kvm':
-                os.symlink(bin, qemu_path)
-            elif os.path.basename(bin) == 'qemu-img':
-                os.symlink(bin, qemu_img_path)
-
-    elif prefix:
-        kvm_qemu = os.path.join(prefix, "bin", "qemu-system-x86_64")
-        if not os.path.isfile(kvm_qemu):
-            raise error.TestError('Invalid qemu path')
-        kvm_qemu_img = os.path.join(prefix, "bin", "qemu-img")
-        if not os.path.isfile(kvm_qemu_img):
-            raise error.TestError('Invalid qemu-img path')
-        os.symlink(kvm_qemu, qemu_path)
-        os.symlink(kvm_qemu_img, qemu_img_path)
-
-    if unittest:
-        logging.debug("Linking unittest dir")
-        os.symlink(unittest, qemu_unittest_path)
-
-
-def save_build(build_dir, dest_dir):
-    logging.debug('Saving the result of the build on %s', dest_dir)
-    base_name = os.path.basename(build_dir)
-    tarball_name = base_name + '.tar.bz2'
-    os.chdir(os.path.dirname(build_dir))
-    utils.system('tar -cjf %s %s' % (tarball_name, base_name))
-    shutil.move(tarball_name, os.path.join(dest_dir, tarball_name))
-
-
-class BaseInstaller(object):
-    def __init__(self, test, params):
-        load_modules = params.get('load_modules', 'no')
-        if not load_modules or load_modules == 'yes':
-            self.load_modules = True
-        elif load_modules == 'no':
-            self.load_modules = False
-        default_extra_modules = str(None)
-        self.extra_modules = eval(params.get("extra_modules",
-                                             default_extra_modules))
-
-        self.srcdir = test.srcdir
-        if not os.path.isdir(self.srcdir):
-            os.makedirs(self.srcdir)
-
-        self.test_bindir = test.bindir
-        self.results_dir = test.resultsdir
-
-        # KVM build prefix, for the modes that do need it
-        prefix = os.path.join(test.bindir, 'build')
-        self.prefix = os.path.abspath(prefix)
-
-        # Current host kernel directory
-        default_host_kernel_source = '/lib/modules/%s/build' % os.uname()[2]
-        self.host_kernel_srcdir = params.get('host_kernel_source',
-                                             default_host_kernel_source)
-
-        # Extra parameters that can be passed to the configure script
-        self.extra_configure_options = params.get('extra_configure_options',
-                                                  None)
-
-        # Do we want to save the result of the build on test.resultsdir?
-        self.save_results = True
-        save_results = params.get('save_results', 'no')
-        if save_results == 'no':
-            self.save_results = False
-
-
-class YumInstaller(BaseInstaller):
-    """
-    Class that uses yum to install and remove packages.
-    """
-    def __init__(self, test, params):
-        super(YumInstaller, self).__init__(test, params)
-        # Checking if all required dependencies are available
-        os_dep.command("rpm")
-        os_dep.command("yum")
-
-        default_pkg_list = str(['qemu-kvm', 'qemu-kvm-tools'])
-        default_qemu_bin_paths = str(['/usr/bin/qemu-kvm', '/usr/bin/qemu-img'])
-        default_pkg_path_list = str(None)
-        self.pkg_list = eval(params.get("pkg_list", default_pkg_list))
-        self.pkg_path_list = eval(params.get("pkg_path_list",
-                                             default_pkg_path_list))
-        self.qemu_bin_paths = eval(params.get("qemu_bin_paths",
-                                              default_qemu_bin_paths))
-
-
-    def _clean_previous_installs(self):
-        kill_qemu_processes()
-        removable_packages = ""
-        for pkg in self.pkg_list:
-            removable_packages += " %s" % pkg
-
-        utils.system("yum remove -y %s" % removable_packages)
-
-
-    def _get_packages(self):
-        for pkg in self.pkg_path_list:
-            utils.get_file(pkg, os.path.join(self.srcdir,
-                                             os.path.basename(pkg)))
-
-
-    def _install_packages(self):
-        """
-        Install all downloaded packages.
-        """
-        os.chdir(self.srcdir)
-        utils.system("yum install --nogpgcheck -y *.rpm")
-
-
-    def install(self):
-        self._clean_previous_installs()
-        self._get_packages()
-        self._install_packages()
-        create_symlinks(test_bindir=self.test_bindir,
-                        bin_list=self.qemu_bin_paths)
-        if self.load_modules:
-            load_kvm_modules(load_stock=True, extra_modules=self.extra_modules)
-        if self.save_results:
-            save_build(self.srcdir, self.results_dir)
-
-
-class KojiInstaller(YumInstaller):
-    """
-    Class that handles installing KVM from the fedora build service, koji.
-    It uses yum to install and remove packages.
-    """
-    def __init__(self, test, params):
-        """
-        Gets parameters and initializes the package downloader.
-
-        @param test: kvm test object
-        @param params: Dictionary with test arguments
-        """
-        super(KojiInstaller, self).__init__(test, params)
-        default_koji_cmd = '/usr/bin/koji'
-        default_src_pkg = 'qemu'
-        self.src_pkg = params.get("src_pkg", default_src_pkg)
-        self.tag = params.get("koji_tag", None)
-        self.build = params.get("koji_build", None)
-        koji_cmd = params.get("koji_cmd", default_koji_cmd)
-        self.downloader = kvm_utils.KojiDownloader(cmd=koji_cmd)
-
-
-    def _get_packages(self):
-        """
-        Downloads the specific arch RPMs for the specific build name.
-        """
-        self.downloader.get(src_package=self.src_pkg, tag=self.tag,
-                            build=self.build, dst_dir=self.srcdir)
-
-
-    def install(self):
-        super(KojiInstaller, self)._clean_previous_installs()
-        self._get_packages()
-        super(KojiInstaller, self)._install_packages()
-        create_symlinks(test_bindir=self.test_bindir,
-                        bin_list=self.qemu_bin_paths)
-        if self.load_modules:
-            load_kvm_modules(load_stock=True, extra_modules=self.extra_modules)
-        if self.save_results:
-            save_build(self.srcdir, self.results_dir)
-
-
-class SourceDirInstaller(BaseInstaller):
-    """
-    Class that handles building/installing KVM directly from a tarball or
-    a single source code dir.
-    """
-    def __init__(self, test, params):
-        """
-        Initializes class attributes, and retrieves KVM code.
-
-        @param test: kvm test object
-        @param params: Dictionary with test arguments
-        """
-        super(SourceDirInstaller, self).__init__(test, params)
-
-        install_mode = params["mode"]
-        srcdir = params.get("srcdir", None)
-
-        if install_mode == 'localsrc':
-            if srcdir is None:
-                raise error.TestError("Install from source directory specified"
-                                      "but no source directory provided on the"
-                                      "control file.")
-            else:
-                shutil.copytree(srcdir, self.srcdir)
-
-        if install_mode == 'release':
-            release_tag = params.get("release_tag")
-            release_dir = params.get("release_dir")
-            release_listing = params.get("release_listing")
-            logging.info("Installing KVM from release tarball")
-            if not release_tag:
-                release_tag = kvm_utils.get_latest_kvm_release_tag(
-                                                                release_listing)
-            tarball = os.path.join(release_dir, 'kvm', release_tag,
-                                   "kvm-%s.tar.gz" % release_tag)
-            logging.info("Retrieving release kvm-%s" % release_tag)
-            tarball = utils.unmap_url("/", tarball, "/tmp")
-
-        elif install_mode == 'snapshot':
-            logging.info("Installing KVM from snapshot")
-            snapshot_dir = params.get("snapshot_dir")
-            if not snapshot_dir:
-                raise error.TestError("Snapshot dir not provided")
-            snapshot_date = params.get("snapshot_date")
-            if not snapshot_date:
-                # Take yesterday's snapshot
-                d = (datetime.date.today() -
-                     datetime.timedelta(1)).strftime("%Y%m%d")
-            else:
-                d = snapshot_date
-            tarball = os.path.join(snapshot_dir, "kvm-snapshot-%s.tar.gz" % d)
-            logging.info("Retrieving kvm-snapshot-%s" % d)
-            tarball = utils.unmap_url("/", tarball, "/tmp")
-
-        elif install_mode == 'localtar':
-            tarball = params.get("tarball")
-            if not tarball:
-                raise error.TestError("KVM Tarball install specified but no"
-                                      " tarball provided on control file.")
-            logging.info("Installing KVM from a local tarball")
-            logging.info("Using tarball %s")
-            tarball = utils.unmap_url("/", params.get("tarball"), "/tmp")
-
-        if install_mode in ['release', 'snapshot', 'localtar']:
-            utils.extract_tarball_to_dir(tarball, self.srcdir)
-
-        if install_mode in ['release', 'snapshot', 'localtar', 'srcdir']:
-            self.repo_type = kvm_utils.check_kvm_source_dir(self.srcdir)
-            configure_script = os.path.join(self.srcdir, 'configure')
-            self.configure_options = check_configure_options(configure_script)
-
-
-    def _build(self):
-        make_jobs = utils.count_cpus()
-        os.chdir(self.srcdir)
-        # For testing purposes, it's better to build qemu binaries with
-        # debugging symbols, so we can extract more meaningful stack traces.
-        cfg = "./configure --prefix=%s" % self.prefix
-        if "--disable-strip" in self.configure_options:
-            cfg += " --disable-strip"
-        steps = [cfg, "make clean", "make -j %s" % make_jobs]
-        logging.info("Building KVM")
-        for step in steps:
-            utils.system(step)
-
-
-    def _install(self):
-        os.chdir(self.srcdir)
-        logging.info("Installing KVM userspace")
-        if self.repo_type == 1:
-            utils.system("make -C qemu install")
-        elif self.repo_type == 2:
-            utils.system("make install")
-        create_symlinks(self.test_bindir, self.prefix)
-
-
-    def _load_modules(self):
-        load_kvm_modules(module_dir=self.srcdir,
-                         extra_modules=self.extra_modules)
-
-
-    def install(self):
-        self._build()
-        self._install()
-        if self.load_modules:
-            self._load_modules()
-        if self.save_results:
-            save_build(self.srcdir, self.results_dir)
-
-
-class GitInstaller(SourceDirInstaller):
-    def __init__(self, test, params):
-        """
-        Initialize class parameters and retrieves code from git repositories.
-
-        @param test: kvm test object.
-        @param params: Dictionary with test parameters.
-        """
-        super(GitInstaller, self).__init__(test, params)
-
-        kernel_repo = params.get("git_repo")
-        user_repo = params.get("user_git_repo")
-        kmod_repo = params.get("kmod_repo")
-        test_repo = params.get("test_git_repo")
-
-        kernel_branch = params.get("kernel_branch", "master")
-        user_branch = params.get("user_branch", "master")
-        kmod_branch = params.get("kmod_branch", "master")
-        test_branch = params.get("test_branch", "master")
-
-        kernel_lbranch = params.get("kernel_lbranch", "master")
-        user_lbranch = params.get("user_lbranch", "master")
-        kmod_lbranch = params.get("kmod_lbranch", "master")
-        test_lbranch = params.get("test_lbranch", "master")
-
-        kernel_commit = params.get("kernel_commit", None)
-        user_commit = params.get("user_commit", None)
-        kmod_commit = params.get("kmod_commit", None)
-        test_commit = params.get("test_commit", None)
-
-        kernel_patches = eval(params.get("kernel_patches", "[]"))
-        user_patches = eval(params.get("user_patches", "[]"))
-        kmod_patches = eval(params.get("user_patches", "[]"))
-
-        if not user_repo:
-            message = "KVM user git repository path not specified"
-            logging.error(message)
-            raise error.TestError(message)
-
-        userspace_srcdir = os.path.join(self.srcdir, "kvm_userspace")
-        kvm_utils.get_git_branch(user_repo, user_branch, userspace_srcdir,
-                                 user_commit, user_lbranch)
-        self.userspace_srcdir = userspace_srcdir
-
-        if user_patches:
-            os.chdir(self.userspace_srcdir)
-            for patch in user_patches:
-                utils.get_file(patch, os.path.join(self.userspace_srcdir,
-                                                   os.path.basename(patch)))
-                utils.system('patch -p1 %s' % os.path.basename(patch))
-
-        if test_repo:
-            test_srcdir = os.path.join(self.srcdir, "kvm-unit-tests")
-            kvm_utils.get_git_branch(test_repo, test_branch, test_srcdir,
-                                     test_commit, test_lbranch)
-            unittest_cfg = os.path.join(test_srcdir, 'x86',
-                                        'unittests.cfg')
-            self.test_srcdir = test_srcdir
-        else:
-            unittest_cfg = os.path.join(userspace_srcdir, 'kvm', 'test', 'x86',
-                                        'unittests.cfg')
-
-        self.unittest_cfg = None
-        if os.path.isfile(unittest_cfg):
-            self.unittest_cfg = unittest_cfg
-
-        if kernel_repo:
-            kernel_srcdir = os.path.join(self.srcdir, "kvm")
-            kvm_utils.get_git_branch(kernel_repo, kernel_branch, kernel_srcdir,
-                                     kernel_commit, kernel_lbranch)
-            self.kernel_srcdir = kernel_srcdir
-            if kernel_patches:
-                os.chdir(self.kernel_srcdir)
-                for patch in kernel_patches:
-                    utils.get_file(patch, os.path.join(self.userspace_srcdir,
-                                                       os.path.basename(patch)))
-                    utils.system('patch -p1 %s' % os.path.basename(patch))
-        else:
-            self.kernel_srcdir = None
-
-        if kmod_repo:
-            kmod_srcdir = os.path.join (self.srcdir, "kvm_kmod")
-            kvm_utils.get_git_branch(kmod_repo, kmod_branch, kmod_srcdir,
-                                     kmod_commit, kmod_lbranch)
-            self.kmod_srcdir = kmod_srcdir
-            if kmod_patches:
-                os.chdir(self.kmod_srcdir)
-                for patch in kmod_patches:
-                    utils.get_file(patch, os.path.join(self.userspace_srcdir,
-                                                       os.path.basename(patch)))
-                    utils.system('patch -p1 %s' % os.path.basename(patch))
-        else:
-            self.kmod_srcdir = None
-
-        configure_script = os.path.join(self.userspace_srcdir, 'configure')
-        self.configure_options = check_configure_options(configure_script)
-
-
-    def _build(self):
-        make_jobs = utils.count_cpus()
-        cfg = './configure'
-        self.modules_build_succeed = False
-        if self.kmod_srcdir:
-            logging.info('Building KVM modules')
-            os.chdir(self.kmod_srcdir)
-            module_build_steps = [cfg,
-                                  'make clean',
-                                  'make sync LINUX=%s' % self.kernel_srcdir,
-                                  'make']
-        elif self.kernel_srcdir:
-            logging.info('Building KVM modules')
-            os.chdir(self.userspace_srcdir)
-            cfg += ' --kerneldir=%s' % self.host_kernel_srcdir
-            module_build_steps = [cfg,
-                            'make clean',
-                            'make -C kernel LINUX=%s sync' % self.kernel_srcdir]
-        else:
-            module_build_steps = []
-
-        try:
-            if module_build_steps:
-                for step in module_build_steps:
-                    utils.run(step)
-                self.modules_build_succeed = True
-        except error.CmdError, e:
-            logging.error("KVM modules build failed to build: %s" % e)
-
-        logging.info('Building KVM userspace code')
-        os.chdir(self.userspace_srcdir)
-        cfg += ' --prefix=%s' % self.prefix
-        if "--disable-strip" in self.configure_options:
-            cfg += ' --disable-strip'
-        if self.extra_configure_options:
-            cfg += ' %s' % self.extra_configure_options
-        utils.system(cfg)
-        utils.system('make clean')
-        utils.system('make -j %s' % make_jobs)
-
-        self.unittest_prefix = None
-        if self.unittest_cfg:
-            os.chdir(os.path.dirname(os.path.dirname(self.unittest_cfg)))
-            utils.system('./configure --prefix=%s' % self.prefix)
-            utils.system('make')
-            utils.system('make install')
-            self.unittest_prefix = os.path.join(self.prefix, 'share', 'qemu',
-                                                'tests')
-
-
-    def _install(self):
-        os.chdir(self.userspace_srcdir)
-        utils.system('make install')
-        create_symlinks(test_bindir=self.test_bindir, prefix=self.prefix,
-                        bin_list=None,
-                        unittest=self.unittest_prefix)
-
-
-    def _load_modules(self):
-        if self.kmod_srcdir and self.modules_build_succeed:
-            load_kvm_modules(module_dir=self.kmod_srcdir,
-                             extra_modules=self.extra_modules)
-        elif self.kernel_srcdir and self.modules_build_succeed:
-            load_kvm_modules(module_dir=self.userspace_srcdir,
-                             extra_modules=self.extra_modules)
-        else:
-            logging.info("Loading stock KVM modules")
-            load_kvm_modules(load_stock=True,
-                             extra_modules=self.extra_modules)
-
-
-    def install(self):
-        self._build()
-        self._install()
-        if self.load_modules:
-            self._load_modules()
-        if self.save_results:
-            save_build(self.srcdir, self.results_dir)
+import installer
 
 
 def run_build(test, params, env):
@@ -595,20 +10,17 @@
     @param params: Dictionary with test parameters.
     @param env: Test environment.
     """
-    install_mode = params.get("mode")
     srcdir = params.get("srcdir", test.srcdir)
     params["srcdir"] = srcdir
 
-    if install_mode in ['localsrc', 'localtar', 'release', 'snapshot']:
-        installer = SourceDirInstaller(test, params)
-    elif install_mode == 'git':
-        installer = GitInstaller(test, params)
-    elif install_mode == 'yum':
-        installer = YumInstaller(test, params)
-    elif install_mode == 'koji':
-        installer = KojiInstaller(test, params)
-    else:
-        raise error.TestError('Invalid or unsupported'
-                              ' install mode: %s' % install_mode)
-
-    installer.install()
+    try:
+        installer_object = installer.make_installer(params)
+        installer_object.set_install_params(test, params)
+        installer_object.install()
+        env.register_installer(installer_object)
+    except Exception, e:
+        # if the build/install fails, don't allow other tests
+        # to get an installer.
+        msg = "KVM install failed: %s" % (e)
+        env.register_installer(installer.FailedInstaller(msg))
+        raise
diff --git a/client/tests/kvm/tests/clock_getres.py b/client/tests/kvm/tests/clock_getres.py
new file mode 100644
index 0000000..d1baf88
--- /dev/null
+++ b/client/tests/kvm/tests/clock_getres.py
@@ -0,0 +1,37 @@
+import logging, os
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.bin import utils
+
+
+def run_clock_getres(test, params, env):
+    """
+    Verify if guests using kvm-clock as the time source have a sane clock
+    resolution.
+
+    @param test: kvm test object.
+    @param params: Dictionary with test parameters.
+    @param env: Dictionary with the test environment.
+    """
+    t_name = "test_clock_getres"
+    base_dir = "/tmp"
+
+    deps_dir = os.path.join(test.bindir, "deps", t_name)
+    os.chdir(deps_dir)
+    try:
+        utils.system("make clean")
+        utils.system("make")
+    except:
+        raise error.TestError("Failed to compile %s" % t_name)
+
+    test_clock = os.path.join(deps_dir, t_name)
+    if not os.path.isfile(test_clock):
+        raise error.TestError("Could not find %s" % t_name)
+
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+    timeout = int(params.get("login_timeout", 360))
+    session = vm.wait_for_login(timeout=timeout)
+    vm.copy_files_to(test_clock, base_dir)
+    session.cmd(os.path.join(base_dir, t_name))
+    logging.info("PASS: Guest reported appropriate clock resolution")
+    logging.info("Guest's dmesg:\n%s", session.cmd_output("dmesg").strip())
diff --git a/client/tests/kvm/tests/enospc.py b/client/tests/kvm/tests/enospc.py
new file mode 100644
index 0000000..3c53b64
--- /dev/null
+++ b/client/tests/kvm/tests/enospc.py
@@ -0,0 +1,74 @@
+import logging, time, re
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.bin import utils
+import kvm_vm
+
+
+def run_enospc(test, params, env):
+    """
+    ENOSPC test
+
+    1) Create a virtual disk on lvm
+    2) Boot up guest with two disks
+    3) Continually write data to second disk
+    4) Check images and extend second disk when no space
+    5) Continue paused guest
+    6) Repeat step 3~5 several times
+
+    @param test: KVM test object.
+    @param params: Dictionary with the test parameters.
+    @param env: Dictionary with test environment.
+    """
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+    login_timeout = int(params.get("login_timeout", 360))
+    session_serial = vm.wait_for_serial_login(timeout=login_timeout)
+
+    vgtest_name = params.get("vgtest_name")
+    lvtest_name = params.get("lvtest_name")
+    logical_volume = "/dev/%s/%s" % (vgtest_name, lvtest_name)
+
+    drive_format = params.get("drive_format")
+    if drive_format == "virtio":
+        devname = "/dev/vdb"
+    elif drive_format == "ide":
+        output = session_serial.cmd_output("dir /dev")
+        devname = "/dev/" + re.findall("([sh]db)\s", output)[0]
+    elif drive_format == "scsi":
+        devname = "/dev/sdb"
+    cmd = params.get("background_cmd")
+    cmd %= devname
+    logging.info("Sending background cmd '%s'", cmd)
+    session_serial.sendline(cmd)
+
+    iterations = int(params.get("repeat_time", 40))
+    i = 0
+    pause_n = 0
+    while i < iterations:
+        status = vm.monitor.cmd("info status")
+        logging.debug(status)
+        if "paused" in status:
+            pause_n += 1
+            logging.info("Checking all images in use by the VM")
+            for image_name in vm.params.objects("images"):
+                image_params = vm.params.object_params(image_name)
+                try:
+                    kvm_vm.check_image(image_params, test.bindir)
+                except kvm_vm.VMError, e:
+                    logging.error(e)
+            logging.info("Guest paused, extending Logical Volume size")
+            try:
+                utils.run("lvextend -L +200M %s" % logical_volume)
+            except error.CmdError, e:
+                logging.debug(e.result_obj.stdout)
+            vm.monitor.cmd("cont")
+        time.sleep(10)
+        i += 1
+
+    if pause_n == 0:
+        raise error.TestFail("Guest didn't pause during loop")
+    else:
+        logging.info("Guest paused %s times from %s iterations",
+                     pause_n, iterations)
+
+    logging.info("Final %s", vm.monitor.cmd("info status"))
diff --git a/client/tests/kvm/tests/ethtool.py b/client/tests/kvm/tests/ethtool.py
index 56b1c70..81e45d3 100644
--- a/client/tests/kvm/tests/ethtool.py
+++ b/client/tests/kvm/tests/ethtool.py
@@ -1,7 +1,8 @@
-import logging, commands, re
+import logging, re
 from autotest_lib.client.common_lib import error
 from autotest_lib.client.bin import utils
-import kvm_test_utils, kvm_utils
+import kvm_test_utils, kvm_utils, kvm_subprocess
+
 
 def run_ethtool(test, params, env):
     """
@@ -22,7 +23,7 @@
         find a way to get it installed using yum/apt-get/
         whatever
     """
-    def ethtool_get(type):
+    def ethtool_get(f_type):
         feature_pattern = {
             'tx':  'tx.*checksumming',
             'rx':  'rx.*checksumming',
@@ -32,28 +33,32 @@
             'gro': 'generic.*receive.*offload',
             'lro': 'large.*receive.*offload',
             }
-        s, o = session.get_command_status_output("ethtool -k %s" % ethname)
+        o = session.cmd("ethtool -k %s" % ethname)
         try:
-            return re.findall("%s: (.*)" % feature_pattern.get(type), o)[0]
+            return re.findall("%s: (.*)" % feature_pattern.get(f_type), o)[0]
         except IndexError:
-            logging.debug("Could not get %s status" % type)
+            logging.debug("Could not get %s status", f_type)
 
 
-    def ethtool_set(type, status):
+    def ethtool_set(f_type, status):
         """
         Set ethernet device offload status
 
-        @param type: Offload type name
+        @param f_type: Offload type name
         @param status: New status will be changed to
         """
-        logging.info("Try to set %s %s" % (type, status))
+        logging.info("Try to set %s %s", f_type, status)
         if status not in ["off", "on"]:
             return False
-        cmd = "ethtool -K %s %s %s" % (ethname, type, status)
-        if ethtool_get(type) != status:
-            return session.get_command_status(cmd) == 0
-        if ethtool_get(type) != status:
-            logging.error("Fail to set %s %s" % (type, status))
+        cmd = "ethtool -K %s %s %s" % (ethname, f_type, status)
+        if ethtool_get(f_type) != status:
+            try:
+                session.cmd(cmd)
+                return True
+            except:
+                return False
+        if ethtool_get(f_type) != status:
+            logging.error("Fail to set %s %s", f_type, status)
             return False
         return True
 
@@ -74,13 +79,12 @@
         logging.info("Compare md5sum of the files on guest and host")
         host_result = utils.hash_file(name, method="md5")
         try:
-            o = session.get_command_output("md5sum %s" % name)
+            o = session.cmd_output("md5sum %s" % name)
             guest_result = re.findall("\w+", o)[0]
         except IndexError:
             logging.error("Could not get file md5sum in guest")
             return False
-        logging.debug("md5sum: guest(%s), host(%s)" %
-                      (guest_result, host_result))
+        logging.debug("md5sum: guest(%s), host(%s)", guest_result, host_result)
         return guest_result == host_result
 
 
@@ -92,41 +96,48 @@
         @param src: Source host of transfer file
         @return: Tuple (status, error msg/tcpdump result)
         """
-        session2.get_command_status("rm -rf %s" % filename)
-        dd_cmd = "dd if=/dev/urandom of=%s bs=1M count=%s" % (filename,
-                                                   params.get("filesize"))
-        logging.info("Creat file in source host, cmd: %s" % dd_cmd)
+        session2.cmd_output("rm -rf %s" % filename)
+        dd_cmd = ("dd if=/dev/urandom of=%s bs=1M count=%s" %
+                  (filename, params.get("filesize")))
+        failure = (False, "Failed to create file using dd, cmd: %s" % dd_cmd)
+        logging.info("Creating file in source host, cmd: %s", dd_cmd)
         tcpdump_cmd = "tcpdump -lep -s 0 tcp -vv port ssh"
         if src == "guest":
-            s = session.get_command_status(dd_cmd, timeout=360)
             tcpdump_cmd += " and src %s" % guest_ip
-            copy_files_fun = vm.copy_files_from
+            copy_files_from = vm.copy_files_from
+            try:
+                session.cmd_output(dd_cmd, timeout=360)
+            except kvm_subprocess.ShellCmdError, e:
+                return failure
         else:
-            s, o = commands.getstatusoutput(dd_cmd)
             tcpdump_cmd += " and dst %s" % guest_ip
-            copy_files_fun = vm.copy_files_to
-        if s != 0:
-            return (False, "Fail to create file by dd, cmd: %s" % dd_cmd)
+            copy_files_from = vm.copy_files_to
+            try:
+                utils.system(dd_cmd)
+            except error.CmdError, e:
+                return failure
 
         # only capture the new tcp port after offload setup
         original_tcp_ports = re.findall("tcp.*:(\d+).*%s" % guest_ip,
                                       utils.system_output("/bin/netstat -nap"))
         for i in original_tcp_ports:
             tcpdump_cmd += " and not port %s" % i
-        logging.debug("Listen by command: %s" % tcpdump_cmd)
+        logging.debug("Listen using command: %s", tcpdump_cmd)
         session2.sendline(tcpdump_cmd)
-        if not kvm_utils.wait_for(lambda: session.get_command_status(
-                                           "pgrep tcpdump") == 0, 30):
+        if not kvm_utils.wait_for(
+                           lambda:session.cmd_status("pgrep tcpdump") == 0, 30):
             return (False, "Tcpdump process wasn't launched")
 
         logging.info("Start to transfer file")
-        if not copy_files_fun(filename, filename):
-            return (False, "Child process transfer file failed")
+        try:
+            copy_files_from(filename, filename)
+        except kvm_utils.SCPError, e:
+            return (False, "File transfer failed (%s)" % e)
         logging.info("Transfer file completed")
-        if session.get_command_status("killall tcpdump") != 0:
-            return (False, "Could not kill all tcpdump process")
-        s, tcpdump_string = session2.read_up_to_prompt(timeout=60)
-        if not s:
+        session.cmd("killall tcpdump")
+        try:
+            tcpdump_string = session2.read_up_to_prompt(timeout=60)
+        except kvm_subprocess.ExpectError:
             return (False, "Fail to read tcpdump's output")
 
         if not compare_md5sum(filename):
@@ -169,14 +180,12 @@
         return True
 
 
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
-    session = kvm_test_utils.wait_for_login(vm,
-                  timeout=int(params.get("login_timeout", 360)))
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))
     # Let's just error the test if we identify that there's no ethtool installed
-    if session.get_command_status("ethtool -h"):
-        raise error.TestError("Command ethtool not installed on guest")
-    session2 = kvm_test_utils.wait_for_login(vm,
-                  timeout=int(params.get("login_timeout", 360)))
+    session.cmd("ethtool -h")
+    session2 = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))
     mtu = 1514
     feature_status = {}
     filename = "/tmp/ethtool.dd"
@@ -196,24 +205,24 @@
     ethtool_save_params()
     success = True
     try:
-        for type in supported_features:
-            callback = test_matrix[type][0]
-            for i in test_matrix[type][2]:
+        for f_type in supported_features:
+            callback = test_matrix[f_type][0]
+            for i in test_matrix[f_type][2]:
                 if not ethtool_set(i, "off"):
-                    logging.error("Fail to disable %s" % i)
+                    logging.error("Fail to disable %s", i)
                     success = False
-            for i in [f for f in test_matrix[type][1]] + [type]:
+            for i in [f for f in test_matrix[f_type][1]] + [f_type]:
                 if not ethtool_set(i, "on"):
-                    logging.error("Fail to enable %s" % i)
+                    logging.error("Fail to enable %s", i)
                     success = False
             if not callback():
-                raise error.TestFail("Test failed, %s: on" % type)
+                raise error.TestFail("Test failed, %s: on", f_type)
 
-            if not ethtool_set(type, "off"):
-                logging.error("Fail to disable %s" % type)
+            if not ethtool_set(f_type, "off"):
+                logging.error("Fail to disable %s", f_type)
                 success = False
             if not callback(status="off"):
-                raise error.TestFail("Test failed, %s: off" % type)
+                raise error.TestFail("Test failed, %s: off", f_type)
         if not success:
             raise error.TestError("Enable/disable offload function fail")
     finally:
diff --git a/client/tests/kvm/tests/file_transfer.py b/client/tests/kvm/tests/file_transfer.py
index e872bed..fe70b37 100644
--- a/client/tests/kvm/tests/file_transfer.py
+++ b/client/tests/kvm/tests/file_transfer.py
@@ -1,7 +1,7 @@
-import logging, commands, re, time, os
+import logging, time, os
 from autotest_lib.client.common_lib import error
 from autotest_lib.client.bin import utils
-import kvm_utils, kvm_test_utils
+
 
 def run_file_transfer(test, params, env):
     """
@@ -17,20 +17,23 @@
     @param params: Dictionary with the test parameters.
     @param env: Dictionary with test environment.
     """
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
-    timeout=int(params.get("login_timeout", 360))
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+    login_timeout = int(params.get("login_timeout", 360))
 
-    session = kvm_test_utils.wait_for_login(vm, timeout=timeout)
-    if not session:
-        raise error.TestFail("Could not log into guest '%s'" % vm.name)
+    session = vm.wait_for_login(timeout=login_timeout)
 
-    dir = test.tmpdir
+    dir_name = test.tmpdir
     transfer_timeout = int(params.get("transfer_timeout"))
     transfer_type = params.get("transfer_type")
     tmp_dir = params.get("tmp_dir", "/tmp/")
     clean_cmd = params.get("clean_cmd", "rm -f")
     filesize = int(params.get("filesize", 4000))
-    cmd = "dd if=/dev/urandom of=%s/a.out bs=1M count=%d" % (dir, filesize)
+    count = int(filesize / 10)
+    if count == 0:
+        count = 1
+    cmd = "dd if=/dev/zero of=%s/a.out bs=10M count=%d" % (dir_name,
+                                                           count)
     guest_path = tmp_dir + "b.out"
 
     try:
@@ -41,24 +44,20 @@
             logging.info("Transfering file host -> guest, timeout: %ss",
                          transfer_timeout)
             t_begin = time.time()
-            success = vm.copy_files_to("%s/a.out" % dir, guest_path,
-                                       timeout=transfer_timeout)
+            vm.copy_files_to("%s/a.out" % dir_name, guest_path,
+                             timeout=transfer_timeout)
             t_end = time.time()
             throughput = filesize / (t_end - t_begin)
-            if not success:
-                raise error.TestFail("Fail to transfer file from host to guest")
             logging.info("File transfer host -> guest succeed, "
                          "estimated throughput: %.2fMB/s", throughput)
 
             logging.info("Transfering file guest -> host, timeout: %ss",
                          transfer_timeout)
             t_begin = time.time()
-            success = vm.copy_files_from(guest_path, "%s/c.out" % dir,
-                                         timeout=transfer_timeout)
+            vm.copy_files_from(guest_path, "%s/c.out" % dir_name,
+                               timeout=transfer_timeout)
             t_end = time.time()
             throughput = filesize / (t_end - t_begin)
-            if not success:
-                raise error.TestFail("Fail to transfer file from guest to host")
             logging.info("File transfer guest -> host succeed, "
                          "estimated throughput: %.2fMB/s", throughput)
         else:
@@ -66,12 +65,12 @@
                                   transfer_type)
 
         for f in ['a.out', 'c.out']:
-            p = os.path.join(dir, f)
+            p = os.path.join(dir_name, f)
             size = os.path.getsize(p)
             logging.debug('Size of %s: %sB', f, size)
 
-        md5_orig = utils.hash_file("%s/a.out" % dir, method="md5")
-        md5_new = utils.hash_file("%s/c.out" % dir, method="md5")
+        md5_orig = utils.hash_file("%s/a.out" % dir_name, method="md5")
+        md5_new = utils.hash_file("%s/c.out" % dir_name, method="md5")
 
         if md5_orig != md5_new:
             raise error.TestFail("File changed after transfer host -> guest "
@@ -80,11 +79,11 @@
     finally:
         logging.info('Cleaning temp file on guest')
         clean_cmd += " %s" % guest_path
-        s, o = session.get_command_status_output(clean_cmd)
-        if s:
-            logging.warning("Failed to clean remote file %s, output:%s",
-                            guest_path, o)
+        session.cmd(clean_cmd)
         logging.info('Cleaning temp files on host')
-        os.remove('%s/a.out' % dir)
-        os.remove('%s/c.out' % dir)
+        try:
+            os.remove('%s/a.out' % dir_name)
+            os.remove('%s/c.out' % dir_name)
+        except OSError:
+            pass
         session.close()
diff --git a/client/tests/kvm/tests/guest_s4.py b/client/tests/kvm/tests/guest_s4.py
index 2eb035b..efd8e3b 100644
--- a/client/tests/kvm/tests/guest_s4.py
+++ b/client/tests/kvm/tests/guest_s4.py
@@ -1,8 +1,9 @@
 import logging, time
 from autotest_lib.client.common_lib import error
-import kvm_test_utils, kvm_utils
+import kvm_utils
 
 
+@error.context_aware
 def run_guest_s4(test, params, env):
     """
     Suspend guest to disk, supports both Linux & Windows OSes.
@@ -11,16 +12,15 @@
     @param params: Dictionary with test parameters.
     @param env: Dictionary with the test environment.
     """
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+    error.base_context("before S4")
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
     timeout = int(params.get("login_timeout", 360))
-    session = kvm_test_utils.wait_for_login(vm, timeout=timeout)
+    session = vm.wait_for_login(timeout=timeout)
 
-    logging.info("Checking whether guest OS supports suspend to disk (S4)...")
-    s, o = session.get_command_status_output(params.get("check_s4_support_cmd"))
-    if "not enough space" in o:
-        raise error.TestError("Check S4 support failed: %s" % o)
-    elif s != 0:
-        raise error.TestNAError("Guest OS does not support S4")
+    error.context("checking whether guest OS supports S4", logging.info)
+    session.cmd(params.get("check_s4_support_cmd"))
+    error.context()
 
     logging.info("Waiting until all guest OS services are fully started...")
     time.sleep(float(params.get("services_up_timeout", 30)))
@@ -32,20 +32,22 @@
     time.sleep(5)
 
     # Get the second session to start S4
-    session2 = kvm_test_utils.wait_for_login(vm, timeout=timeout)
+    session2 = vm.wait_for_login(timeout=timeout)
 
     # Make sure the background program is running as expected
+    error.context("making sure background program is running")
     check_s4_cmd = params.get("check_s4_cmd")
-    if session2.get_command_status(check_s4_cmd) != 0:
-        raise error.TestError("Failed to launch '%s' as a background process" %
-                              test_s4_cmd)
-    logging.info("Launched background command in guest: %s" % test_s4_cmd)
+    session2.cmd(check_s4_cmd)
+    logging.info("Launched background command in guest: %s", test_s4_cmd)
+    error.context()
+    error.base_context()
 
     # Suspend to disk
     logging.info("Starting suspend to disk now...")
     session2.sendline(params.get("set_s4_cmd"))
 
     # Make sure the VM goes down
+    error.base_context("after S4")
     suspend_timeout = 240 + int(params.get("smp")) * 60
     if not kvm_utils.wait_for(vm.is_dead, suspend_timeout, 2, 2):
         raise error.TestFail("VM refuses to go down. Suspend failed.")
@@ -55,24 +57,20 @@
 
     # Start vm, and check whether the program is still running
     logging.info("Resuming suspended VM...")
-    if not vm.create():
-        raise error.TestError("Failed to start VM after suspend to disk")
+    vm.create()
 
     # Log into the resumed VM
     relogin_timeout = int(params.get("relogin_timeout", 240))
     logging.info("Logging into resumed VM, timeout %s", relogin_timeout)
-    session2 = kvm_utils.wait_for(vm.remote_login, relogin_timeout, 0, 2)
-    if not session2:
-        raise error.TestFail("Could not log into VM after resuming from "
-                             "suspend to disk")
+    session2 = vm.wait_for_login(timeout=relogin_timeout)
 
     # Check whether the test command is still alive
-    logging.info("Checking if background command is still alive...")
-    if session2.get_command_status(check_s4_cmd) != 0:
-        raise error.TestFail("Background command '%s' stopped running. S4 "
-                             "failed." % test_s4_cmd)
+    error.context("making sure background program is still running",
+                  logging.info)
+    session2.cmd(check_s4_cmd)
+    error.context()
 
     logging.info("VM resumed successfuly after suspend to disk")
-    session2.get_command_output(params.get("kill_test_s4_cmd"))
+    session2.cmd_output(params.get("kill_test_s4_cmd"))
     session.close()
     session2.close()
diff --git a/client/tests/kvm/tests/guest_test.py b/client/tests/kvm/tests/guest_test.py
index b6bebc7..95c6f7f 100644
--- a/client/tests/kvm/tests/guest_test.py
+++ b/client/tests/kvm/tests/guest_test.py
@@ -1,6 +1,5 @@
 import os, logging
-from autotest_lib.client.common_lib import error
-import kvm_utils, kvm_test_utils
+import kvm_utils
 
 
 def run_guest_test(test, params, env):
@@ -19,12 +18,16 @@
     login_timeout = int(params.get("login_timeout", 360))
     reboot = params.get("reboot", "no")
 
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
-    session = kvm_test_utils.wait_for_login(vm, timeout=login_timeout)
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+    if params.get("serial_login") == "yes":
+        session = vm.wait_for_serial_login(timeout=login_timeout)
+    else:
+        session = vm.wait_for_login(timeout=login_timeout)
 
     if reboot == "yes":
         logging.debug("Rebooting guest before test ...")
-        session = kvm_test_utils.reboot(vm, session, timeout=login_timeout)
+        session = vm.reboot(session, timeout=login_timeout)
 
     try:
         logging.info("Starting script...")
@@ -48,38 +51,29 @@
             # Change dir to dst_rsc_dir, and remove the guest script dir there
             rm_cmd = "cd %s && (rmdir /s /q %s || del /s /q %s)" % \
                      (dst_rsc_dir, rsc_dir, rsc_dir)
-            if session.get_command_status(rm_cmd, timeout=test_timeout) != 0:
-                raise error.TestFail("Remove %s failed." % rsc_dir)
+            session.cmd(rm_cmd, timeout=test_timeout)
             logging.debug("Clean directory succeeded.")
 
             # then download the resource.
-            rsc_cmd = "cd %s && %s %s" %(dst_rsc_dir, download_cmd, rsc_server)
-            if session.get_command_status(rsc_cmd, timeout=test_timeout) != 0:
-                raise error.TestFail("Download test resource failed.")
+            rsc_cmd = "cd %s && %s %s" % (dst_rsc_dir, download_cmd, rsc_server)
+            session.cmd(rsc_cmd, timeout=test_timeout)
             logging.info("Download resource finished.")
         else:
-            session.get_command_output("del %s" % dst_rsc_path,
-                                       internal_timeout=0)
+            session.cmd_output("del %s" % dst_rsc_path, internal_timeout=0)
             script_path = kvm_utils.get_path(test.bindir, script)
             vm.copy_files_to(script_path, dst_rsc_path, timeout=60)
 
-        command = "cmd /c %s %s %s" %(interpreter, dst_rsc_path, script_params)
+        cmd = "%s %s %s" % (interpreter, dst_rsc_path, script_params)
 
-        logging.info("---------------- Script output ----------------")
-        status = session.get_command_status(command,
-                                            print_func=logging.info,
-                                            timeout=test_timeout)
-        logging.info("---------------- End of script output ----------------")
-
-        if status is None:
-            raise error.TestFail("Timeout expired before script execution "
-                                 "completed (or something weird happened)")
-        if status != 0:
-            raise error.TestFail("Script execution failed")
+        try:
+            logging.info("------------ Script output ------------")
+            session.cmd(cmd, print_func=logging.info, timeout=test_timeout)
+        finally:
+            logging.info("------------ End of script output ------------")
 
         if reboot == "yes":
             logging.debug("Rebooting guest after test ...")
-            session = kvm_test_utils.reboot(vm, session, timeout=login_timeout)
+            session = vm.reboot(session, timeout=login_timeout)
 
         logging.debug("guest test PASSED.")
     finally:
diff --git a/client/tests/kvm/tests/image_copy.py b/client/tests/kvm/tests/image_copy.py
new file mode 100644
index 0000000..8a4d74c
--- /dev/null
+++ b/client/tests/kvm/tests/image_copy.py
@@ -0,0 +1,45 @@
+import os, logging
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.bin import utils
+import kvm_utils
+
+
+def run_image_copy(test, params, env):
+    """
+    Copy guest images from NFS server.
+    1) Mount the NFS share directory
+    2) Check the existence of source image
+    3) If it exists, copy the image from NFS
+
+    @param test: kvm test object
+    @param params: Dictionary with the test parameters
+    @param env: Dictionary with test environment.
+    """
+    mount_dest_dir = params.get('dst_dir', '/mnt/images')
+    if not os.path.exists(mount_dest_dir):
+        try:
+            os.makedirs(mount_dest_dir)
+        except OSError, err:
+            logging.warning('mkdir %s error:\n%s', mount_dest_dir, err)
+
+    if not os.path.exists(mount_dest_dir):
+        raise error.TestError('Failed to create NFS share dir %s' %
+                              mount_dest_dir)
+
+    src = params.get('images_good')
+    image = '%s.%s' % (os.path.split(params['image_name'])[1],
+                       params['image_format'])
+    src_path = os.path.join(mount_dest_dir, image)
+    dst_path = '%s.%s' % (params['image_name'], params['image_format'])
+    cmd = 'cp %s %s' % (src_path, dst_path)
+
+    if not kvm_utils.mount(src, mount_dest_dir, 'nfs', 'ro'):
+        raise error.TestError('Could not mount NFS share %s to %s' %
+                              (src, mount_dest_dir))
+
+    # Check the existence of source image
+    if not os.path.exists(src_path):
+        raise error.TestError('Could not find %s in NFS share' % src_path)
+
+    logging.debug('Copying image %s...', image)
+    utils.system(cmd)
diff --git a/client/tests/kvm/tests/iofuzz.py b/client/tests/kvm/tests/iofuzz.py
index 45a0eb9..7189f91 100644
--- a/client/tests/kvm/tests/iofuzz.py
+++ b/client/tests/kvm/tests/iofuzz.py
@@ -1,6 +1,6 @@
-import logging, time, re, random
+import logging, re, random
 from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils
+import kvm_subprocess
 
 
 def run_iofuzz(test, params, env):
@@ -33,11 +33,10 @@
         logging.debug("outb(0x%x, 0x%x)", port, data)
         outb_cmd = ("echo -e '\\%s' | dd of=/dev/port seek=%d bs=1 count=1" %
                     (oct(data), port))
-        s, o = session.get_command_status_output(outb_cmd)
-        if s is None:
-            logging.debug("Command did not return")
-        if s != 0:
-            logging.debug("Command returned status %s", s)
+        try:
+            session.cmd(outb_cmd)
+        except kvm_subprocess.ShellError, e:
+            logging.debug(e)
 
 
     def inb(session, port):
@@ -49,11 +48,10 @@
         """
         logging.debug("inb(0x%x)", port)
         inb_cmd = "dd if=/dev/port seek=%d of=/dev/null bs=1 count=1" % port
-        s, o = session.get_command_status_output(inb_cmd)
-        if s is None:
-            logging.debug("Command did not return")
-        if s != 0:
-            logging.debug("Command returned status %s", s)
+        try:
+            session.cmd(inb_cmd)
+        except kvm_subprocess.ShellError, e:
+            logging.debug(e)
 
 
     def fuzz(session, inst_list):
@@ -71,7 +69,7 @@
         for (op, operand) in inst_list:
             if op == "read":
                 inb(session, operand[0])
-            elif op =="write":
+            elif op == "write":
                 outb(session, operand[0], operand[1])
             else:
                 raise error.TestError("Unknown command %s" % op)
@@ -81,26 +79,26 @@
                 if vm.process.is_alive():
                     logging.debug("VM is alive, try to re-login")
                     try:
-                        session = kvm_test_utils.wait_for_login(vm, 0, 10, 0, 2)
+                        session = vm.wait_for_login(timeout=10)
                     except:
                         logging.debug("Could not re-login, reboot the guest")
-                        session = kvm_test_utils.reboot(vm, session,
-                                                        method = "system_reset")
+                        session = vm.reboot(method="system_reset")
                 else:
                     raise error.TestFail("VM has quit abnormally during %s",
                                          (op, operand))
 
 
     login_timeout = float(params.get("login_timeout", 240))
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
-    session = kvm_test_utils.wait_for_login(vm, 0, login_timeout, 0, 2)
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+    session = vm.wait_for_login(timeout=login_timeout)
 
     try:
         ports = {}
         r = random.SystemRandom()
 
         logging.info("Enumerate guest devices through /proc/ioports")
-        ioports = session.get_command_output("cat /proc/ioports")
+        ioports = session.cmd_output("cat /proc/ioports")
         logging.debug(ioports)
         devices = re.findall("(\w+)-(\w+)\ : (.*)", ioports)
 
diff --git a/client/tests/kvm/tests/ioquit.py b/client/tests/kvm/tests/ioquit.py
index 8126139..34b4fb5 100644
--- a/client/tests/kvm/tests/ioquit.py
+++ b/client/tests/kvm/tests/ioquit.py
@@ -1,6 +1,4 @@
 import logging, time, random
-from autotest_lib.client.common_lib import error
-import kvm_test_utils
 
 
 def run_ioquit(test, params, env):
@@ -11,26 +9,21 @@
     @param params: Dictionary with the test parameters.
     @param env: Dictionary with test environment.
     """
-
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
-    session = kvm_test_utils.wait_for_login(vm,
-                  timeout=int(params.get("login_timeout", 360)))
-    session2 = kvm_test_utils.wait_for_login(vm,
-                  timeout=int(params.get("login_timeout", 360)))
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+    login_timeout = int(params.get("login_timeout", 360))
+    session = vm.wait_for_login(timeout=login_timeout)
+    session2 = vm.wait_for_login(timeout=login_timeout)
     try:
         bg_cmd = params.get("background_cmd")
         logging.info("Add IO workload for guest OS.")
-        (s, o) = session.get_command_status_output(bg_cmd, timeout=60)
+        session.cmd_output(bg_cmd, timeout=60)
         check_cmd = params.get("check_cmd")
-        (s, o) = session2.get_command_status_output(check_cmd, timeout=60)
-        if s:
-            raise error.TestError("Fail to add IO workload for Guest OS")
+        session2.cmd(check_cmd, timeout=60)
 
         logging.info("Sleep for a while")
-        time.sleep(random.randrange(30,100))
-        (s, o) = session2.get_command_status_output(check_cmd, timeout=60)
-        if s:
-            logging.info("IO workload finished before the VM was killed")
+        time.sleep(random.randrange(30, 100))
+        session2.cmd(check_cmd, timeout=60)
         logging.info("Kill the virtual machine")
         vm.process.close()
     finally:
diff --git a/client/tests/kvm/tests/iozone_windows.py b/client/tests/kvm/tests/iozone_windows.py
index a96fdfc..4046106 100644
--- a/client/tests/kvm/tests/iozone_windows.py
+++ b/client/tests/kvm/tests/iozone_windows.py
@@ -1,8 +1,6 @@
-import logging, time, os
-from autotest_lib.client.common_lib import error
+import logging, os
 from autotest_lib.client.bin import utils
 from autotest_lib.client.tests.iozone import postprocessing
-import kvm_subprocess, kvm_test_utils, kvm_utils
 
 
 def run_iozone_windows(test, params, env):
@@ -17,9 +15,10 @@
     @param params: Dictionary with the test parameters
     @param env: Dictionary with test environment.
     """
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
     timeout = int(params.get("login_timeout", 360))
-    session = kvm_test_utils.wait_for_login(vm, timeout=timeout)
+    session = vm.wait_for_login(timeout=timeout)
     results_path = os.path.join(test.resultsdir,
                                 'raw_output_%s' % test.iteration)
     analysisdir = os.path.join(test.resultsdir, 'analysis_%s' % test.iteration)
@@ -28,8 +27,7 @@
     c = params.get("iozone_cmd")
     t = int(params.get("iozone_timeout"))
     logging.info("Running IOzone command on guest, timeout %ss", t)
-    results = session.get_command_output(command=c, timeout=t,
-                                         print_func=logging.debug)
+    results = session.cmd_output(cmd=c, timeout=t, print_func=logging.debug)
     utils.open_write_close(results_path, results)
 
     # Postprocess the results using the IOzone postprocessing module
diff --git a/client/tests/kvm/tests/jumbo.py b/client/tests/kvm/tests/jumbo.py
index 2c91c83..b7f88ae 100644
--- a/client/tests/kvm/tests/jumbo.py
+++ b/client/tests/kvm/tests/jumbo.py
@@ -3,6 +3,7 @@
 from autotest_lib.client.bin import utils
 import kvm_test_utils, kvm_utils
 
+
 def run_jumbo(test, params, env):
     """
     Test the RX jumbo frame function of vnics:
@@ -22,9 +23,9 @@
     @param params: Dictionary with the test parameters.
     @param env: Dictionary with test environment.
     """
-
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
-    session = kvm_test_utils.wait_for_login(vm)
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))
     mtu = params.get("mtu", "1500")
     flood_time = params.get("flood_time", "300")
     max_icmp_pkt_size = int(mtu) - 28
@@ -40,11 +41,7 @@
 
         logging.info("Changing the MTU of guest ...")
         guest_mtu_cmd = "ifconfig %s mtu %s" % (ethname , mtu)
-        s, o = session.get_command_status_output(guest_mtu_cmd)
-        if s != 0:
-            logging.error(o)
-            raise error.TestError("Fail to set the MTU of guest NIC: %s" %
-                                  ethname)
+        session.cmd(guest_mtu_cmd)
 
         logging.info("Chaning the MTU of host tap ...")
         host_mtu_cmd = "ifconfig %s mtu %s" % (ifname, mtu)
@@ -92,7 +89,7 @@
         def size_increase_ping(step=random.randrange(90, 110)):
             logging.info("Size increase ping")
             for size in range(0, max_icmp_pkt_size + 1, step):
-                logging.info("Ping %s with size %s" % (ip, size))
+                logging.info("Ping %s with size %s", ip, size)
                 s, o = kvm_test_utils.ping(ip, 1, interface=ifname,
                                            packetsize=size,
                                            hint="do", timeout=1)
diff --git a/client/tests/kvm/tests/kdump.py b/client/tests/kvm/tests/kdump.py
new file mode 100644
index 0000000..c847131
--- /dev/null
+++ b/client/tests/kvm/tests/kdump.py
@@ -0,0 +1,75 @@
+import logging
+from autotest_lib.client.common_lib import error
+import kvm_utils
+
+
+def run_kdump(test, params, env):
+    """
+    KVM reboot test:
+    1) Log into a guest
+    2) Check and enable the kdump
+    3) For each vcpu, trigger a crash and check the vmcore
+
+    @param test: kvm test object
+    @param params: Dictionary with the test parameters
+    @param env: Dictionary with test environment.
+    """
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+    timeout = float(params.get("login_timeout", 240))
+    crash_timeout = float(params.get("crash_timeout", 360))
+    session = vm.wait_for_login(timeout=timeout)
+    def_kernel_param_cmd = ("grubby --update-kernel=`grubby --default-kernel`"
+                            " --args=crashkernel=128M")
+    kernel_param_cmd = params.get("kernel_param_cmd", def_kernel_param_cmd)
+    def_kdump_enable_cmd = "chkconfig kdump on && service kdump start"
+    kdump_enable_cmd = params.get("kdump_enable_cmd", def_kdump_enable_cmd)
+    def_crash_kernel_prob_cmd = "grep -q 1 /sys/kernel/kexec_crash_loaded"
+    crash_kernel_prob_cmd = params.get("crash_kernel_prob_cmd",
+                                       def_crash_kernel_prob_cmd)
+
+    def crash_test(vcpu):
+        """
+        Trigger a crash dump through sysrq-trigger
+
+        @param vcpu: vcpu which is used to trigger a crash
+        """
+        session = vm.wait_for_login(timeout=timeout)
+        session.cmd_output("rm -rf /var/crash/*")
+
+        logging.info("Triggering crash on vcpu %d ...", vcpu)
+        crash_cmd = "taskset -c %d echo c > /proc/sysrq-trigger" % vcpu
+        session.sendline(crash_cmd)
+
+        if not kvm_utils.wait_for(lambda: not session.is_responsive(), 240, 0,
+                                  1):
+            raise error.TestFail("Could not trigger crash on vcpu %d" % vcpu)
+
+        logging.info("Waiting for kernel crash dump to complete")
+        session = vm.wait_for_login(timeout=crash_timeout)
+
+        logging.info("Probing vmcore file...")
+        session.cmd("ls -R /var/crash | grep vmcore")
+        logging.info("Found vmcore.")
+
+        session.cmd_output("rm -rf /var/crash/*")
+
+    try:
+        logging.info("Checking the existence of crash kernel...")
+        try:
+            session.cmd(crash_kernel_prob_cmd)
+        except:
+            logging.info("Crash kernel is not loaded. Trying to load it")
+            session.cmd(kernel_param_cmd)
+            session = vm.reboot(session, timeout=timeout)
+
+        logging.info("Enabling kdump service...")
+        # the initrd may be rebuilt here so we need to wait a little more
+        session.cmd(kdump_enable_cmd, timeout=120)
+
+        nvcpu = int(params.get("smp", 1))
+        for i in range (nvcpu):
+            crash_test(i)
+
+    finally:
+        session.close()
diff --git a/client/tests/kvm/tests/ksm_overcommit.py b/client/tests/kvm/tests/ksm_overcommit.py
index dd4a30d..5aba25a 100644
--- a/client/tests/kvm/tests/ksm_overcommit.py
+++ b/client/tests/kvm/tests/ksm_overcommit.py
@@ -1,4 +1,4 @@
-import logging, time, random, string, math, os, tempfile
+import logging, time, random, math, os
 from autotest_lib.client.common_lib import error
 from autotest_lib.client.bin import utils
 import kvm_subprocess, kvm_test_utils, kvm_utils, kvm_preprocessing
@@ -18,27 +18,27 @@
 
     def _start_allocator(vm, session, timeout):
         """
-        Execute allocator.py on a guest, wait until it is initialized.
+        Execute ksm_overcommit_guest.py on a guest, wait until it is initialized.
 
         @param vm: VM object.
         @param session: Remote session to a VM object.
-        @param timeout: Timeout that will be used to verify if allocator.py
-                started properly.
+        @param timeout: Timeout that will be used to verify if
+                ksm_overcommit_guest.py started properly.
         """
-        logging.debug("Starting allocator.py on guest %s", vm.name)
-        session.sendline("python /tmp/allocator.py")
-        (match, data) = session.read_until_last_line_matches(["PASS:", "FAIL:"],
-                                                             timeout)
-        if match == 1 or match is None:
-            raise error.TestFail("Command allocator.py on guest %s failed.\n"
-                                 "return code: %s\n output:\n%s" %
-                                 (vm.name, match, data))
+        logging.debug("Starting ksm_overcommit_guest.py on guest %s", vm.name)
+        session.sendline("python /tmp/ksm_overcommit_guest.py")
+        try:
+            session.read_until_last_line_matches(["PASS:", "FAIL:"], timeout)
+        except kvm_subprocess.ExpectProcessTerminatedError, e:
+            e_msg = ("Command ksm_overcommit_guest.py on vm '%s' failed: %s" %
+                     (vm.name, str(e)))
+            raise error.TestFail(e_msg)
 
 
     def _execute_allocator(command, vm, session, timeout):
         """
-        Execute a given command on allocator.py main loop, indicating the vm
-        the command was executed on.
+        Execute a given command on ksm_overcommit_guest.py main loop,
+        indicating the vm the command was executed on.
 
         @param command: Command that will be executed.
         @param vm: VM object.
@@ -47,15 +47,18 @@
 
         @return: Tuple (match index, data)
         """
-        logging.debug("Executing '%s' on allocator.py loop, vm: %s, timeout: %s",
-                      command, vm.name, timeout)
+        logging.debug("Executing '%s' on ksm_overcommit_guest.py loop, "
+                      "vm: %s, timeout: %s", command, vm.name, timeout)
         session.sendline(command)
-        (match, data) = session.read_until_last_line_matches(["PASS:","FAIL:"],
+        try:
+            (match, data) = session.read_until_last_line_matches(
+                                                             ["PASS:","FAIL:"],
                                                              timeout)
-        if match == 1 or match is None:
-            raise error.TestFail("Failed to execute '%s' on allocator.py, "
-                                 "vm: %s, output:\n%s" %
-                                 (command, vm.name, data))
+        except kvm_subprocess.ExpectProcessTerminatedError, e:
+            e_msg = ("Failed to execute command '%s' on "
+                     "ksm_overcommit_guest.py, vm '%s': %s" %
+                     (command, vm.name, str(e)))
+            raise error.TestFail(e_msg)
         return (match, data)
 
 
@@ -79,10 +82,8 @@
         for session in lsessions:
             vm = lvms[lsessions.index(session)]
 
-            logging.debug("Turning off swap on vm %s" % vm.name)
-            ret = session.get_command_status("swapoff -a", timeout=300)
-            if ret is None or ret:
-                raise error.TestFail("Failed to swapoff on VM %s" % vm.name)
+            logging.debug("Turning off swap on vm %s", vm.name)
+            session.cmd("swapoff -a", timeout=300)
 
             # Start the allocator
             _start_allocator(vm, session, 60 * perf_ratio)
@@ -97,7 +98,7 @@
             a_cmd = "mem.value_fill(%d)" % skeys[0]
             _execute_allocator(a_cmd, vm, lsessions[i], 120 * perf_ratio)
 
-            # Let allocator.py do its job
+            # Let ksm_overcommit_guest.py do its job
             # (until shared mem reaches expected value)
             shm = 0
             j = 0
@@ -110,7 +111,7 @@
                     raise error.TestError("SHM didn't merge the memory until "
                                           "the DL on guest: %s" % vm.name)
                 st = ksm_size / 200 * perf_ratio
-                logging.debug("Waiting %ds before proceeding..." % st)
+                logging.debug("Waiting %ds before proceeding...", st)
                 time.sleep(st)
                 if (new_ksm):
                     shm = get_ksmstat()
@@ -136,8 +137,8 @@
         logging.info("Phase 2: Split the pages on the first guest")
 
         a_cmd = "mem.static_random_fill()"
-        (match, data) = _execute_allocator(a_cmd, lvms[0], lsessions[0],
-                                           120 * perf_ratio)
+        data = _execute_allocator(a_cmd, lvms[0], lsessions[0],
+                                  120 * perf_ratio)[1]
 
         r_msg = data.splitlines()[-1]
         logging.debug("Return message of static_random_fill: %s", r_msg)
@@ -167,8 +168,8 @@
             vm = lvms[i]
             session = lsessions[i]
             a_cmd = "mem.static_random_fill()"
-            logging.debug("Executing %s on allocator.py loop, vm: %s",
-                          a_cmd, vm.name)
+            logging.debug("Executing %s on ksm_overcommit_guest.py loop, "
+                          "vm: %s", a_cmd, vm.name)
             session.sendline(a_cmd)
 
             out = ""
@@ -184,7 +185,7 @@
                     if (ksm_swap):
                         free_mem = (free_mem +
                                     int(utils.read_from_meminfo("SwapFree")))
-                    logging.debug("Free memory on host: %d" % (free_mem))
+                    logging.debug("Free memory on host: %d", free_mem)
 
                     # We need to keep some memory for python to run.
                     if (free_mem < 64000) or (ksm_swap and
@@ -194,15 +195,15 @@
                             lvms[j].destroy(gracefully = False)
                         time.sleep(20)
                         vm.monitor.cmd("c")
-                        logging.debug("Only %s free memory, killing %d guests" %
-                                      (free_mem, (i-1)))
+                        logging.debug("Only %s free memory, killing %d guests",
+                                      free_mem, (i - 1))
                         last_vm = i
                         break
                     out = session.read_nonblocking(0.1)
                     time.sleep(2)
-            except OSError, (err):
-                logging.debug("Only %s host free memory, killing %d guests" %
-                              (free_mem, (i - 1)))
+            except OSError:
+                logging.debug("Only %s host free memory, killing %d guests",
+                              free_mem, (i - 1))
                 logging.debug("Stopping %s", vm.name)
                 vm.monitor.cmd("stop")
                 for j in range(0, i):
@@ -214,7 +215,7 @@
 
             if last_vm != 0:
                 break
-            logging.debug("Memory filled for guest %s" % (vm.name))
+            logging.debug("Memory filled for guest %s", vm.name)
 
         logging.info("Phase 3a: PASS")
 
@@ -223,7 +224,7 @@
             lsessions[i].close()
             if i == (vmsc - 1):
                 logging.debug(kvm_test_utils.get_memory_info([lvms[i]]))
-            logging.debug("Destroying guest %s" % lvms[i].name)
+            logging.debug("Destroying guest %s", lvms[i].name)
             lvms[i].destroy(gracefully = False)
 
         # Verify last machine with randomly generated memory
@@ -232,7 +233,7 @@
                            (mem / 200 * 50 * perf_ratio))
         logging.debug(kvm_test_utils.get_memory_info([lvms[last_vm]]))
 
-        (status, data) = lsessions[i].get_command_status_output("die()", 20)
+        lsessions[i].cmd_output("die()", 20)
         lvms[last_vm].destroy(gracefully = False)
         logging.info("Phase 3b: PASS")
 
@@ -248,14 +249,9 @@
         session = lsessions[0]
         vm = lvms[0]
         for i in range(1, max_alloc):
-            lsessions.append(kvm_utils.wait_for(vm.remote_login, 360, 0, 2))
-            if not lsessions[i]:
-                raise error.TestFail("Could not log into guest %s" %
-                                     vm.name)
+            lsessions.append(vm.wait_for_login(timeout=360))
 
-        ret = session.get_command_status("swapoff -a", timeout=300)
-        if ret != 0:
-            raise error.TestFail("Failed to turn off swap on %s" % vm.name)
+        session.cmd("swapoff -a", timeout=300)
 
         for i in range(0, max_alloc):
             # Start the allocator
@@ -264,8 +260,8 @@
         logging.info("Phase 1: PASS")
 
         logging.info("Phase 2a: Simultaneous merging")
-        logging.debug("Memory used by allocator on guests = %dMB" %
-                     (ksm_size / max_alloc))
+        logging.debug("Memory used by allocator on guests = %dMB",
+                      (ksm_size / max_alloc))
 
         for i in range(0, max_alloc):
             a_cmd = "mem = MemFill(%d, %s, %s)" % ((ksm_size / max_alloc),
@@ -275,7 +271,7 @@
             a_cmd = "mem.value_fill(%d)" % (skeys[0])
             _execute_allocator(a_cmd, vm, lsessions[i], 90 * perf_ratio)
 
-        # Wait until allocator.py merges the pages (3 * ksm_size / 3)
+        # Wait until ksm_overcommit_guest.py merges the pages (3 * ksm_size / 3)
         shm = 0
         i = 0
         logging.debug("Target shared memory size: %s", ksm_size)
@@ -300,46 +296,46 @@
         # Actual splitting
         for i in range(0, max_alloc):
             a_cmd = "mem.static_random_fill()"
-            (match, data) = _execute_allocator(a_cmd, vm, lsessions[i],
-                                               90 * perf_ratio)
+            data = _execute_allocator(a_cmd, vm, lsessions[i],
+                                      90 * perf_ratio)[1]
 
             data = data.splitlines()[-1]
             logging.debug(data)
             out = int(data.split()[4])
-            logging.debug("Performance: %dMB * 1000 / %dms = %dMB/s" %
-                         ((ksm_size / max_alloc), out,
-                          (ksm_size * 1000 / out / max_alloc)))
+            logging.debug("Performance: %dMB * 1000 / %dms = %dMB/s",
+                          (ksm_size / max_alloc), out,
+                          (ksm_size * 1000 / out / max_alloc))
         logging.debug(kvm_test_utils.get_memory_info([vm]))
         logging.info("Phase 2b: PASS")
 
         logging.info("Phase 2c: Simultaneous verification")
         for i in range(0, max_alloc):
             a_cmd = "mem.static_random_verify()"
-            (match, data) = _execute_allocator(a_cmd, vm, lsessions[i],
-                                               (mem / 200 * 50 * perf_ratio))
+            data = _execute_allocator(a_cmd, vm, lsessions[i],
+                                      (mem / 200 * 50 * perf_ratio))[1]
         logging.info("Phase 2c: PASS")
 
         logging.info("Phase 2d: Simultaneous merging")
         # Actual splitting
         for i in range(0, max_alloc):
             a_cmd = "mem.value_fill(%d)" % skeys[0]
-            (match, data) = _execute_allocator(a_cmd, vm, lsessions[i],
-                                               120 * perf_ratio)
+            data = _execute_allocator(a_cmd, vm, lsessions[i],
+                                      120 * perf_ratio)[1]
         logging.debug(kvm_test_utils.get_memory_info([vm]))
         logging.info("Phase 2d: PASS")
 
         logging.info("Phase 2e: Simultaneous verification")
         for i in range(0, max_alloc):
             a_cmd = "mem.value_check(%d)" % skeys[0]
-            (match, data) = _execute_allocator(a_cmd, vm, lsessions[i],
-                                               (mem / 200 * 50 * perf_ratio))
+            data = _execute_allocator(a_cmd, vm, lsessions[i],
+                                      (mem / 200 * 50 * perf_ratio))[1]
         logging.info("Phase 2e: PASS")
 
         logging.info("Phase 2f: Simultaneous spliting last 96B")
         for i in range(0, max_alloc):
             a_cmd = "mem.static_random_fill(96)"
-            (match, data) = _execute_allocator(a_cmd, vm, lsessions[i],
-                                               60 * perf_ratio)
+            data = _execute_allocator(a_cmd, vm, lsessions[i],
+                                      60 * perf_ratio)[1]
 
             data = data.splitlines()[-1]
             out = int(data.split()[4])
@@ -360,7 +356,7 @@
 
         logging.debug("Cleaning up...")
         for i in range(0, max_alloc):
-            lsessions[i].get_command_status_output("die()", 20)
+            lsessions[i].cmd_output("die()", 20)
         session.close()
         vm.destroy(gracefully = False)
 
@@ -373,10 +369,12 @@
         utils.run("echo 5000 > /sys/kernel/mm/ksm/pages_to_scan")
         utils.run("echo 1 > /sys/kernel/mm/ksm/run")
 
-        if (os.path.exists("/sys/kernel/mm/transparent_hugepage/enabled")):
-            utils.run("echo 'never' > /sys/kernel/mm/transparent_hugepage/enabled ")
-        if (os.path.exists("/sys/kernel/mm/redhat_transparent_hugepage/enabled")):
-            utils.run("echo 'never' > /sys/kernel/mm/redhat_transparent_hugepage/enabled ")
+        e_up = "/sys/kernel/mm/transparent_hugepage/enabled"
+        e_rh = "/sys/kernel/mm/redhat_transparent_hugepage/enabled"
+        if os.path.exists(e_up):
+            utils.run("echo 'never' > %s" % e_up)
+        if os.path.exists(e_rh):
+            utils.run("echo 'never' > %s" % e_rh)
         new_ksm = True
     else:
         try:
@@ -518,14 +516,14 @@
             key = random.randrange(0, 999)
         dkeys.append(key)
 
-    logging.debug("skeys: %s" % skeys)
-    logging.debug("dkeys: %s" % dkeys)
+    logging.debug("skeys: %s", skeys)
+    logging.debug("dkeys: %s", dkeys)
 
     lvms = []
     lsessions = []
 
     # As we don't know the number and memory amount of VMs in advance,
-    # we need to specify and create them here (FIXME: not a nice thing)
+    # we need to specify and create them here
     vm_name = params.get("main_vm")
     params['mem'] = mem
     params['vms'] = vm_name
@@ -541,11 +539,11 @@
 
     # ksm_size: amount of memory used by allocator
     ksm_size = mem - guest_reserve
-    logging.debug("Memory used by allocator on guests = %dM" % (ksm_size))
+    logging.debug("Memory used by allocator on guests = %dM", ksm_size)
 
     # Creating the first guest
     kvm_preprocessing.preprocess_vm(test, params, env, vm_name)
-    lvms.append(kvm_utils.env_get_vm(env, vm_name))
+    lvms.append(env.get_vm(vm_name))
     if not lvms[0]:
         raise error.TestError("VM object not found in environment")
     if not lvms[0].is_alive():
@@ -554,9 +552,7 @@
 
     logging.debug("Booting first guest %s", lvms[0].name)
 
-    lsessions.append(kvm_utils.wait_for(lvms[0].remote_login, 360, 0, 2))
-    if not lsessions[0]:
-        raise error.TestFail("Could not log into first guest")
+    lsessions.append(lvms[0].wait_for_login(timeout=360))
     # Associate vm PID
     try:
         tmp = open(params.get('pid_' + vm_name), 'r')
@@ -576,20 +572,16 @@
 
         # Last VM is later used to run more allocators simultaneously
         lvms.append(lvms[0].clone(vm_name, params))
-        kvm_utils.env_register_vm(env, vm_name, lvms[i])
+        env.register_vm(vm_name, lvms[i])
         params['vms'] += " " + vm_name
 
-        logging.debug("Booting guest %s" % lvms[i].name)
-        if not lvms[i].create():
-            raise error.TestFail("Cannot create VM %s" % lvms[i].name)
+        logging.debug("Booting guest %s", lvms[i].name)
+        lvms[i].create()
         if not lvms[i].is_alive():
             raise error.TestError("VM %s seems to be dead; Test requires a"
                                   "living VM" % lvms[i].name)
 
-        lsessions.append(kvm_utils.wait_for(lvms[i].remote_login, 360, 0, 2))
-        if not lsessions[i]:
-            raise error.TestFail("Could not log into guest %s" %
-                                 lvms[i].name)
+        lsessions.append(lvms[i].wait_for_login(timeout=360))
         try:
             tmp = open(params.get('pid_' + vm_name), 'r')
             params['pid_' + vm_name] = int(tmp.readline())
@@ -602,13 +594,12 @@
     time.sleep(vmsc * 2 * perf_ratio)
     logging.debug(kvm_test_utils.get_memory_info(lvms))
 
-    # Copy allocator.py into guests
+    # Copy ksm_overcommit_guest.py into guests
     pwd = os.path.join(os.environ['AUTODIR'],'tests/kvm')
-    vksmd_src = os.path.join(pwd, "scripts/allocator.py")
+    vksmd_src = os.path.join(pwd, "scripts/ksm_overcommit_guest.py")
     dst_dir = "/tmp"
     for vm in lvms:
-        if not vm.copy_files_to(vksmd_src, dst_dir):
-            raise error.TestFail("copy_files_to failed %s" % vm.name)
+        vm.copy_files_to(vksmd_src, dst_dir)
     logging.info("Phase 0: PASS")
 
     if params['ksm_mode'] == "parallel":
diff --git a/client/tests/kvm/tests/linux_s3.py b/client/tests/kvm/tests/linux_s3.py
index 4a782b8..5a04fca 100644
--- a/client/tests/kvm/tests/linux_s3.py
+++ b/client/tests/kvm/tests/linux_s3.py
@@ -1,6 +1,5 @@
 import logging, time
 from autotest_lib.client.common_lib import error
-import kvm_test_utils
 
 
 def run_linux_s3(test, params, env):
@@ -11,23 +10,20 @@
     @param params: Dictionary with test parameters.
     @param env: Dictionary with the test environment.
     """
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
     timeout = int(params.get("login_timeout", 360))
-    session = kvm_test_utils.wait_for_login(vm, timeout=timeout)
+    session = vm.wait_for_login(timeout=timeout)
 
     logging.info("Checking that VM supports S3")
-    status = session.get_command_status("grep -q mem /sys/power/state")
-    if status == None:
-        logging.error("Failed to check if S3 exists")
-    elif status != 0:
-        raise error.TestFail("Guest does not support S3")
+    session.cmd("grep -q mem /sys/power/state")
 
     logging.info("Waiting for a while for X to start")
     time.sleep(10)
 
-    src_tty = session.get_command_output("fgconsole").strip()
-    logging.info("Current virtual terminal is %s" % src_tty)
-    if src_tty not in map(str, range(1,10)):
+    src_tty = session.cmd_output("fgconsole").strip()
+    logging.info("Current virtual terminal is %s", src_tty)
+    if src_tty not in map(str, range(1, 10)):
         raise error.TestFail("Got a strange current vt (%s)" % src_tty)
 
     dst_tty = "1"
@@ -38,9 +34,7 @@
     command = "chvt %s && echo mem > /sys/power/state && chvt %s" % (dst_tty,
                                                                      src_tty)
     suspend_timeout = 120 + int(params.get("smp")) * 60
-    status = session.get_command_status(command, timeout=suspend_timeout)
-    if status != 0:
-        raise error.TestFail("Suspend to mem failed")
+    session.cmd(command, timeout=suspend_timeout)
 
     logging.info("VM resumed after S3")
 
diff --git a/client/tests/kvm/tests/mac_change.py b/client/tests/kvm/tests/mac_change.py
index c614e15..3fd196f 100644
--- a/client/tests/kvm/tests/mac_change.py
+++ b/client/tests/kvm/tests/mac_change.py
@@ -15,14 +15,12 @@
     @param params: Dictionary with the test parameters.
     @param env: Dictionary with test environment.
     """
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
     timeout = int(params.get("login_timeout", 360))
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
-    logging.info("Trying to log into guest '%s' by serial", vm.name)
-    session = kvm_utils.wait_for(lambda: vm.serial_login(),
-                                  timeout, 0, step=2)
-    if not session:
-        raise error.TestFail("Could not log into guest '%s'" % vm.name)
-
+    session_serial = vm.wait_for_serial_login(timeout=timeout)
+    # This session will be used to assess whether the IP change worked
+    session = vm.wait_for_login(timeout=timeout)
     old_mac = vm.get_mac_address(0)
     while True:
         vm.free_mac_address(0)
@@ -30,23 +28,21 @@
         if old_mac != new_mac:
             break
     logging.info("The initial MAC address is %s", old_mac)
-    interface = kvm_test_utils.get_linux_ifname(session, old_mac)
+    interface = kvm_test_utils.get_linux_ifname(session_serial, old_mac)
     # Start change MAC address
     logging.info("Changing MAC address to %s", new_mac)
     change_cmd = ("ifconfig %s down && ifconfig %s hw ether %s && "
                   "ifconfig %s up" % (interface, interface, new_mac, interface))
-    if session.get_command_status(change_cmd) != 0:
-        raise error.TestFail("Fail to send mac_change command")
+    session_serial.cmd(change_cmd)
 
     # Verify whether MAC address was changed to the new one
     logging.info("Verifying the new mac address")
-    if session.get_command_status("ifconfig | grep -i %s" % new_mac) != 0:
-        raise error.TestFail("Fail to change MAC address")
+    session_serial.cmd("ifconfig | grep -i %s" % new_mac)
 
     # Restart `dhclient' to regain IP for new mac address
     logging.info("Restart the network to gain new IP")
     dhclient_cmd = "dhclient -r && dhclient %s" % interface
-    session.sendline(dhclient_cmd)
+    session_serial.sendline(dhclient_cmd)
 
     # Re-log into the guest after changing mac address
     if kvm_utils.wait_for(session.is_responsive, 120, 20, 3):
@@ -57,8 +53,7 @@
 
     # Re-log into guest and check if session is responsive
     logging.info("Re-log into the guest")
-    session = kvm_test_utils.wait_for_login(vm,
-              timeout=int(params.get("login_timeout", 360)))
+    session = vm.wait_for_login(timeout=timeout)
     if not session.is_responsive():
         raise error.TestFail("The new session is not responsive.")
 
diff --git a/client/tests/kvm/tests/migration.py b/client/tests/kvm/tests/migration.py
index d6f4b11..b462e66 100644
--- a/client/tests/kvm/tests/migration.py
+++ b/client/tests/kvm/tests/migration.py
@@ -1,6 +1,6 @@
 import logging, time
 from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils
+import kvm_utils
 
 
 def run_migration(test, params, env):
@@ -19,17 +19,20 @@
     @param params: Dictionary with test parameters.
     @param env: Dictionary with the test environment.
     """
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
     timeout = int(params.get("login_timeout", 360))
-    session = kvm_test_utils.wait_for_login(vm, timeout=timeout)
+    session = vm.wait_for_login(timeout=timeout)
 
     mig_timeout = float(params.get("mig_timeout", "3600"))
     mig_protocol = params.get("migration_protocol", "tcp")
-    mig_cancel = bool(params.get("mig_cancel"))
+    mig_cancel_delay = int(params.get("mig_cancel") == "yes") * 2
+    offline = params.get("offline", "no") == "yes"
+    check = params.get("vmstate_check", "no") == "yes"
 
     # Get the output of migration_test_command
     test_command = params.get("migration_test_command")
-    reference_output = session.get_command_output(test_command)
+    reference_output = session.cmd_output(test_command)
 
     # Start some process in the background (and leave the session open)
     background_command = params.get("migration_bg_command", "")
@@ -38,39 +41,32 @@
 
     # Start another session with the guest and make sure the background
     # process is running
-    session2 = kvm_test_utils.wait_for_login(vm, timeout=timeout)
+    session2 = vm.wait_for_login(timeout=timeout)
 
     try:
         check_command = params.get("migration_bg_check_command", "")
-        if session2.get_command_status(check_command, timeout=30) != 0:
-            raise error.TestError("Could not start background process '%s'" %
-                                  background_command)
+        session2.cmd(check_command, timeout=30)
         session2.close()
 
         # Migrate the VM
-        dest_vm = kvm_test_utils.migrate(vm, env,mig_timeout, mig_protocol,
-                                         mig_cancel)
+        vm.migrate(mig_timeout, mig_protocol, mig_cancel_delay, offline, check)
 
         # Log into the guest again
         logging.info("Logging into guest after migration...")
-        session2 = kvm_utils.wait_for(dest_vm.remote_login, 30, 0, 2)
-        if not session2:
-            raise error.TestFail("Could not log into guest after migration")
+        session2 = vm.wait_for_login(timeout=30)
         logging.info("Logged in after migration")
 
         # Make sure the background process is still running
-        if session2.get_command_status(check_command, timeout=30) != 0:
-            raise error.TestFail("Could not find running background process "
-                                 "after migration: '%s'" % background_command)
+        session2.cmd(check_command, timeout=30)
 
         # Get the output of migration_test_command
-        output = session2.get_command_output(test_command)
+        output = session2.cmd_output(test_command)
 
         # Compare output to reference output
         if output != reference_output:
             logging.info("Command output before migration differs from "
                          "command output after migration")
-            logging.info("Command: %s" % test_command)
+            logging.info("Command: %s", test_command)
             logging.info("Output before:" +
                          kvm_utils.format_str_for_message(reference_output))
             logging.info("Output after:" +
@@ -81,8 +77,7 @@
     finally:
         # Kill the background process
         if session2 and session2.is_alive():
-            session2.get_command_output(params.get("migration_bg_kill_command",
-                                                   ""))
+            session2.cmd_output(params.get("migration_bg_kill_command", ""))
 
     session2.close()
     session.close()
diff --git a/client/tests/kvm/tests/migration_multi_host.py b/client/tests/kvm/tests/migration_multi_host.py
new file mode 100644
index 0000000..30e3ecc
--- /dev/null
+++ b/client/tests/kvm/tests/migration_multi_host.py
@@ -0,0 +1,107 @@
+import logging, socket
+from autotest_lib.client.common_lib import error
+
+
+def run_migration_multi_host(test, params, env):
+    """
+    KVM multi-host migration test:
+
+    Migration execution progress:
+
+    source host                       dest host
+    ----------------------------------------------------------------------------
+    log into guest
+    ----------------------------------------------------------------------------
+    start socket server
+
+    wait 30 secs -------------------- wait login_timeout+30 secs ---------------
+
+    accept connection                 connect to socket server,send mig_port
+    ----------------------------------------------------------------------------
+    start migration
+
+    wait 30 secs -------------------- wait mig_timeout+30 secs -----------------
+
+    try to log into migrated guest    check VM's status via monitor cmd
+    ----------------------------------------------------------------------------
+
+    @param test: kvm test object.
+    @param params: Dictionary with test parameters.
+    @param env: Dictionary with the test environment.
+    """
+    def guest_active(vm):
+        o = vm.monitor.info("status")
+        if isinstance(o, str):
+            return "status: running" in o
+        else:
+            return o.get("status") == "running"
+
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+    login_timeout = int(params.get("login_timeout", 360))
+    role = params.get("role")
+    srchost = params.get("srchost")
+    dsthost = params.get("dsthost")
+    mig_timeout = int(params.get("mig_timeout"))
+    # Port used to communicate info between source and destination
+    comm_port = int(params.get("comm_port", 12324))
+    regain_ip_cmd = params.get("regain_ip_cmd", "dhclient")
+    if role == 'source':
+        session = vm.wait_for_login(timeout=login_timeout)
+
+        # Listen on a port to get the migration port received from
+        # dest machine
+        s_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        s_socket.bind(('', comm_port))
+        s_socket.listen(1)
+
+        # Wait 30 seconds for source and dest to reach this point
+        test.job.barrier(srchost, 'socket_started', 30).rendezvous(srchost,
+                                                                   dsthost)
+
+        c_socket = s_socket.accept()[0]
+        mig_port = int(c_socket.recv(6))
+        logging.info("Received from destination the migration port %s",
+                     mig_port)
+        c_socket.close()
+
+        logging.info("Start migrating now...")
+        vm.migrate(dest_host=dsthost, remote_port=mig_port)
+
+        # Wait up to 30 seconds for dest to reach this point
+        test.job.barrier(srchost, 'mig_finished', 30).rendezvous(srchost,
+                                                                 dsthost)
+
+    elif role == 'destination':
+        # Wait up to login_timeout + 30 seconds for the source to
+        # reach this point
+        test.job.barrier(dsthost, 'socket_started',
+                         login_timeout + 30).rendezvous(srchost,
+                                                        dsthost)
+
+        c_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        c_socket.connect((srchost, comm_port))
+        logging.info("Communicating to source migration port %s",
+                     vm.migration_port)
+        c_socket.send("%d" % vm.migration_port)
+        c_socket.close()
+
+        # Wait up to mig_timeout + 30 seconds for the source to
+        # reach this point: migration finished
+        test.job.barrier(dsthost, 'mig_finished',
+                         mig_timeout + 30).rendezvous(srchost,
+                                                      dsthost)
+
+        if not guest_active(vm):
+            raise error.TestFail("Guest not active after migration")
+
+        logging.info("Migrated guest appears to be running")
+
+        # Log into the guest again
+        logging.info("Logging into migrated guest after migration...")
+        session_serial = vm.wait_for_serial_login(timeout=login_timeout)
+        session_serial.cmd(regain_ip_cmd)
+        session = vm.wait_for_login(timeout=login_timeout)
+
+    else:
+        raise error.TestError('Invalid role specified')
diff --git a/client/tests/kvm/tests/migration_with_file_transfer.py b/client/tests/kvm/tests/migration_with_file_transfer.py
new file mode 100644
index 0000000..044c0c8
--- /dev/null
+++ b/client/tests/kvm/tests/migration_with_file_transfer.py
@@ -0,0 +1,85 @@
+import logging, time, os
+from autotest_lib.client.common_lib import utils, error
+from autotest_lib.client.bin import utils as client_utils
+import kvm_utils
+
+
+@error.context_aware
+def run_migration_with_file_transfer(test, params, env):
+    """
+    KVM migration with file transfer test:
+    1) Get a live VM and verify that it is alive.
+    2) Transfer a random file from the host to the guest, repeatedly
+            migrating the VM while the transfer is in progress.
+    3) Transfer the file back from the guest to the host, again migrating
+            the VM repeatedly during the transfer.
+    4) Compare the hash of the returned file with that of the original to
+            make sure the transfers were not corrupted.
+
+    @param test: kvm test object.
+    @param params: Dictionary with test parameters.
+    @param env: Dictionary with the test environment.
+    """
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+    login_timeout = int(params.get("login_timeout", 360))
+    session = vm.wait_for_login(timeout=login_timeout)
+
+    mig_timeout = float(params.get("mig_timeout", "3600"))
+    mig_protocol = params.get("migration_protocol", "tcp")
+    mig_cancel_delay = int(params.get("mig_cancel") == "yes") * 2
+
+    host_path = "/tmp/file-%s" % kvm_utils.generate_random_string(6)
+    host_path_returned = "%s-returned" % host_path
+    guest_path = params.get("guest_path", "/tmp/file")
+    file_size = params.get("file_size", "500")
+    transfer_timeout = int(params.get("transfer_timeout", "240"))
+
+    try:
+        utils.run("dd if=/dev/urandom of=%s bs=1M count=%s" % (host_path,
+                                                               file_size))
+
+        def run_and_migrate(bg):
+            bg.start()
+            try:
+                while bg.isAlive():
+                    logging.info("File transfer not ended, starting a round of "
+                                 "migration...")
+                    vm.migrate(mig_timeout, mig_protocol, mig_cancel_delay)
+            except:
+                # If something bad happened in the main thread, ignore
+                # exceptions raised in the background thread
+                bg.join(suppress_exception=True)
+                raise
+            else:
+                bg.join()
+
+        error.context("transferring file to guest while migrating",
+                      logging.info)
+        bg = kvm_utils.Thread(vm.copy_files_to, (host_path, guest_path),
+                              dict(verbose=True, timeout=transfer_timeout))
+        run_and_migrate(bg)
+
+        error.context("transferring file back to host while migrating",
+                      logging.info)
+        bg = kvm_utils.Thread(vm.copy_files_from,
+                              (guest_path, host_path_returned),
+                              dict(verbose=True, timeout=transfer_timeout))
+        run_and_migrate(bg)
+
+        # Make sure the returned file is identical to the original one
+        error.context("comparing hashes", logging.info)
+        orig_hash = client_utils.hash_file(host_path)
+        returned_hash = client_utils.hash_file(host_path_returned)
+        if orig_hash != returned_hash:
+            raise error.TestFail("Returned file hash (%s) differs from "
+                                 "original one (%s)" % (returned_hash,
+                                                        orig_hash))
+        error.context()
+
+    finally:
+        session.close()
+        if os.path.isfile(host_path):
+            os.remove(host_path)
+        if os.path.isfile(host_path_returned):
+            os.remove(host_path_returned)
diff --git a/client/tests/kvm/tests/migration_with_reboot.py b/client/tests/kvm/tests/migration_with_reboot.py
new file mode 100644
index 0000000..a15f983
--- /dev/null
+++ b/client/tests/kvm/tests/migration_with_reboot.py
@@ -0,0 +1,43 @@
+import kvm_utils
+
+
+def run_migration_with_reboot(test, params, env):
+    """
+    KVM migration test:
+    1) Get a live VM and clone it.
+    2) Verify that the source VM supports migration.  If it does, proceed with
+            the test.
+    3) Reboot the VM
+    4) Send a migration command to the source VM and wait until it's finished.
+    5) Kill off the source VM.
+    6) Log into the destination VM after the migration is finished.
+
+    @param test: kvm test object.
+    @param params: Dictionary with test parameters.
+    @param env: Dictionary with the test environment.
+    """
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+    login_timeout = int(params.get("login_timeout", 360))
+    session = vm.wait_for_login(timeout=login_timeout)
+
+    mig_timeout = float(params.get("mig_timeout", "3600"))
+    mig_protocol = params.get("migration_protocol", "tcp")
+    mig_cancel_delay = int(params.get("mig_cancel") == "yes") * 2
+
+    try:
+        # Reboot the VM in the background
+        bg = kvm_utils.Thread(vm.reboot, (session,))
+        bg.start()
+        try:
+            while bg.isAlive():
+                vm.migrate(mig_timeout, mig_protocol, mig_cancel_delay)
+        except:
+            # If something bad happened in the main thread, ignore exceptions
+            # raised in the background thread
+            bg.join(suppress_exception=True)
+            raise
+        else:
+            session = bg.join()
+    finally:
+        session.close()
diff --git a/client/tests/kvm/tests/module_probe.py b/client/tests/kvm/tests/module_probe.py
new file mode 100644
index 0000000..72f239b
--- /dev/null
+++ b/client/tests/kvm/tests/module_probe.py
@@ -0,0 +1,56 @@
+import re, commands, logging, os
+from autotest_lib.client.common_lib import error, utils
+import kvm_subprocess, kvm_test_utils, kvm_utils, installer
+
+
+def run_module_probe(test, params, env):
+    """
+    load/unload KVM modules several times.
+
+    The test can run in two modes:
+
+    - based on previous 'build' test: in case KVM modules were installed by a
+      'build' test, we used the modules installed by the previous test.
+
+    - based on own params: if no previous 'build' test was run,
+      we assume a pre-installed KVM module. Some parameters that
+      work for the 'build' can be used, then, such as 'extra_modules'.
+    """
+
+    installer_object = env.previous_installer()
+    if installer_object is None:
+        installer_object = installer.PreInstalledKvm()
+        installer_object.set_install_params(test, params)
+
+    logging.debug('installer object: %r', installer_object)
+
+    mod_str = params.get("mod_list")
+    if mod_str:
+        mod_list = re.split("[, ]", mod_str)
+        logging.debug("mod list will be: %r", mod_list)
+    else:
+        mod_list = installer_object.full_module_list()
+        logging.debug("mod list from installer: %r", mod_list)
+
+    # unload the modules before starting:
+    installer_object._unload_modules(mod_list)
+
+    load_count = int(params.get("load_count", 100))
+    try:
+        for i in range(load_count):
+            try:
+                installer_object.load_modules(mod_list)
+            except Exception, e:
+                raise error.TestFail("Failed to load modules [%r]: %s" %
+                                     (mod_list, e))
+
+            # unload using rmmod directly because utils.unload_module() (used by
+            # installer) does too much (runs lsmod, checks for dependencies),
+            # and we want to run the loop as fast as possible.
+            for mod in reversed(mod_list):
+                r = utils.system("rmmod %s" % (mod), ignore_status=True)
+                if r != 0:
+                    raise error.TestFail("Failed to unload module %s. "
+                                         "exit status: %d" % (mod, r))
+    finally:
+        installer_object.load_modules()
diff --git a/client/tests/kvm/tests/multicast.py b/client/tests/kvm/tests/multicast.py
index a47779a..5dfecbc 100644
--- a/client/tests/kvm/tests/multicast.py
+++ b/client/tests/kvm/tests/multicast.py
@@ -1,7 +1,7 @@
 import logging, os, re
 from autotest_lib.client.common_lib import error
 from autotest_lib.client.bin import utils
-import kvm_test_utils
+import kvm_test_utils, kvm_subprocess
 
 
 def run_multicast(test, params, env):
@@ -18,15 +18,15 @@
     @param params: Dictionary with the test parameters.
     @param env: Dictionary with test environment.
     """
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
-    session = kvm_test_utils.wait_for_login(vm,
-                                  timeout=int(params.get("login_timeout", 360)))
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))
 
     def run_guest(cmd):
-        s, o = session.get_command_status_output(cmd)
-        if s:
-            logging.warning('Command %s executed in guest returned exit code '
-                            '%s, output: %s', cmd, s, o.strip())
+        try:
+            session.cmd(cmd)
+        except kvm_subprocess.ShellError, e:
+            logging.warn(e)
 
     def run_host_guest(cmd):
         run_guest(cmd)
@@ -53,11 +53,10 @@
     prefix = re.findall("\d+.\d+.\d+", mcast)[0]
     suffix = int(re.findall("\d+", mcast)[-1])
     # copy python script to guest for joining guest to multicast groups
-    mcast_path = os.path.join(test.bindir, "scripts/join_mcast.py")
-    if not vm.copy_files_to(mcast_path, "/tmp"):
-        raise error.TestError("Fail to copy %s to guest" % mcast_path)
-    output = session.get_command_output("python /tmp/join_mcast.py %d %s %d" %
-                                        (mgroup_count, prefix, suffix))
+    mcast_path = os.path.join(test.bindir, "scripts/multicast_guest.py")
+    vm.copy_files_to(mcast_path, "/tmp")
+    output = session.cmd_output("python /tmp/multicast_guest.py %d %s %d" %
+                                (mgroup_count, prefix, suffix))
 
     # if success to join multicast, the process will be paused, and return PID.
     try:
@@ -86,6 +85,6 @@
                                      (s, o))
 
     finally:
-        logging.debug(session.get_command_output("ipmaddr show"))
-        session.get_command_output("kill -s SIGCONT %s" % pid)
+        logging.debug(session.cmd_output("ipmaddr show"))
+        session.cmd_output("kill -s SIGCONT %s" % pid)
         session.close()
diff --git a/client/tests/kvm/tests/netperf.py b/client/tests/kvm/tests/netperf.py
index dc21e0f..e1153e1 100644
--- a/client/tests/kvm/tests/netperf.py
+++ b/client/tests/kvm/tests/netperf.py
@@ -1,7 +1,8 @@
-import logging, commands, os
+import logging, os
 from autotest_lib.client.common_lib import error
 from autotest_lib.client.bin import utils
-import kvm_test_utils
+import kvm_subprocess
+
 
 def run_netperf(test, params, env):
     """
@@ -16,9 +17,10 @@
     @param params: Dictionary with the test parameters.
     @param env: Dictionary with test environment.
     """
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
     login_timeout = int(params.get("login_timeout", 360))
-    session = kvm_test_utils.wait_for_login(vm, timeout=login_timeout)
+    session = vm.wait_for_login(timeout=login_timeout)
 
     netperf_dir = os.path.join(os.environ['AUTODIR'], "tests/netperf2")
     setup_cmd = params.get("setup_cmd")
@@ -26,20 +28,18 @@
     result_file = os.path.join(test.resultsdir, "output_%s" % test.iteration)
 
     firewall_flush = "iptables -F"
-    session.get_command_output(firewall_flush)
+    session.cmd_output(firewall_flush)
 
     for i in params.get("netperf_files").split():
-        if not vm.copy_files_to(os.path.join(netperf_dir, i), "/tmp"):
-            raise error.TestError("Could not copy file %s to guest" % i)
+        vm.copy_files_to(os.path.join(netperf_dir, i), "/tmp")
 
-    if session.get_command_status(firewall_flush):
+    try:
+        session.cmd(firewall_flush)
+    except kvm_subprocess.ShellError:
         logging.warning("Could not flush firewall rules on guest")
 
-    if session.get_command_status(setup_cmd % "/tmp", timeout=200):
-        raise error.TestFail("Fail to setup netperf on guest")
-
-    if session.get_command_status(params.get("netserver_cmd") % "/tmp"):
-        raise error.TestFail("Fail to start netperf server on guest")
+    session.cmd(setup_cmd % "/tmp", timeout=200)
+    session.cmd(params.get("netserver_cmd") % "/tmp")
 
     try:
         logging.info("Setup and run netperf client on host")
@@ -49,15 +49,18 @@
         result.write("Netperf test results\n")
 
         for i in params.get("protocols").split():
-            cmd = params.get("netperf_cmd") % (netperf_dir, i, guest_ip)
-            logging.info("Netperf: protocol %s", i)
-            try:
-                netperf_output = utils.system_output(cmd,
-                                                     retain_output=True)
-                result.write("%s\n" % netperf_output)
-            except:
-                logging.error("Test of protocol %s failed", i)
-                list_fail.append(i)
+            packet_size = params.get("packet_size", "1500")
+            for size in packet_size.split():
+                cmd = params.get("netperf_cmd") % (netperf_dir, i,
+                                                   guest_ip, size)
+                logging.info("Netperf: protocol %s", i)
+                try:
+                    netperf_output = utils.system_output(cmd,
+                                                         retain_output=True)
+                    result.write("%s\n" % netperf_output)
+                except:
+                    logging.error("Test of protocol %s failed", i)
+                    list_fail.append(i)
 
         result.close()
 
@@ -66,5 +69,5 @@
                                  ", ".join(list_fail))
 
     finally:
-        session.get_command_output("killall netserver")
+        session.cmd_output("killall netserver")
         session.close()
diff --git a/client/tests/kvm/tests/nic_bonding.py b/client/tests/kvm/tests/nic_bonding.py
new file mode 100644
index 0000000..edbf916
--- /dev/null
+++ b/client/tests/kvm/tests/nic_bonding.py
@@ -0,0 +1,55 @@
+import logging, time, threading
+from autotest_lib.client.tests.kvm.tests import file_transfer
+import kvm_utils
+
+
+def run_nic_bonding(test, params, env):
+    """
+    Nic bonding test in guest.
+
+    1) Start guest with four nic models.
+    2) Setup bond0 in guest by script nic_bonding_guest.py.
+    3) Execute file transfer test between guest and host.
+    4) Repeatedly put down/up interfaces by set_link
+    5) Execute file transfer test between guest and host.
+
+    @param test: Kvm test object.
+    @param params: Dictionary with the test parameters.
+    @param env: Dictionary with test environment.
+    """
+    def control_link_loop(vm, termination_event):
+        logging.info("Repeatedly put down/up interfaces by set_link")
+        while True:
+            for i in range(len(params.get("nics").split())):
+                linkname = "%s.%s" % (params.get("nic_model"), i)
+                cmd = "set_link %s down" % linkname
+                vm.monitor.cmd(cmd)
+                time.sleep(1)
+                cmd = "set_link %s up" % linkname
+                vm.monitor.cmd(cmd)
+            if termination_event.isSet():
+                break
+
+    timeout = int(params.get("login_timeout", 1200))
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+    session_serial = vm.wait_for_serial_login(timeout=timeout)
+    script_path = kvm_utils.get_path(test.bindir,
+                                     "scripts/nic_bonding_guest.py")
+    vm.copy_files_to(script_path, "/tmp/nic_bonding_guest.py")
+    cmd = "python /tmp/nic_bonding_guest.py %s" % vm.get_mac_address()
+    session_serial.cmd(cmd)
+
+    termination_event = threading.Event()
+    t = threading.Thread(target=control_link_loop,
+                         args=(vm, termination_event))
+    try:
+        logging.info("Do some basic test before testing high availability")
+        file_transfer.run_file_transfer(test, params, env)
+        t.start()
+        logging.info("Do file transfer testing")
+        file_transfer.run_file_transfer(test, params, env)
+    finally:
+        termination_event.set()
+        t.join(10)
+        session_serial.close()
diff --git a/client/tests/kvm/tests/nic_hotplug.py b/client/tests/kvm/tests/nic_hotplug.py
new file mode 100644
index 0000000..50a3ce9
--- /dev/null
+++ b/client/tests/kvm/tests/nic_hotplug.py
@@ -0,0 +1,144 @@
+import logging
+from autotest_lib.client.common_lib import error
+import kvm_test_utils, kvm_utils
+
+
+def run_nic_hotplug(test, params, env):
+    """
+    Test hotplug of NIC devices
+
+    1) Boot up guest with one nic
+    2) Add a host network device through monitor cmd and check if it's added
+    3) Add nic device through monitor cmd and check if it's added
+    4) Check if new interface gets ip address
+    5) Disable primary link of guest
+    6) Ping guest new ip from host
+    7) Delete nic device and netdev
+    8) Re-enable primary link of guest
+
+    @param test:   KVM test object.
+    @param params: Dictionary with the test parameters.
+    @param env:    Dictionary with test environment.
+    """
+    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+    timeout = int(params.get("login_timeout", 360))
+    guest_delay = int(params.get("guest_delay", 20))
+    session_serial = kvm_test_utils.wait_for_login(vm, timeout=timeout,
+                                                   serial=True)
+
+    # Modprobe the module if specified in config file
+    module = params.get("modprobe_module")
+    if module:
+        session_serial.get_command_output("modprobe %s" % module)
+
+    def netdev_add(vm):
+        netdev_id = kvm_utils.generate_random_id()
+        attach_cmd = ("netdev_add tap,id=%s,script=%s" %
+                      (netdev_id, kvm_utils.get_path(vm.root_dir,
+                                                     params.get("nic_script"))))
+        netdev_extra_params = params.get("netdev_extra_params")
+        if netdev_extra_params:
+            attach_cmd += ",%s" % netdev_extra_params
+        logging.info("Adding netdev through %s", attach_cmd)
+        vm.monitor.cmd(attach_cmd)
+
+        network = vm.monitor.info("network")
+        if netdev_id not in network:
+            logging.error(network)
+            raise error.TestError("Fail to add netdev: %s" % netdev_id)
+        else:
+            return netdev_id
+
+    def netdev_del(vm, n_id):
+        vm.monitor.cmd("netdev_del %s" % n_id)
+
+        network = vm.monitor.info("network")
+        if n_id in network:
+            logging.error(network)
+            raise error.TestError("Fail to remove netdev %s" % n_id)
+
+    def nic_add(vm, model, netdev_id, mac):
+        """
+        Add a nic to virtual machine
+
+        @vm: VM object
+        @model: nic model
+        @netdev_id: id of netdev
+        @mac: Mac address of new nic
+        """
+        nic_id = kvm_utils.generate_random_id()
+        if model == "virtio":
+            model = "virtio-net-pci"
+        device_add_cmd = "device_add %s,netdev=%s,mac=%s,id=%s" % (model,
+                                                                   netdev_id,
+                                                                   mac, nic_id)
+        logging.info("Adding nic through %s", device_add_cmd)
+        vm.monitor.cmd(device_add_cmd)
+
+        qdev = vm.monitor.info("qtree")
+        if nic_id not in qdev:
+            logging.error(qdev)
+            raise error.TestFail("Device %s was not plugged into qdev "
+                                 "tree" % nic_id)
+        else:
+            return nic_id
+
+    def nic_del(vm, nic_id, wait=True):
+        """
+        Remove the nic from pci tree.
+
+        @vm: VM object
+        @id: the nic id
+        @wait: Whether need to wait for the guest to unplug the device
+        """
+        nic_del_cmd = "device_del %s" % nic_id
+        vm.monitor.cmd(nic_del_cmd)
+        if wait:
+            logging.info("waiting for the guest to finish the unplug")
+            if not kvm_utils.wait_for(lambda: nic_id not in
+                                      vm.monitor.info("qtree"),
+                                      guest_delay, 5, 1):
+                logging.error(vm.monitor.info("qtree"))
+                raise error.TestError("Device is not unplugged by "
+                                      "guest, please check whether the "
+                                      "hotplug module was loaded in guest")
+
+    logging.info("Attach a virtio nic to vm")
+    mac = kvm_utils.generate_mac_address(vm.instance, 1)
+    if not mac:
+        mac = "00:00:02:00:00:02"
+    netdev_id = netdev_add(vm)
+    device_id = nic_add(vm, "virtio", netdev_id, mac)
+
+    if "Win" not in params.get("guest_name", ""):
+        session_serial.sendline("dhclient %s &" %
+                         kvm_test_utils.get_linux_ifname(session_serial, mac))
+
+    logging.info("Shutting down the primary link")
+    vm.monitor.cmd("set_link %s down" % vm.netdev_id[0])
+
+    try:
+        logging.info("Waiting for new nic's ip address acquisition...")
+        if not kvm_utils.wait_for(lambda: (vm.address_cache.get(mac) is
+                                           not None), 10, 1):
+            raise error.TestFail("Could not get ip address of new nic")
+        ip = vm.address_cache.get(mac)
+        if not kvm_utils.verify_ip_address_ownership(ip, mac):
+            raise error.TestFail("Could not verify the ip address of new nic")
+        else:
+            logging.info("Got the ip address of new nic: %s", ip)
+
+        logging.info("Ping test the new nic ...")
+        s, o = kvm_test_utils.ping(ip, 100)
+        if s != 0:
+            logging.error(o)
+            raise error.TestFail("New nic failed ping test")
+
+        logging.info("Detaching a virtio nic from vm")
+        nic_del(vm, device_id)
+        netdev_del(vm, netdev_id)
+
+    finally:
+        vm.free_mac_address(1)
+        logging.info("Re-enabling the primary link")
+        vm.monitor.cmd("set_link %s up" % vm.netdev_id[0])
diff --git a/client/tests/kvm/tests/nic_promisc.py b/client/tests/kvm/tests/nic_promisc.py
index 99bbf8c..c6d70b6 100644
--- a/client/tests/kvm/tests/nic_promisc.py
+++ b/client/tests/kvm/tests/nic_promisc.py
@@ -1,8 +1,9 @@
-import logging
+import logging, threading
 from autotest_lib.client.common_lib import error
 from autotest_lib.client.bin import utils
 import kvm_utils, kvm_test_utils
 
+
 def run_nic_promisc(test, params, env):
     """
     Test nic driver in promisc mode:
@@ -18,24 +19,15 @@
     @param params: Dictionary with the test parameters.
     @param env: Dictionary with test environment.
     """
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
     timeout = int(params.get("login_timeout", 360))
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
-    session = kvm_test_utils.wait_for_login(vm, timeout=timeout)
-
-    logging.info("Trying to log into guest '%s' by serial", vm.name)
-    session2 = kvm_utils.wait_for(lambda: vm.serial_login(),
-                                  timeout, 0, step=2)
-    if not session2:
-        raise error.TestFail("Could not log into guest '%s'" % vm.name)
+    session = vm.wait_for_login(timeout=timeout)
+    session_serial = vm.wait_for_serial_login(timeout=timeout)
 
     def compare(filename):
-        cmd = "md5sum %s" % filename
         md5_host = utils.hash_file(filename, method="md5")
-        rc_guest, md5_guest = session.get_command_status_output(cmd)
-        if rc_guest:
-            logging.debug("Could not get MD5 hash for file %s on guest,"
-                          "output: %s", filename, md5_guest)
-            return False
+        md5_guest = session.cmd("md5sum %s" % filename)
         md5_guest = md5_guest.split()[0]
         if md5_host != md5_guest:
             logging.error("MD5 hash mismatch between file %s "
@@ -46,11 +38,28 @@
         return True
 
     ethname = kvm_test_utils.get_linux_ifname(session, vm.get_mac_address(0))
-    set_promisc_cmd = ("ip link set %s promisc on; sleep 0.01;"
-                       "ip link set %s promisc off; sleep 0.01" %
-                       (ethname, ethname))
-    logging.info("Set promisc change repeatedly in guest")
-    session2.sendline("while true; do %s; done" % set_promisc_cmd)
+
+    class ThreadPromiscCmd(threading.Thread):
+        def __init__(self, session, termination_event):
+            self.session = session
+            self.termination_event = termination_event
+            super(ThreadPromiscCmd, self).__init__()
+
+
+        def run(self):
+            set_promisc_cmd = ("ip link set %s promisc on; sleep 0.01;"
+                               "ip link set %s promisc off; sleep 0.01" %
+                               (ethname, ethname))
+            while True:
+                self.session.cmd_output(set_promisc_cmd)
+                if self.termination_event.isSet():
+                    break
+
+
+    logging.info("Started thread to change promisc mode in guest")
+    termination_event = threading.Event()
+    promisc_thread = ThreadPromiscCmd(session_serial, termination_event)
+    promisc_thread.start()
 
     dd_cmd = "dd if=/dev/urandom of=%s bs=%d count=1"
     filename = "/tmp/nic_promisc_file"
@@ -58,12 +67,14 @@
     success_counter = 0
     try:
         for size in file_size:
-            logging.info("Create %s bytes file on host" % size)
+            logging.info("Create %s bytes file on host", size)
             utils.run(dd_cmd % (filename, int(size)))
 
             logging.info("Transfer file from host to guest")
-            if not vm.copy_files_to(filename, filename):
-                logging.error("File transfer failed")
+            try:
+                vm.copy_files_to(filename, filename)
+            except kvm_utils.SCPError, e:
+                logging.error("File transfer failed (%s)", e)
                 continue
             if not compare(filename):
                 logging.error("Compare file failed")
@@ -71,15 +82,14 @@
             else:
                 success_counter += 1
 
-            logging.info("Create %s bytes file on guest" % size)
-            if session.get_command_status(dd_cmd % (filename, int(size)),
-                                                    timeout=100) != 0:
-                logging.error("Create file on guest failed")
-                continue
+            logging.info("Create %s bytes file on guest", size)
+            session.cmd(dd_cmd % (filename, int(size)), timeout=100)
 
             logging.info("Transfer file from guest to host")
-            if not vm.copy_files_from(filename, filename):
-                logging.error("File transfer failed")
+            try:
+                vm.copy_files_from(filename, filename)
+            except kvm_utils.SCPError, e:
+                logging.error("File transfer failed (%s)", e)
                 continue
             if not compare(filename):
                 logging.error("Compare file failed")
@@ -90,12 +100,14 @@
             logging.info("Clean temporary files")
             cmd = "rm -f %s" % filename
             utils.run(cmd)
-            session.get_command_status(cmd)
+            session.cmd_output(cmd)
 
     finally:
+        logging.info("Stopping the promisc thread")
+        termination_event.set()
+        promisc_thread.join(10)
         logging.info("Restore the %s to the nonpromisc mode", ethname)
-        session2.close()
-        session.get_command_status("ip link set %s promisc off" % ethname)
+        session.cmd_output("ip link set %s promisc off" % ethname)
         session.close()
 
     if success_counter != 2 * len(file_size):
diff --git a/client/tests/kvm/tests/nicdriver_unload.py b/client/tests/kvm/tests/nicdriver_unload.py
index 47318ba..065c60e 100644
--- a/client/tests/kvm/tests/nicdriver_unload.py
+++ b/client/tests/kvm/tests/nicdriver_unload.py
@@ -1,7 +1,8 @@
 import logging, threading, os
 from autotest_lib.client.common_lib import error
 from autotest_lib.client.bin import utils
-import kvm_utils, kvm_test_utils
+import kvm_test_utils
+
 
 def run_nicdriver_unload(test, params, env):
     """
@@ -18,19 +19,14 @@
     @param env: Dictionary with test environment.
     """
     timeout = int(params.get("login_timeout", 360))
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
-    session = kvm_test_utils.wait_for_login(vm, timeout=timeout)
-    logging.info("Trying to log into guest '%s' by serial", vm.name)
-    session2 = kvm_utils.wait_for(lambda: vm.serial_login(),
-                                  timeout, 0, step=2)
-    if not session2:
-        raise error.TestFail("Could not log into guest '%s'" % vm.name)
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+    session = vm.wait_for_login(timeout=timeout)
+    session_serial = vm.wait_for_serial_login(timeout=timeout)
 
     ethname = kvm_test_utils.get_linux_ifname(session, vm.get_mac_address(0))
     sys_path = "/sys/class/net/%s/device/driver" % (ethname)
-    s, o = session.get_command_status_output('readlink -e %s' % sys_path)
-    if s:
-        raise error.TestError("Could not find driver name")
+    o = session.cmd("readlink -e %s" % sys_path)
     driver = os.path.basename(o.strip())
     logging.info("driver is %s", driver)
 
@@ -38,19 +34,12 @@
         def run(self):
             remote_file = '/tmp/' + self.getName()
             file_list.append(remote_file)
-            ret = vm.copy_files_to(file_name, remote_file, timeout=scp_timeout)
-            if ret:
-                logging.debug("File %s was transfered successfuly", remote_file)
-            else:
-                logging.debug("Failed to transfer file %s", remote_file)
+            vm.copy_files_to(file_name, remote_file, timeout=scp_timeout)
+            logging.debug("File %s was transferred successfully", remote_file)
 
     def compare(origin_file, receive_file):
-        cmd = "md5sum %s"
         check_sum1 = utils.hash_file(origin_file, method="md5")
-        s, output2 = session.get_command_status_output(cmd % receive_file)
-        if s != 0:
-            logging.error("Could not get md5sum of receive_file")
-            return False
+        output2 = session.cmd("md5sum %s" % receive_file)
         check_sum2 = output2.strip().split()[0]
         logging.debug("original file md5: %s, received file md5: %s",
                       check_sum1, check_sum2)
@@ -77,9 +66,11 @@
         logging.info("Unload/load NIC driver repeatedly in guest...")
         while True:
             logging.debug("Try to unload/load nic drive once")
-            if session2.get_command_status(unload_load_cmd, timeout=120) != 0:
-                session.get_command_output("rm -rf /tmp/Thread-*")
-                raise error.TestFail("Unload/load nic driver failed")
+            try:
+                session_serial.cmd(unload_load_cmd, timeout=120)
+            except:
+                session.cmd_output("rm -rf /tmp/Thread-*")
+                raise
             pid, s = os.waitpid(pid, os.WNOHANG)
             status = os.WEXITSTATUS(s)
             if (pid, status) != (0, 0):
@@ -96,7 +87,6 @@
             t.join(timeout = scp_timeout)
         os._exit(0)
 
-    session2.close()
 
     try:
         logging.info("Check MD5 hash for received files in multi-session")
@@ -105,11 +95,10 @@
                 raise error.TestFail("Fail to compare (guest) file %s" % f)
 
         logging.info("Test nic function after load/unload")
-        if not vm.copy_files_to(file_name, file_name):
-            raise error.TestFail("Fail to copy file from host to guest")
+        vm.copy_files_to(file_name, file_name)
         if not compare(file_name, file_name):
             raise error.TestFail("Test nic function after load/unload fail")
 
     finally:
-        session.get_command_output("rm -rf /tmp/Thread-*")
+        session.cmd_output("rm -rf /tmp/Thread-*")
         session.close()
diff --git a/client/tests/kvm/tests/pci_hotplug.py b/client/tests/kvm/tests/pci_hotplug.py
index 55cf666..4bb8bfb 100644
--- a/client/tests/kvm/tests/pci_hotplug.py
+++ b/client/tests/kvm/tests/pci_hotplug.py
@@ -1,6 +1,6 @@
-import logging, os, commands, re
+import re
 from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils, kvm_vm
+import kvm_subprocess, kvm_utils, kvm_vm
 
 
 def run_pci_hotplug(test, params, env):
@@ -19,21 +19,21 @@
     @param params: Dictionary with the test parameters.
     @param env:    Dictionary with test environment.
     """
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
     timeout = int(params.get("login_timeout", 360))
-    session = kvm_test_utils.wait_for_login(vm, timeout=timeout)
+    session = vm.wait_for_login(timeout=timeout)
 
     # Modprobe the module if specified in config file
     module = params.get("modprobe_module")
     if module:
-        if session.get_command_status("modprobe %s" % module):
-            raise error.TestError("Modprobe module '%s' failed" % module)
+        session.cmd("modprobe %s" % module)
 
     # Get output of command 'info pci' as reference
     info_pci_ref = vm.monitor.info("pci")
 
     # Get output of command as reference
-    reference = session.get_command_output(params.get("reference_cmd"))
+    reference = session.cmd_output(params.get("reference_cmd"))
 
     tested_model = params.get("pci_model")
     test_type = params.get("pci_type")
@@ -48,11 +48,24 @@
     else:
         raise error.TestError("Unknow version of qemu")
 
+    # Determine syntax of drive hotplug
+    # __com.redhat_drive_add == qemu-kvm-0.12 on RHEL 6
+    if len(re.findall("\n__com.redhat_drive_add", cmd_output)) > 0:
+        drive_cmd_type = "__com.redhat_drive_add"
+    # drive_add == qemu-kvm-0.13 onwards
+    elif len(re.findall("\ndrive_add", cmd_output)) > 0:
+        drive_cmd_type = "drive_add"
+    else:
+        raise error.TestError("Unknown version of qemu")
+
+    # Probe qemu for a list of supported devices
+    devices_support = vm.monitor.cmd("%s ?" % cmd_type)
+
     if cmd_type == "pci_add":
         if test_type == "nic":
             pci_add_cmd = "pci_add pci_addr=auto nic model=%s" % tested_model
         elif test_type == "block":
-            image_params = kvm_utils.get_sub_dict(params, "stg")
+            image_params = params.object_params("stg")
             image_filename = kvm_vm.get_image_filename(image_params,
                                                        test.bindir)
             pci_add_cmd = ("pci_add pci_addr=auto storage file=%s,if=%s" %
@@ -67,31 +80,49 @@
 
     elif cmd_type == "device_add":
         driver_id = test_type + "-" + kvm_utils.generate_random_id()
-        id = test_type + "-" + kvm_utils.generate_random_id()
+        device_id = test_type + "-" + kvm_utils.generate_random_id()
         if test_type == "nic":
             if tested_model == "virtio":
                 tested_model = "virtio-net-pci"
-            pci_add_cmd = "device_add id=%s,driver=%s" % (id, tested_model)
+            pci_add_cmd = "device_add id=%s,driver=%s" % (device_id,
+                                                          tested_model)
 
         elif test_type == "block":
-            image_params = kvm_utils.get_sub_dict(params, "stg")
+            image_params = params.object_params("stg")
             image_filename = kvm_vm.get_image_filename(image_params,
                                                        test.bindir)
+            controller_model = None
             if tested_model == "virtio":
                 tested_model = "virtio-blk-pci"
 
             if tested_model == "scsi":
                 tested_model = "scsi-disk"
+                controller_model = "lsi53c895a"
+                if len(re.findall(controller_model, devices_support)) == 0:
+                    raise error.TestError("scsi controller device (%s) not "
+                                          "supported by qemu" %
+                                          controller_model)
 
-            driver_add_cmd = (" __com.redhat_drive_add "
-                              "file=%s,format=%s,id=%s" %
-                              (image_filename, image_format, driver_id))
+            if controller_model is not None:
+                controller_id = "controller-" + device_id
+                controller_add_cmd = ("device_add %s,id=%s" %
+                                      (controller_model, controller_id))
+                vm.monitor.cmd(controller_add_cmd)
+
+            if drive_cmd_type == "drive_add":
+                driver_add_cmd = ("drive_add auto "
+                                  "file=%s,if=none,id=%s,format=%s" %
+                                  (image_filename, driver_id, image_format))
+            elif drive_cmd_type == "__com.redhat_drive_add":
+                driver_add_cmd = ("__com.redhat_drive_add "
+                                  "file=%s,format=%s,id=%s" %
+                                  (image_filename, image_format, driver_id))
+
             pci_add_cmd = ("device_add id=%s,driver=%s,drive=%s" %
-                           (id, tested_model, driver_id))
-            driver_output = vm.monitor.cmd(driver_add_cmd)
+                           (device_id, tested_model, driver_id))
+            vm.monitor.cmd(driver_add_cmd)
 
         # Check if the device is support in qemu
-        devices_support = vm.monitor.cmd("%s ?" % cmd_type)
         if len(re.findall(tested_model, devices_support)) > 0:
             add_output = vm.monitor.cmd(pci_add_cmd)
         else:
@@ -106,10 +137,14 @@
     # Define a helper function to delete the device
     def pci_del(ignore_failure=False):
         if cmd_type == "pci_add":
-            slot_id = "0" + add_output.split(",")[2].split()[1]
-            cmd = "pci_del pci_addr=%s" % slot_id
+            result_domain, bus, slot, function = add_output.split(',')
+            domain = int(result_domain.split()[2])
+            bus = int(bus.split()[1])
+            slot = int(slot.split()[1])
+            pci_addr = "%x:%x:%x" % (domain, bus, slot)
+            cmd = "pci_del pci_addr=%s" % pci_addr
         elif cmd_type == "device_add":
-            cmd = "device_del %s" % id
+            cmd = "device_del %s" % device_id
         # This should be replaced by a proper monitor method call
         vm.monitor.cmd(cmd)
 
@@ -131,7 +166,7 @@
 
         # Define a helper function to compare the output
         def new_shown():
-            o = session.get_command_output(params.get("reference_cmd"))
+            o = session.cmd_output(params.get("reference_cmd"))
             return o != reference
 
         secs = int(params.get("wait_secs_for_hook_up"))
@@ -142,7 +177,7 @@
 
         # Define a helper function to catch PCI device string
         def find_pci():
-            o = session.get_command_output(params.get("find_pci_cmd"))
+            o = session.cmd_output(params.get("find_pci_cmd"))
             return params.get("match_string") in o
 
         if not kvm_utils.wait_for(find_pci, 30, 3, 3):
@@ -152,10 +187,11 @@
                                   params.get("find_pci_cmd")))
 
         # Test the newly added device
-        s, o = session.get_command_status_output(params.get("pci_test_cmd"))
-        if s != 0:
+        try:
+            session.cmd(params.get("pci_test_cmd"))
+        except kvm_subprocess.ShellError, e:
             raise error.TestFail("Check for %s device failed after PCI "
-                                 "hotplug. Output: %r" % (test_type, o))
+                                 "hotplug. Output: %r" % (test_type, e.output))
 
         session.close()
 
diff --git a/client/tests/kvm/tests/physical_resources_check.py b/client/tests/kvm/tests/physical_resources_check.py
index 682c7b2..f9e603c 100644
--- a/client/tests/kvm/tests/physical_resources_check.py
+++ b/client/tests/kvm/tests/physical_resources_check.py
@@ -1,6 +1,6 @@
 import re, string, logging
 from autotest_lib.client.common_lib import error
-import kvm_test_utils, kvm_utils, kvm_monitor
+import kvm_monitor
 
 
 def run_physical_resources_check(test, params, env):
@@ -17,9 +17,10 @@
     @param params: Dictionary with the test parameters
     @param env: Dictionary with test environment.
     """
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
     timeout = int(params.get("login_timeout", 360))
-    session = kvm_test_utils.wait_for_login(vm, timeout=timeout)
+    session = vm.wait_for_login(timeout=timeout)
 
     logging.info("Starting physical resources check test")
     logging.info("Values assigned to VM are the values we expect "
@@ -35,8 +36,8 @@
     if expected_cpu_nr != actual_cpu_nr:
         n_fail += 1
         logging.error("CPU count mismatch:")
-        logging.error("    Assigned to VM: %s" % expected_cpu_nr)
-        logging.error("    Reported by OS: %s" % actual_cpu_nr)
+        logging.error("    Assigned to VM: %s", expected_cpu_nr)
+        logging.error("    Reported by OS: %s", actual_cpu_nr)
 
     # Check memory size
     logging.info("Memory size check")
@@ -45,13 +46,14 @@
     if actual_mem != expected_mem:
         n_fail += 1
         logging.error("Memory size mismatch:")
-        logging.error("    Assigned to VM: %s" % expected_mem)
-        logging.error("    Reported by OS: %s" % actual_mem)
+        logging.error("    Assigned to VM: %s", expected_mem)
+        logging.error("    Reported by OS: %s", actual_mem)
 
     # Define a function for checking number of hard drivers & NICs
     def check_num(devices, info_cmd, check_str):
         f_fail = 0
-        expected_num = kvm_utils.get_sub_dict_names(params, devices).__len__()
+        expected_num = params.objects(devices).__len__()
+        o = ""
         try:
             o = vm.monitor.info(info_cmd)
         except kvm_monitor.MonitorError, e:
@@ -63,26 +65,25 @@
         if expected_num != actual_num:
             f_fail += 1
             logging.error("%s number mismatch:")
-            logging.error("    Assigned to VM: %d" % expected_num)
-            logging.error("    Reported by OS: %d" % actual_num)
+            logging.error("    Assigned to VM: %d", expected_num)
+            logging.error("    Reported by OS: %d", actual_num)
         return expected_num, f_fail
 
     logging.info("Hard drive count check")
-    drives_num, f_fail = check_num("images", "block", "type=hd")
-    n_fail += f_fail
+    n_fail += check_num("images", "block", "type=hd")[1]
 
     logging.info("NIC count check")
-    nics_num, f_fail = check_num("nics", "network", "model=")
-    n_fail += f_fail
+    n_fail += check_num("nics", "network", "model=")[1]
 
     # Define a function for checking hard drives & NICs' model
-    def chk_fmt_model(device, fmt_model, info_cmd, str):
+    def chk_fmt_model(device, fmt_model, info_cmd, regexp):
         f_fail = 0
-        devices = kvm_utils.get_sub_dict_names(params, device)
+        devices = params.objects(device)
         for chk_device in devices:
-            expected = kvm_utils.get_sub_dict(params, chk_device).get(fmt_model)
+            expected = params.object_params(chk_device).get(fmt_model)
             if not expected:
                 expected = "rtl8139"
+            o = ""
             try:
                 o = vm.monitor.info(info_cmd)
             except kvm_monitor.MonitorError, e:
@@ -91,8 +92,8 @@
                 logging.error("info/query monitor command failed (%s)",
                               info_cmd)
 
-            device_found = re.findall(str, o)
-            logging.debug("Found devices: %s" % device_found)
+            device_found = re.findall(regexp, o)
+            logging.debug("Found devices: %s", device_found)
             found = False
             for fm in device_found:
                 if expected in fm:
@@ -101,8 +102,8 @@
             if not found:
                 f_fail += 1
                 logging.error("%s model mismatch:")
-                logging.error("    Assigned to VM: %s" % expected)
-                logging.error("    Reported by OS: %s" % device_found)
+                logging.error("    Assigned to VM: %s", expected)
+                logging.error("    Reported by OS: %s", device_found)
         return f_fail
 
     logging.info("NICs model check")
@@ -114,6 +115,7 @@
     n_fail += f_fail
 
     logging.info("Network card MAC check")
+    o = ""
     try:
         o = vm.monitor.info("network")
     except kvm_monitor.MonitorError, e:
@@ -121,26 +123,26 @@
         logging.error(e)
         logging.error("info/query monitor command failed (network)")
     found_mac_addresses = re.findall("macaddr=(\S+)", o)
-    logging.debug("Found MAC adresses: %s" % found_mac_addresses)
+    logging.debug("Found MAC addresses: %s", found_mac_addresses)
 
-    num_nics = len(kvm_utils.get_sub_dict_names(params, "nics"))
+    num_nics = len(params.objects("nics"))
     for nic_index in range(num_nics):
         mac = vm.get_mac_address(nic_index)
         if not string.lower(mac) in found_mac_addresses:
             n_fail += 1
             logging.error("MAC address mismatch:")
-            logging.error("    Assigned to VM (not found): %s" % mac)
+            logging.error("    Assigned to VM (not found): %s", mac)
 
     # Define a function to verify UUID & Serial number
     def verify_device(expect, name, verify_cmd):
         f_fail = 0
         if verify_cmd:
-            actual = session.get_command_output(verify_cmd)
+            actual = session.cmd_output(verify_cmd)
             if not string.upper(expect) in actual:
                 f_fail += 1
                 logging.error("%s mismatch:")
-                logging.error("    Assigned to VM: %s" % string.upper(expect))
-                logging.error("    Reported by OS: %s" % actual)
+                logging.error("    Assigned to VM: %s", string.upper(expect))
+                logging.error("    Reported by OS: %s", actual)
         return f_fail
 
     logging.info("UUID check")
diff --git a/client/tests/kvm/tests/ping.py b/client/tests/kvm/tests/ping.py
index 9b2308f..8dc4b9e 100644
--- a/client/tests/kvm/tests/ping.py
+++ b/client/tests/kvm/tests/ping.py
@@ -18,9 +18,9 @@
     @param params: Dictionary with the test parameters.
     @param env: Dictionary with test environment.
     """
-
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
-    session = kvm_test_utils.wait_for_login(vm)
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))
 
     counts = params.get("ping_counts", 100)
     flood_minutes = float(params.get("flood_minutes", 10))
@@ -34,7 +34,8 @@
         for i, nic in enumerate(nics):
             ip = vm.get_address(i)
             if not ip:
-                logging.error("Could not get the ip of nic index %d", i)
+                logging.error("Could not get the ip of nic index %d: %s",
+                              i, nic)
                 continue
 
             for size in packet_size:
diff --git a/client/tests/kvm/tests/pxe.py b/client/tests/kvm/tests/pxe.py
index ec9a549..7c294c1 100644
--- a/client/tests/kvm/tests/pxe.py
+++ b/client/tests/kvm/tests/pxe.py
@@ -1,6 +1,6 @@
 import logging
 from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils
+import kvm_subprocess
 
 
 def run_pxe(test, params, env):
@@ -15,14 +15,13 @@
     @param params: Dictionary with the test parameters.
     @param env: Dictionary with test environment.
     """
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
     timeout = int(params.get("pxe_timeout", 60))
 
     logging.info("Try to boot from PXE")
-    status, output = kvm_subprocess.run_fg("tcpdump -nli %s" % vm.get_ifname(),
-                                           logging.debug,
-                                           "(pxe capture) ",
-                                           timeout)
+    output = kvm_subprocess.run_fg("tcpdump -nli %s" % vm.get_ifname(),
+                                   logging.debug, "(pxe capture) ", timeout)[1]
 
     logging.info("Analyzing the tcpdump result...")
     if not "tftp" in output:
diff --git a/client/tests/kvm/tests/qemu_img.py b/client/tests/kvm/tests/qemu_img.py
index d3f7ff1..c3449f4 100644
--- a/client/tests/kvm/tests/qemu_img.py
+++ b/client/tests/kvm/tests/qemu_img.py
@@ -1,6 +1,6 @@
 import re, os, logging, commands
 from autotest_lib.client.common_lib import utils, error
-import kvm_vm, kvm_utils
+import kvm_vm, kvm_utils, kvm_preprocessing
 
 
 def run_qemu_img(test, params, env):
@@ -162,13 +162,13 @@
             os.remove(output_filename)
 
 
-    def _info(cmd, img, string=None, fmt=None):
+    def _info(cmd, img, sub_info=None, fmt=None):
         """
         Simple wrapper of 'qemu-img info'.
 
         @param cmd: qemu-img base command.
         @param img: image file
-        @param string: sub info, say 'backing file'
+        @param sub_info: sub info, say 'backing file'
         @param fmt: image format
         """
         cmd += " info"
@@ -182,11 +182,11 @@
             logging.error("Get info of image '%s' failed: %s", img, str(e))
             return None
 
-        if not string:
+        if not sub_info:
             return output
 
-        string += ": (.*)"
-        matches = re.findall(string, output)
+        sub_info += ": (.*)"
+        matches = re.findall(sub_info, output)
         if matches:
             return matches[0]
         return None
@@ -223,7 +223,7 @@
             if s != 0:
                 raise error.TestFail("Create snapshot failed via command: %s;"
                                      "Output is: %s" % (crtcmd, o))
-            logging.info("Created snapshot '%s' in '%s'" % (sn_name,image_name))
+            logging.info("Created snapshot '%s' in '%s'", sn_name, image_name)
         listcmd = cmd
         listcmd += " -l %s" % image_name
         s, o = commands.getstatusoutput(listcmd)
@@ -243,10 +243,124 @@
     def commit_test(cmd):
         """
         Subcommand 'qemu-img commit' test.
+        1) Create a backing file of the qemu harddisk specified by image_name.
+        2) Start a VM using the backing file as its harddisk.
+        3) Touch a file "commit_testfile" in the backing_file, and shutdown the
+           VM.
+        4) Make sure touching the file does not affect the original harddisk.
+        5) Commit the change to the original harddisk by executing
+           "qemu-img commit" command.
+        6) Start the VM using the original harddisk.
+        7) Check if the file "commit_testfile" exists.
 
         @param cmd: qemu-img base command.
         """
-        pass
+        cmd += " commit"
+
+        logging.info("Commit testing started!")
+        image_name = params.get("image_name", "image")
+        image_format = params.get("image_format", "qcow2")
+        backing_file_name = "%s_bak" % (image_name)
+
+        try:
+            # Remove the existing backing file
+            backing_file = "%s.%s" % (backing_file_name, image_format)
+            if os.path.isfile(backing_file):
+                os.remove(backing_file)
+
+            # Create the new backing file
+            create_cmd = "qemu-img create -b %s.%s -f %s %s.%s" % (image_name,
+                                                                  image_format,
+                                                                  image_format,
+                                                             backing_file_name,
+                                                                  image_format)
+            try:
+                utils.system(create_cmd)
+            except error.CmdError, e:
+                raise error.TestFail("Could not create a backing file!")
+            logging.info("backing_file created!")
+
+            # Set the qemu harddisk to the backing file
+            logging.info("Original image_name is: %s", params.get('image_name'))
+            params['image_name'] = backing_file_name
+            logging.info("Param image_name changed to: %s",
+                         params.get('image_name'))
+
+            # Start a new VM, using backing file as its harddisk
+            vm_name = params.get('main_vm')
+            kvm_preprocessing.preprocess_vm(test, params, env, vm_name)
+            vm = env.get_vm(vm_name)
+            vm.create()
+            timeout = int(params.get("login_timeout", 360))
+            session = vm.wait_for_login(timeout=timeout)
+
+            # Do some changes to the backing_file harddisk
+            try:
+                output = session.cmd("touch /commit_testfile")
+                logging.info("Output of touch /commit_testfile: %s", output)
+                output = session.cmd("ls / | grep commit_testfile")
+                logging.info("Output of ls / | grep commit_testfile: %s",
+                             output)
+            except Exception, e:
+                raise error.TestFail("Could not create commit_testfile in the "
+                                     "backing file %s", e)
+            vm.destroy()
+
+            # Make sure there is no effect on the original harddisk
+            # First, set the harddisk back to the original one
+            logging.info("Current image_name is: %s", params.get('image_name'))
+            params['image_name'] = image_name
+            logging.info("Param image_name reverted to: %s",
+                         params.get('image_name'))
+
+            # Second, Start a new VM, using image_name as its harddisk
+            # Here, the commit_testfile should not exist
+            vm_name = params.get('main_vm')
+            kvm_preprocessing.preprocess_vm(test, params, env, vm_name)
+            vm = env.get_vm(vm_name)
+            vm.create()
+            timeout = int(params.get("login_timeout", 360))
+            session = vm.wait_for_login(timeout=timeout)
+            try:
+                output = session.cmd("[ ! -e /commit_testfile ] && echo $?")
+                logging.info("Output of [ ! -e /commit_testfile ] && echo $?: "
+                             "%s", output)
+            except:
+                output = session.cmd("rm -f /commit_testfile")
+                raise error.TestFail("The commit_testfile exists on the "
+                                     "original file")
+            vm.destroy()
+
+            # Execute the commit command
+            logging.info("Committing image")
+            cmitcmd = "%s -f %s %s.%s" % (cmd, image_format, backing_file_name,
+                                          image_format)
+            try:
+                utils.system(cmitcmd)
+            except error.CmdError, e:
+                raise error.TestFail("Could not commit the backing file")
+
+            # Start a new VM, using image_name as its harddisk
+            vm_name = params.get('main_vm')
+            kvm_preprocessing.preprocess_vm(test, params, env, vm_name)
+            vm = env.get_vm(vm_name)
+            vm.create()
+            timeout = int(params.get("login_timeout", 360))
+            session = vm.wait_for_login(timeout=timeout)
+            try:
+                output = session.cmd("[ -e /commit_testfile ] && echo $?")
+                logging.info("Output of [ -e /commit_testfile ] && echo $?: %s",
+                             output)
+                session.cmd("rm -f /commit_testfile")
+            except:
+                raise error.TestFail("Could not find commit_testfile after a "
+                                     "commit")
+            vm.destroy()
+
+        finally:
+            # Remove the backing file
+            if os.path.isfile(backing_file):
+                os.remove(backing_file)
 
 
     def _rebase(cmd, img_name, base_img, backing_fmt, mode="unsafe"):
@@ -263,7 +377,7 @@
         if mode == "unsafe":
             cmd += " -u"
         cmd += " -b %s -F %s %s" % (base_img, backing_fmt, img_name)
-        logging.info("Trying to rebase '%s' to '%s'..." % (img_name, base_img))
+        logging.info("Trying to rebase '%s' to '%s'...", img_name, base_img)
         s, o = commands.getstatusoutput(cmd)
         if s != 0:
             raise error.TestError("Failed to rebase '%s' to '%s': %s" %
diff --git a/client/tests/kvm/tests/qmp_basic.py b/client/tests/kvm/tests/qmp_basic.py
index 985ad15..9328c61 100644
--- a/client/tests/kvm/tests/qmp_basic.py
+++ b/client/tests/kvm/tests/qmp_basic.py
@@ -1,5 +1,6 @@
-import kvm_test_utils
 from autotest_lib.client.common_lib import error
+import kvm_test_utils, kvm_monitor
+
 
 def run_qmp_basic(test, params, env):
     """
@@ -197,24 +198,24 @@
         Check that QMP's "id" key is correctly handled.
         """
         # The "id" key must be echoed back in error responses
-        id = "kvm-autotest"
-        resp = monitor.cmd_qmp("eject", { "foobar": True }, id=id)
+        id_key = "kvm-autotest"
+        resp = monitor.cmd_qmp("eject", { "foobar": True }, id=id_key)
         check_error_resp(resp)
-        check_str_key(resp, "id", id)
+        check_str_key(resp, "id", id_key)
 
         # The "id" key must be echoed back in success responses
-        resp = monitor.cmd_qmp("query-status", id=id)
+        resp = monitor.cmd_qmp("query-status", id=id_key)
         check_success_resp(resp)
-        check_str_key(resp, "id", id)
+        check_str_key(resp, "id", id_key)
 
         # The "id" key can be any json-object
-        for id in [ True, 1234, "string again!", [1, [], {}, True, "foo"],
+        for id_key in [ True, 1234, "string again!", [1, [], {}, True, "foo"],
                     { "key": {} } ]:
-            resp = monitor.cmd_qmp("query-status", id=id)
+            resp = monitor.cmd_qmp("query-status", id=id_key)
             check_success_resp(resp)
-            if resp["id"] != id:
+            if resp["id"] != id_key:
                 raise error.TestFail("expected id '%s' but got '%s'" %
-                                     (str(id), str(resp["id"])))
+                                     (str(id_key), str(resp["id"])))
 
 
     def test_invalid_arg_key(monitor):
@@ -366,7 +367,8 @@
         # is to skip its checking and pass arguments through. Check this
         # works by providing invalid options to device_add and expecting
         # an error message from qdev
-        resp = monitor.cmd_qmp("device_add", { "driver": "e1000","foo": "bar" })
+        resp = monitor.cmd_qmp("device_add", { "driver": "e1000",
+                                              "foo": "bar" })
         check_error_resp(resp, "PropertyNotFound",
                                {"device": "e1000", "property": "foo"})
 
@@ -381,15 +383,25 @@
             check_error_resp(resp, "CommandNotFound", { "name": cmd })
 
 
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+
+    # Look for the first qmp monitor available, otherwise, fail the test
+    qmp_monitor = None
+    for m in vm.monitors:
+        if isinstance(m, kvm_monitor.QMPMonitor):
+            qmp_monitor = m
+
+    if qmp_monitor is None:
+        raise error.TestError('Could not find a QMP monitor, aborting test')
 
     # Run all suites
-    greeting_suite(vm.monitor)
-    input_object_suite(vm.monitor)
-    argument_checker_suite(vm.monitor)
-    unknown_commands_suite(vm.monitor)
-    json_parsing_errors_suite(vm.monitor)
+    greeting_suite(qmp_monitor)
+    input_object_suite(qmp_monitor)
+    argument_checker_suite(qmp_monitor)
+    unknown_commands_suite(qmp_monitor)
+    json_parsing_errors_suite(qmp_monitor)
 
     # check if QMP is still alive
-    if not vm.monitor.is_responsive():
-        raise error.TestFail('QEMU is not alive after QMP testing')
+    if not qmp_monitor.is_responsive():
+        raise error.TestFail('QMP monitor is not responsive after testing')
diff --git a/client/tests/kvm/tests/qmp_basic_rhel6.py b/client/tests/kvm/tests/qmp_basic_rhel6.py
new file mode 100644
index 0000000..24298b8
--- /dev/null
+++ b/client/tests/kvm/tests/qmp_basic_rhel6.py
@@ -0,0 +1,389 @@
+import logging
+from autotest_lib.client.common_lib import error
+import kvm_monitor
+
+
+def run_qmp_basic_rhel6(test, params, env):
+    """
+    QMP Specification test-suite: this checks if the *basic* protocol conforms
+    to its specification, which is file QMP/qmp-spec.txt in QEMU's source tree.
+
+    IMPORTANT NOTES:
+
+        o Most tests depend heavily on QMP's error information (eg. classes),
+          this might have bad implications as the error interface is going to
+          change in QMP
+
+        o Command testing is *not* covered in this suite. Each command has its
+          own specification and should be tested separately
+
+        o We use the same terminology as used by the QMP specification,
+          specially with regard to JSON types (eg. a Python dict is called
+          a json-object)
+
+        o This is divided in sub test-suites, please check the bottom of this
+          file to check the order in which they are run
+
+    TODO:
+
+        o Finding which test failed is not as easy as it should be
+
+        o Are all those check_*() functions really needed? Wouldn't a
+          specialized class (eg. a Response class) do better?
+    """
+    def fail_no_key(qmp_dict, key):
+        if not isinstance(qmp_dict, dict):
+            raise error.TestFail("qmp_dict is not a dict (it's '%s')" %
+                                 type(qmp_dict))
+        if not key in qmp_dict:
+            raise error.TestFail("'%s' key doesn't exist in dict ('%s')" %
+                                 (key, str(qmp_dict)))
+
+
+    def check_dict_key(qmp_dict, key, keytype):
+        """
+        Performs the following checks on a QMP dict key:
+
+        1. qmp_dict is a dict
+        2. key exists in qmp_dict
+        3. key is of type keytype
+
+        If any of these checks fails, error.TestFail is raised.
+        """
+        fail_no_key(qmp_dict, key)
+        if not isinstance(qmp_dict[key], keytype):
+            raise error.TestFail("'%s' key is not of type '%s', it's '%s'" %
+                                 (key, keytype, type(qmp_dict[key])))
+
+
+    def check_key_is_dict(qmp_dict, key):
+        check_dict_key(qmp_dict, key, dict)
+
+
+    def check_key_is_list(qmp_dict, key):
+        check_dict_key(qmp_dict, key, list)
+
+
+    def check_key_is_str(qmp_dict, key):
+        check_dict_key(qmp_dict, key, unicode)
+
+
+    def check_str_key(qmp_dict, keyname, value=None):
+        check_dict_key(qmp_dict, keyname, unicode)
+        if value and value != qmp_dict[keyname]:
+            raise error.TestFail("'%s' key value '%s' should be '%s'" %
+                                 (keyname, str(qmp_dict[keyname]), str(value)))
+
+
+    def check_key_is_int(qmp_dict, key):
+        fail_no_key(qmp_dict, key)
+        try:
+            int(qmp_dict[key])
+        except:
+            raise error.TestFail("'%s' key is not of type int, it's '%s'" %
+                                 (key, type(qmp_dict[key])))
+
+
+    def check_bool_key(qmp_dict, keyname, value=None):
+        check_dict_key(qmp_dict, keyname, bool)
+        if value and value != qmp_dict[keyname]:
+            raise error.TestFail("'%s' key value '%s' should be '%s'" %
+                                 (keyname, str(qmp_dict[keyname]), str(value)))
+
+
+    def check_success_resp(resp, empty=False):
+        """
+        Check QMP OK response.
+
+        @param resp: QMP response
+        @param empty: if True, response should not contain data to return
+        """
+        check_key_is_dict(resp, "return")
+        if empty and len(resp["return"]) > 0:
+            raise error.TestFail("success response is not empty ('%s')" %
+                                 str(resp))
+
+
+    def check_error_resp(resp, classname=None, datadict=None):
+        """
+        Check QMP error response.
+
+        @param resp: QMP response
+        @param classname: Expected error class name
+        @param datadict: Expected error data dictionary
+        """
+        logging.debug("resp %s", str(resp))
+        check_key_is_dict(resp, "error")
+        check_key_is_str(resp["error"], "class")
+        if classname and resp["error"]["class"] != classname:
+            raise error.TestFail("got error class '%s' expected '%s'" %
+                                 (resp["error"]["class"], classname))
+        check_key_is_dict(resp["error"], "data")
+        if datadict and resp["error"]["data"] != datadict:
+            raise error.TestFail("got data dict '%s' expected '%s'" %
+                                 (resp["error"]["data"], datadict))
+
+
+    def test_version(version):
+        """
+        Check the QMP greeting message version key which, according to QMP's
+        documentation, should be:
+
+        { "qemu": { "major": json-int, "minor": json-int, "micro": json-int }
+          "package": json-string }
+        """
+        check_key_is_str(version, "qemu")
+        check_key_is_str(version, "package")
+
+
+    def test_greeting(greeting):
+        check_key_is_dict(greeting, "QMP")
+        check_key_is_dict(greeting["QMP"], "version")
+        check_key_is_list(greeting["QMP"], "capabilities")
+
+
+    def greeting_suite(monitor):
+        """
+        Check the greeting message format, as described in the QMP
+        specification section '2.2 Server Greeting'.
+
+        { "QMP": { "version": json-object, "capabilities": json-array } }
+        """
+        greeting = monitor.get_greeting()
+        test_greeting(greeting)
+        test_version(greeting["QMP"]["version"])
+
+
+    def json_parsing_errors_suite(monitor):
+        """
+        Check that QMP's parser is able to recover from parsing errors, please
+        check the JSON spec for more info on the JSON syntax (RFC 4627).
+        """
+        # We're quite simple right now and the focus is on parsing errors that
+        # have already bitten us in the past.
+        #
+        # TODO: The following test-cases are missing:
+        #
+        #   - JSON numbers, strings and arrays
+        #   - More invalid characters or malformed structures
+        #   - Valid, but not obvious syntax, like zillion of spaces or
+        #     strings with unicode chars (different suite maybe?)
+        bad_json = []
+
+        # A JSON value MUST be an object, array, number, string, true, false,
+        # or null
+        #
+        # NOTE: QMP seems to ignore a number of chars, like: | and ?
+        bad_json.append(":")
+        bad_json.append(",")
+
+        # Malformed json-objects
+        #
+        # NOTE: sending only "}" seems to break QMP
+        # NOTE: Duplicate keys are accepted (should it?)
+        bad_json.append("{ \"execute\" }")
+        bad_json.append("{ \"execute\": \"query-version\", }")
+        bad_json.append("{ 1: \"query-version\" }")
+        bad_json.append("{ true: \"query-version\" }")
+        bad_json.append("{ []: \"query-version\" }")
+        bad_json.append("{ {}: \"query-version\" }")
+
+        for cmd in bad_json:
+            resp = monitor.cmd_raw(cmd)
+            check_error_resp(resp, "JSONParsing")
+
+
+    def test_id_key(monitor):
+        """
+        Check that QMP's "id" key is correctly handled.
+        """
+        # The "id" key must be echoed back in error responses
+        id_key = "kvm-autotest"
+        resp = monitor.cmd_qmp("eject", { "foobar": True }, id=id_key)
+        check_error_resp(resp)
+        check_str_key(resp, "id", id_key)
+
+        # The "id" key must be echoed back in success responses
+        resp = monitor.cmd_qmp("query-status", id=id_key)
+        check_success_resp(resp)
+        check_str_key(resp, "id", id_key)
+
+        # The "id" key can be any json-object
+        for id_key in [ True, 1234, "string again!", [1, [], {}, True, "foo"],
+                    { "key": {} } ]:
+            resp = monitor.cmd_qmp("query-status", id=id_key)
+            check_success_resp(resp)
+            if resp["id"] != id_key:
+                raise error.TestFail("expected id '%s' but got '%s'" %
+                                     (str(id_key), str(resp["id"])))
+
+
+    def test_invalid_arg_key(monitor):
+        """
+        Currently, the only supported keys in the input object are: "execute",
+        "arguments" and "id". Although expansion is supported, invalid key
+        names must be detected.
+        """
+        resp = monitor.cmd_obj({ "execute": "eject", "foobar": True })
+        expected_error = "MissingParameter"
+        data_dict = {"name": "device"}
+        check_error_resp(resp, expected_error, data_dict)
+
+
+    def test_bad_arguments_key_type(monitor):
+        """
+        The "arguments" key must be an json-object.
+
+        We use the eject command to perform the tests, but that's a random
+        choice, any command that accepts arguments will do, as the command
+        doesn't get called.
+        """
+        for item in [ True, [], 1, "foo" ]:
+            resp = monitor.cmd_obj({ "execute": "eject", "arguments": item })
+            check_error_resp(resp, "QMPBadInputObjectMember",
+                             { "member": "arguments", "expected": "object" })
+
+
+    def test_bad_execute_key_type(monitor):
+        """
+        The "execute" key must be a json-string.
+        """
+        for item in [ False, 1, {}, [] ]:
+            resp = monitor.cmd_obj({ "execute": item })
+            check_error_resp(resp, "QMPBadInputObjectMember",
+                             { "member": "execute", "expected": "string" })
+
+
+    def test_no_execute_key(monitor):
+        """
+        The "execute" key must exist, we also test for some stupid parsing
+        errors.
+        """
+        for cmd in [ {}, { "execut": "qmp_capabilities" },
+                     { "executee": "qmp_capabilities" }, { "foo": "bar" }]:
+            resp = monitor.cmd_obj(cmd)
+            check_error_resp(resp) # XXX: check class and data dict?
+
+
+    def test_bad_input_obj_type(monitor):
+        """
+        The input object must be... a json-object.
+        """
+        for cmd in [ "foo", [], True, 1 ]:
+            resp = monitor.cmd_obj(cmd)
+            check_error_resp(resp, "QMPBadInputObject", { "expected":"object" })
+
+
+    def test_good_input_obj(monitor):
+        """
+        Basic success tests for issuing QMP commands.
+        """
+        # NOTE: We don't use the cmd_qmp() method here because the command
+        # object is in a 'random' order
+        resp = monitor.cmd_obj({ "execute": "query-version" })
+        check_success_resp(resp)
+
+        resp = monitor.cmd_obj({ "arguments": {}, "execute": "query-version" })
+        check_success_resp(resp)
+
+        id_key = "1234foo"
+        resp = monitor.cmd_obj({ "id": id_key, "execute": "query-version",
+                                 "arguments": {} })
+        check_success_resp(resp)
+        check_str_key(resp, "id", id_key)
+
+        # TODO: would be good to test simple argument usage, but we don't have
+        # a read-only command that accepts arguments.
+
+
+    def input_object_suite(monitor):
+        """
+        Check the input object format, as described in the QMP specification
+        section '2.3 Issuing Commands'.
+
+        { "execute": json-string, "arguments": json-object, "id": json-value }
+        """
+        test_good_input_obj(monitor)
+        test_bad_input_obj_type(monitor)
+        test_no_execute_key(monitor)
+        test_bad_execute_key_type(monitor)
+        test_bad_arguments_key_type(monitor)
+        test_id_key(monitor)
+        test_invalid_arg_key(monitor)
+
+
+    def argument_checker_suite(monitor):
+        """
+        Check that QMP's argument checker is detecting all possible errors.
+
+        We use a number of different commands to perform the checks, but the
+        command used doesn't matter much as QMP performs argument checking
+        _before_ calling the command.
+        """
+        # qmp in RHEL6 is different from 0.13.*:
+        # 1. 'stop' command just returns {} even if stop has arguments.
+        # 2. there is no 'screendump' command.
+        # 3. argument isn't checked in 'device' command.
+        # so skip these tests in RHEL6.
+
+        # test optional argument: 'force' is omitted, but it's optional, so
+        # the handler has to be called. Test this happens by checking an
+        # error that is generated by the handler itself.
+        resp = monitor.cmd_qmp("eject", { "device": "foobar" })
+        check_error_resp(resp, "DeviceNotFound")
+
+        # val argument must be a json-int
+        for arg in [ {}, [], True, "foo" ]:
+            resp = monitor.cmd_qmp("memsave", { "val": arg, "filename": "foo",
+                                                "size": 10 })
+            check_error_resp(resp, "InvalidParameterType",
+                             { "name": "val", "expected": "int" })
+
+        # value argument must be a json-number
+        for arg in [ {}, [], True, "foo" ]:
+            resp = monitor.cmd_qmp("migrate_set_speed", { "value": arg })
+            check_error_resp(resp, "InvalidParameterType",
+                             { "name": "value", "expected": "number" })
+
+        # qdev-type commands have their own argument checker, all QMP does
+        # is to skip its checking and pass arguments through. Check this
+        # works by providing invalid options to device_add and expecting
+        # an error message from qdev
+        resp = monitor.cmd_qmp("device_add", {"driver": "e1000",
+                                              "foo": "bar" })
+        check_error_resp(resp, "PropertyNotFound",
+                               {"device": "e1000", "property": "foo"})
+
+
+    def unknown_commands_suite(monitor):
+        """
+        Check that QMP handles unknown commands correctly.
+        """
+        # We also call a HMP-only command, to be sure it will fail as expected
+        for cmd in [ "bar", "query-", "query-foo", "q", "help" ]:
+            resp = monitor.cmd_qmp(cmd)
+            check_error_resp(resp, "CommandNotFound", { "name": cmd })
+
+
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+
+    # Look for the first qmp monitor available, otherwise, fail the test
+    qmp_monitor = None
+    for m in vm.monitors:
+        if isinstance(m, kvm_monitor.QMPMonitor):
+            qmp_monitor = m
+
+    if qmp_monitor is None:
+        raise error.TestError('Could not find a QMP monitor, aborting test')
+
+    # Run all suites
+    greeting_suite(qmp_monitor)
+    input_object_suite(qmp_monitor)
+    argument_checker_suite(qmp_monitor)
+    unknown_commands_suite(qmp_monitor)
+    json_parsing_errors_suite(qmp_monitor)
+
+    # check if QMP is still alive
+    if not qmp_monitor.is_responsive():
+        raise error.TestFail('QMP monitor is not responsive after testing')
diff --git a/client/tests/kvm/tests/set_link.py b/client/tests/kvm/tests/set_link.py
new file mode 100644
index 0000000..d73a1b8
--- /dev/null
+++ b/client/tests/kvm/tests/set_link.py
@@ -0,0 +1,60 @@
+import logging
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.tests.kvm.tests import file_transfer
+import kvm_test_utils
+
+
+def run_set_link(test, params, env):
+    """
+    KVM guest link test:
+    1) Boot up guest with one nic
+    2) Ping guest from host
+    3) Disable guest link and ping guest from host
+    4) Re-enable guest link and ping guest from host
+    5) Do file transfer test
+
+    @param test: kvm test object
+    @param params: Dictionary with the test parameters
+    @param env: Dictionary with test environment.
+    """
+    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+    timeout = float(params.get("login_timeout", 360))
+    session = kvm_test_utils.wait_for_login(vm, 0, timeout, 0, 2)
+
+    ip = vm.get_address(0)
+    linkname = vm.netdev_id[0]
+
+    logging.info("Pinging guest from host")
+    s, o = kvm_test_utils.ping(ip, count=10, timeout=20)
+    if s != 0:
+        raise error.TestFail("Ping failed, status: %s, output: %s" % (s, o))
+    ratio = kvm_test_utils.get_loss_ratio(o)
+    if ratio != 0:
+        raise error.TestFail("Loss ratio is %s, output: %s" % (ratio, o))
+
+    logging.info("Executing 'set link %s off'", linkname)
+    vm.monitor.cmd("set_link %s off" % linkname)
+    logging.info(vm.monitor.info("network"))
+    logging.info("Pinging guest from host")
+    s, o = kvm_test_utils.ping(ip, count=10, timeout=20)
+    if s == 0:
+        raise error.TestFail("Ping unexpectedly succeeded, status: %s,"
+                             "output: %s" % (s, o))
+    ratio = kvm_test_utils.get_loss_ratio(o)
+    if ratio != 100:
+        raise error.TestFail("Loss ratio is not 100%%,"
+                             "Loss ratio is %s" % ratio)
+
+    logging.info("Executing 'set link %s on'", linkname)
+    vm.monitor.cmd("set_link %s on" % linkname)
+    logging.info(vm.monitor.info("network"))
+    logging.info("Pinging guest from host")
+    s, o = kvm_test_utils.ping(ip, count=10, timeout=20)
+    if s != 0:
+        raise error.TestFail("Ping failed, status: %s, output: %s" % (s, o))
+    ratio = kvm_test_utils.get_loss_ratio(o)
+    if ratio != 0:
+        raise error.TestFail("Loss ratio is %s, output: %s" % (ratio, o))
+
+    file_transfer.run_file_transfer(test, params, env)
+    session.close()
diff --git a/client/tests/kvm/tests/shutdown.py b/client/tests/kvm/tests/shutdown.py
index bfc5477..fc0407f 100644
--- a/client/tests/kvm/tests/shutdown.py
+++ b/client/tests/kvm/tests/shutdown.py
@@ -15,9 +15,10 @@
     @param params: Dictionary with the test parameters
     @param env: Dictionary with test environment
     """
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
     timeout = int(params.get("login_timeout", 360))
-    session = kvm_test_utils.wait_for_login(vm, timeout=timeout)
+    session = vm.wait_for_login(timeout=timeout)
 
     try:
         if params.get("shutdown_method") == "shell":
diff --git a/client/tests/kvm/tests/stepmaker.py b/client/tests/kvm/tests/stepmaker.py
index ee0ed92..5a9acdc 100755
--- a/client/tests/kvm/tests/stepmaker.py
+++ b/client/tests/kvm/tests/stepmaker.py
@@ -7,10 +7,10 @@
 @version: "20090401"
 """
 
-import pygtk, gtk, gobject, time, os, commands
+import pygtk, gtk, gobject, time, os, commands, logging
 import common
 from autotest_lib.client.common_lib import error
-import kvm_utils, logging, ppm_utils, stepeditor, kvm_monitor
+import kvm_utils, ppm_utils, stepeditor, kvm_monitor
 pygtk.require('2.0')
 
 
@@ -337,7 +337,7 @@
 
 
 def run_stepmaker(test, params, env):
-    vm = kvm_utils.env_get_vm(env, params.get("main_vm"))
+    vm = env.get_vm(params.get("main_vm"))
     if not vm:
         raise error.TestError("VM object not found in environment")
     if not vm.is_alive():
diff --git a/client/tests/kvm/tests/steps.py b/client/tests/kvm/tests/steps.py
index 6f782f5..91b864d 100644
--- a/client/tests/kvm/tests/steps.py
+++ b/client/tests/kvm/tests/steps.py
@@ -4,9 +4,9 @@
 @copyright: Red Hat 2008-2009
 """
 
-import os, time, re, shutil, logging
-from autotest_lib.client.common_lib import utils, error
-import kvm_utils, ppm_utils, kvm_subprocess, kvm_monitor
+import os, time, shutil, logging
+from autotest_lib.client.common_lib import error
+import kvm_utils, ppm_utils, kvm_monitor
 try:
     import PIL.Image
 except ImportError:
@@ -97,7 +97,7 @@
         # Make sure image is valid
         if not ppm_utils.image_verify_ppm_file(scrdump_filename):
             logging.warn("Got invalid screendump: dimensions: %dx%d, "
-                         "data size: %d" % (w, h, len(data)))
+                         "data size: %d", w, h, len(data))
             continue
 
         # Compute md5sum of whole image
@@ -181,7 +181,7 @@
 
 
 def run_steps(test, params, env):
-    vm = kvm_utils.env_get_vm(env, params.get("main_vm"))
+    vm = env.get_vm(params.get("main_vm"))
     if not vm:
         raise error.TestError("VM object not found in environment")
     if not vm.is_alive():
@@ -231,7 +231,7 @@
             vm.send_key(words[1])
         elif words[0] == "var":
             if not handle_var(vm, params, words[1]):
-                logging.error("Variable not defined: %s" % words[1])
+                logging.error("Variable not defined: %s", words[1])
         elif words[0] == "barrier_2":
             if current_screendump:
                 scrdump_filename = os.path.join(
diff --git a/client/tests/kvm/tests/stress_boot.py b/client/tests/kvm/tests/stress_boot.py
index b7916b4..0c422c0 100644
--- a/client/tests/kvm/tests/stress_boot.py
+++ b/client/tests/kvm/tests/stress_boot.py
@@ -1,9 +1,10 @@
-import logging, time
+import logging
 from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils, kvm_preprocessing
+import kvm_preprocessing
 
 
-def run_stress_boot(tests, params, env):
+@error.context_aware
+def run_stress_boot(test, params, env):
     """
     Boots VMs until one of them becomes unresponsive, and records the maximum
     number of VMs successfully started:
@@ -16,51 +17,37 @@
     @param params: Dictionary with the test parameters
     @param env:    Dictionary with test environment.
     """
-    # boot the first vm
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
-
-    logging.info("Waiting for first guest to be up...")
-
+    error.base_context("waiting for the first guest to be up", logging.info)
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
     login_timeout = float(params.get("login_timeout", 240))
-    session = kvm_utils.wait_for(vm.remote_login, login_timeout, 0, 2)
-    if not session:
-        raise error.TestFail("Could not log into first guest")
+    session = vm.wait_for_login(timeout=login_timeout)
 
     num = 2
     sessions = [session]
 
-    # boot the VMs
-    while num <= int(params.get("max_vms")):
-        try:
-            # clone vm according to the first one
-            vm_name = "vm" + str(num)
-            vm_params = vm.get_params().copy()
+    # Boot the VMs
+    try:
+        while num <= int(params.get("max_vms")):
+            # Clone vm according to the first one
+            error.base_context("booting guest #%d" % num, logging.info)
+            vm_name = "vm%d" % num
+            vm_params = vm.params.copy()
             curr_vm = vm.clone(vm_name, vm_params)
-            kvm_utils.env_register_vm(env, vm_name, curr_vm)
-            logging.info("Booting guest #%d" % num)
-            kvm_preprocessing.preprocess_vm(tests, vm_params, env, vm_name)
-            params['vms'] += " " + vm_name
+            env.register_vm(vm_name, curr_vm)
+            kvm_preprocessing.preprocess_vm(test, vm_params, env, vm_name)
+            params["vms"] += " " + vm_name
 
-            curr_vm_session = kvm_utils.wait_for(curr_vm.remote_login,
-                                                 login_timeout, 0, 2)
-            if not curr_vm_session:
-                raise error.TestFail("Could not log into guest #%d" % num)
+            sessions.append(curr_vm.wait_for_login(timeout=login_timeout))
+            logging.info("Guest #%d booted up successfully", num)
 
-            logging.info("Guest #%d boots up successfully" % num)
-            sessions.append(curr_vm_session)
-
-            # check whether all previous shell sessions are responsive
+            # Check whether all previous shell sessions are responsive
             for i, se in enumerate(sessions):
-                if se.get_command_status(params.get("alive_test_cmd")) != 0:
-                    raise error.TestFail("Session #%d is not responsive" % i)
+                error.context("checking responsiveness of guest #%d" % (i + 1),
+                              logging.debug)
+                se.cmd(params.get("alive_test_cmd"))
             num += 1
-
-        except (error.TestFail, OSError):
-            for se in sessions:
-                se.close()
-            logging.info("Total number booted: %d" % (num - 1))
-            raise
-    else:
+    finally:
         for se in sessions:
             se.close()
         logging.info("Total number booted: %d" % (num -1))
diff --git a/client/tests/kvm/tests/timedrift.py b/client/tests/kvm/tests/timedrift.py
index a6d3076..9f62b4a 100644
--- a/client/tests/kvm/tests/timedrift.py
+++ b/client/tests/kvm/tests/timedrift.py
@@ -1,6 +1,6 @@
-import logging, time, commands, re
+import logging, time, commands
 from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils
+import kvm_subprocess, kvm_test_utils
 
 
 def run_timedrift(test, params, env):
@@ -52,9 +52,10 @@
         for tid, mask in prev_masks.items():
             commands.getoutput("taskset -p %s %s" % (mask, tid))
 
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
     timeout = int(params.get("login_timeout", 360))
-    session = kvm_test_utils.wait_for_login(vm, timeout=timeout)
+    session = vm.wait_for_login(timeout=timeout)
 
     # Collect test parameters:
     # Command to run to get the current time
@@ -87,9 +88,7 @@
             # Open shell sessions with the guest
             logging.info("Starting load on guest...")
             for i in range(guest_load_instances):
-                load_session = vm.remote_login()
-                if not load_session:
-                    raise error.TestFail("Could not log into guest")
+                load_session = vm.login()
                 # Set output func to None to stop it from being called so we
                 # can change the callback function and the parameters it takes
                 # with no problems
@@ -123,7 +122,7 @@
                 set_cpu_affinity(pid, cpu_mask)
 
             # Sleep for a while (during load)
-            logging.info("Sleeping for %s seconds..." % load_duration)
+            logging.info("Sleeping for %s seconds...", load_duration)
             time.sleep(load_duration)
 
             # Get time delta after load
@@ -136,9 +135,9 @@
             host_delta = ht1 - ht0
             guest_delta = gt1 - gt0
             drift = 100.0 * (host_delta - guest_delta) / host_delta
-            logging.info("Host duration: %.2f" % host_delta)
-            logging.info("Guest duration: %.2f" % guest_delta)
-            logging.info("Drift: %.2f%%" % drift)
+            logging.info("Host duration: %.2f", host_delta)
+            logging.info("Guest duration: %.2f", guest_delta)
+            logging.info("Drift: %.2f%%", drift)
 
         finally:
             logging.info("Cleaning up...")
@@ -146,7 +145,7 @@
             restore_cpu_affinity(prev_affinity)
             # Stop the guest load
             if guest_load_stop_command:
-                session.get_command_output(guest_load_stop_command)
+                session.cmd_output(guest_load_stop_command)
             # Close all load shell sessions
             for load_session in guest_load_sessions:
                 load_session.close()
@@ -154,7 +153,7 @@
                 load_session.close()
 
         # Sleep again (rest)
-        logging.info("Sleeping for %s seconds..." % rest_duration)
+        logging.info("Sleeping for %s seconds...", rest_duration)
         time.sleep(rest_duration)
 
         # Get time after rest
@@ -170,9 +169,9 @@
     host_delta_total = ht2 - ht0
     guest_delta_total = gt2 - gt0
     drift_total = 100.0 * (host_delta_total - guest_delta_total) / host_delta
-    logging.info("Total host duration including rest: %.2f" % host_delta_total)
-    logging.info("Total guest duration including rest: %.2f" % guest_delta_total)
-    logging.info("Total drift after rest: %.2f%%" % drift_total)
+    logging.info("Total host duration including rest: %.2f", host_delta_total)
+    logging.info("Total guest duration including rest: %.2f", guest_delta_total)
+    logging.info("Total drift after rest: %.2f%%", drift_total)
 
     # Fail the test if necessary
     if abs(drift) > drift_threshold:
diff --git a/client/tests/kvm/tests/timedrift_with_migration.py b/client/tests/kvm/tests/timedrift_with_migration.py
index e953ed3..b1d4f3e 100644
--- a/client/tests/kvm/tests/timedrift_with_migration.py
+++ b/client/tests/kvm/tests/timedrift_with_migration.py
@@ -1,6 +1,6 @@
-import logging, time, commands, re
+import logging
 from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils
+import kvm_test_utils
 
 
 def run_timedrift_with_migration(test, params, env):
@@ -17,9 +17,10 @@
     @param params: Dictionary with test parameters.
     @param env: Dictionary with the test environment.
     """
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
     timeout = int(params.get("login_timeout", 360))
-    session = kvm_test_utils.wait_for_login(vm, timeout=timeout)
+    session = vm.wait_for_login(timeout=timeout)
 
     # Collect test parameters:
     # Command to run to get the current time
@@ -45,14 +46,12 @@
                                                    time_filter_re, time_format)
             session.close()
             # Run current iteration
-            logging.info("Migrating: iteration %d of %d..." %
-                         (i + 1, migration_iterations))
-            vm = kvm_test_utils.migrate(vm, env)
+            logging.info("Migrating: iteration %d of %d...",
+                         (i + 1), migration_iterations)
+            vm.migrate()
             # Log in
             logging.info("Logging in after migration...")
-            session = kvm_utils.wait_for(vm.remote_login, 30, 0, 2)
-            if not session:
-                raise error.TestFail("Could not log in after migration")
+            session = vm.wait_for_login(timeout=30)
             logging.info("Logged in after migration")
             # Get time after current iteration
             (ht1_, gt1_) = kvm_test_utils.get_time(session, time_command,
@@ -61,12 +60,12 @@
             host_delta = ht1_ - ht0_
             guest_delta = gt1_ - gt0_
             drift = abs(host_delta - guest_delta)
-            logging.info("Host duration (iteration %d): %.2f" %
-                         (i + 1, host_delta))
-            logging.info("Guest duration (iteration %d): %.2f" %
-                         (i + 1, guest_delta))
-            logging.info("Drift at iteration %d: %.2f seconds" %
-                         (i + 1, drift))
+            logging.info("Host duration (iteration %d): %.2f",
+                         (i + 1), host_delta)
+            logging.info("Guest duration (iteration %d): %.2f",
+                         (i + 1), guest_delta)
+            logging.info("Drift at iteration %d: %.2f seconds",
+                         (i + 1), drift)
             # Fail if necessary
             if drift > drift_threshold_single:
                 raise error.TestFail("Time drift too large at iteration %d: "
@@ -84,12 +83,12 @@
     host_delta = ht1 - ht0
     guest_delta = gt1 - gt0
     drift = abs(host_delta - guest_delta)
-    logging.info("Host duration (%d migrations): %.2f" %
-                 (migration_iterations, host_delta))
-    logging.info("Guest duration (%d migrations): %.2f" %
-                 (migration_iterations, guest_delta))
-    logging.info("Drift after %d migrations: %.2f seconds" %
-                 (migration_iterations, drift))
+    logging.info("Host duration (%d migrations): %.2f",
+                 migration_iterations, host_delta)
+    logging.info("Guest duration (%d migrations): %.2f",
+                 migration_iterations, guest_delta)
+    logging.info("Drift after %d migrations: %.2f seconds",
+                 migration_iterations, drift)
 
     # Fail if necessary
     if drift > drift_threshold:
diff --git a/client/tests/kvm/tests/timedrift_with_reboot.py b/client/tests/kvm/tests/timedrift_with_reboot.py
index 22dfd45..05ef21f 100644
--- a/client/tests/kvm/tests/timedrift_with_reboot.py
+++ b/client/tests/kvm/tests/timedrift_with_reboot.py
@@ -1,6 +1,6 @@
-import logging, time, commands, re
+import logging
 from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils
+import kvm_test_utils
 
 
 def run_timedrift_with_reboot(test, params, env):
@@ -17,9 +17,10 @@
     @param params: Dictionary with test parameters.
     @param env: Dictionary with the test environment.
     """
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
     timeout = int(params.get("login_timeout", 360))
-    session = kvm_test_utils.wait_for_login(vm, timeout=timeout)
+    session = vm.wait_for_login(timeout=timeout)
 
     # Collect test parameters:
     # Command to run to get the current time
@@ -44,9 +45,9 @@
             (ht0_, gt0_) = kvm_test_utils.get_time(session, time_command,
                                                    time_filter_re, time_format)
             # Run current iteration
-            logging.info("Rebooting: iteration %d of %d..." %
-                         (i + 1, reboot_iterations))
-            session = kvm_test_utils.reboot(vm, session)
+            logging.info("Rebooting: iteration %d of %d...",
+                         (i + 1), reboot_iterations)
+            session = vm.reboot(session)
             # Get time after current iteration
             (ht1_, gt1_) = kvm_test_utils.get_time(session, time_command,
                                                    time_filter_re, time_format)
@@ -54,12 +55,12 @@
             host_delta = ht1_ - ht0_
             guest_delta = gt1_ - gt0_
             drift = abs(host_delta - guest_delta)
-            logging.info("Host duration (iteration %d): %.2f" %
-                         (i + 1, host_delta))
-            logging.info("Guest duration (iteration %d): %.2f" %
-                         (i + 1, guest_delta))
-            logging.info("Drift at iteration %d: %.2f seconds" %
-                         (i + 1, drift))
+            logging.info("Host duration (iteration %d): %.2f",
+                         (i + 1), host_delta)
+            logging.info("Guest duration (iteration %d): %.2f",
+                         (i + 1), guest_delta)
+            logging.info("Drift at iteration %d: %.2f seconds",
+                         (i + 1), drift)
             # Fail if necessary
             if drift > drift_threshold_single:
                 raise error.TestFail("Time drift too large at iteration %d: "
@@ -77,12 +78,12 @@
     host_delta = ht1 - ht0
     guest_delta = gt1 - gt0
     drift = abs(host_delta - guest_delta)
-    logging.info("Host duration (%d reboots): %.2f" %
-                 (reboot_iterations, host_delta))
-    logging.info("Guest duration (%d reboots): %.2f" %
-                 (reboot_iterations, guest_delta))
-    logging.info("Drift after %d reboots: %.2f seconds" %
-                 (reboot_iterations, drift))
+    logging.info("Host duration (%d reboots): %.2f",
+                 reboot_iterations, host_delta)
+    logging.info("Guest duration (%d reboots): %.2f",
+                 reboot_iterations, guest_delta)
+    logging.info("Drift after %d reboots: %.2f seconds",
+                 reboot_iterations, drift)
 
     # Fail if necessary
     if drift > drift_threshold:
diff --git a/client/tests/kvm/tests/timedrift_with_stop.py b/client/tests/kvm/tests/timedrift_with_stop.py
index 3473276..9f51ff9 100644
--- a/client/tests/kvm/tests/timedrift_with_stop.py
+++ b/client/tests/kvm/tests/timedrift_with_stop.py
@@ -1,6 +1,6 @@
-import logging, time, commands, re
+import logging, time
 from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils
+import kvm_test_utils
 
 
 def run_timedrift_with_stop(test, params, env):
@@ -21,8 +21,9 @@
     """
     login_timeout = int(params.get("login_timeout", 360))
     sleep_time = int(params.get("sleep_time", 30))
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
-    session = kvm_test_utils.wait_for_login(vm, timeout=login_timeout)
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+    session = vm.wait_for_login(timeout=login_timeout)
 
     # Collect test parameters:
     # Command to run to get the current time
@@ -48,8 +49,8 @@
             (ht0_, gt0_) = kvm_test_utils.get_time(session, time_command,
                                                    time_filter_re, time_format)
             # Run current iteration
-            logging.info("Stop %s second: iteration %d of %d..." %
-                         (stop_time, i + 1, stop_iterations))
+            logging.info("Stop %s second: iteration %d of %d...",
+                         stop_time, (i + 1), stop_iterations)
 
             vm.monitor.cmd("stop")
             time.sleep(stop_time)
@@ -66,12 +67,12 @@
             host_delta = ht1_ - ht0_
             guest_delta = gt1_ - gt0_
             drift = abs(host_delta - guest_delta)
-            logging.info("Host duration (iteration %d): %.2f" %
-                         (i + 1, host_delta))
-            logging.info("Guest duration (iteration %d): %.2f" %
-                         (i + 1, guest_delta))
-            logging.info("Drift at iteration %d: %.2f seconds" %
-                         (i + 1, drift))
+            logging.info("Host duration (iteration %d): %.2f",
+                         (i + 1), host_delta)
+            logging.info("Guest duration (iteration %d): %.2f",
+                         (i + 1), guest_delta)
+            logging.info("Drift at iteration %d: %.2f seconds",
+                         (i + 1), drift)
             # Fail if necessary
             if drift > drift_threshold_single:
                 raise error.TestFail("Time drift too large at iteration %d: "
@@ -89,12 +90,12 @@
     host_delta = ht1 - ht0
     guest_delta = gt1 - gt0
     drift = abs(host_delta - guest_delta)
-    logging.info("Host duration (%d stops): %.2f" %
-                 (stop_iterations, host_delta))
-    logging.info("Guest duration (%d stops): %.2f" %
-                 (stop_iterations, guest_delta))
-    logging.info("Drift after %d stops: %.2f seconds" %
-                 (stop_iterations, drift))
+    logging.info("Host duration (%d stops): %.2f",
+                 stop_iterations, host_delta)
+    logging.info("Guest duration (%d stops): %.2f",
+                 stop_iterations, guest_delta)
+    logging.info("Drift after %d stops: %.2f seconds",
+                 stop_iterations, drift)
 
     # Fail if necessary
     if drift > drift_threshold:
diff --git a/client/tests/kvm/tests/unattended_install.py b/client/tests/kvm/tests/unattended_install.py
index 471ab56..7c6d845 100644
--- a/client/tests/kvm/tests/unattended_install.py
+++ b/client/tests/kvm/tests/unattended_install.py
@@ -1,8 +1,9 @@
-import logging, time, socket
+import logging, time, socket, re
 from autotest_lib.client.common_lib import error
-import kvm_utils, kvm_test_utils
+import kvm_vm
 
 
+@error.context_aware
 def run_unattended_install(test, params, env):
     """
     Unattended install test:
@@ -13,46 +14,56 @@
     @param params: Dictionary with the test parameters.
     @param env: Dictionary with test environment.
     """
-    buf = 1024
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
 
+    install_timeout = int(params.get("timeout", 3000))
+    post_install_delay = int(params.get("post_install_delay", 0))
     port = vm.get_port(int(params.get("guest_port_unattended_install")))
-    if params.get("post_install_delay"):
-        post_install_delay = int(params.get("post_install_delay"))
-    else:
-        post_install_delay = 0
 
-    install_timeout = float(params.get("timeout", 3000))
-    logging.info("Starting unattended install watch process. "
-                 "Timeout set to %ds (%d min)", install_timeout,
-                 install_timeout/60)
+    migrate_background = params.get("migrate_background") == "yes"
+    if migrate_background:
+        mig_timeout = float(params.get("mig_timeout", "3600"))
+        mig_protocol = params.get("migration_protocol", "tcp")
+
+    logging.info("Waiting for installation to finish. Timeout set to %d s "
+                 "(%d min)", install_timeout, install_timeout/60)
+    error.context("waiting for installation to finish")
+
     start_time = time.time()
-    time_elapsed = 0
-    while time_elapsed < install_timeout:
-        if not vm.is_alive():
-            raise error.TestError("Guest died before end of OS install")
+    while (time.time() - start_time) < install_timeout:
+        vm.verify_alive()
         client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-        addr = vm.get_address()
-        if addr is not None:
-            try:
-                client.connect((addr, port))
-                msg = client.recv(1024)
-                if msg == 'done':
-                    if post_install_delay:
-                        logging.debug("Post install delay specified, "
-                                      "waiting %ss...", post_install_delay)
-                        time.sleep(post_install_delay)
-                    break
-            except socket.error:
-                pass
-        time.sleep(1)
+        try:
+            client.connect((vm.get_address(), port))
+            if client.recv(1024) == "done":
+                break
+        except (socket.error, kvm_vm.VMAddressError):
+            pass
+        if migrate_background:
+            # Drop the params which may break the migration
+            # Better method is to use dnsmasq to do the
+            # unattended installation
+            if vm.params.get("initrd"):
+                vm.params["initrd"] = None
+            if vm.params.get("kernel"):
+                vm.params["kernel"] = None
+            if vm.params.get("extra_params"):
+                vm.params["extra_params"] = re.sub("--append '.*'", "",
+                                                   vm.params["extra_params"])
+            vm.migrate(timeout=mig_timeout, protocol=mig_protocol)
+        else:
+            time.sleep(1)
         client.close()
-        end_time = time.time()
-        time_elapsed = int(end_time - start_time)
-
-    if time_elapsed < install_timeout:
-        logging.info('Guest reported successful installation after %ds '
-                     '(%d min)', time_elapsed, time_elapsed/60)
     else:
-        raise error.TestFail('Timeout elapsed while waiting for install to '
-                             'finish.')
+        raise error.TestFail("Timeout elapsed while waiting for install to "
+                             "finish")
+
+    time_elapsed = time.time() - start_time
+    logging.info("Guest reported successful installation after %d s (%d min)",
+                 time_elapsed, time_elapsed/60)
+
+    if post_install_delay:
+        logging.debug("Post install delay specified, waiting %s s...",
+                      post_install_delay)
+        time.sleep(post_install_delay)
diff --git a/client/tests/kvm/tests/unittest.py b/client/tests/kvm/tests/unittest.py
index 54e5f73..9a126a5 100644
--- a/client/tests/kvm/tests/unittest.py
+++ b/client/tests/kvm/tests/unittest.py
@@ -1,6 +1,6 @@
-import logging, time, os, shutil, glob, ConfigParser
+import logging, os, shutil, glob, ConfigParser
 from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils, kvm_preprocessing
+import kvm_utils, kvm_preprocessing
 
 
 def run_unittest(test, params, env):
@@ -34,12 +34,12 @@
     if not test_list:
         raise error.TestError("No tests listed on config file %s" %
                               unittest_cfg)
-    logging.debug('Unit test list: %s' % test_list)
+    logging.debug('Unit test list: %s', test_list)
 
-    if params.get('test_list', None):
-        test_list = kvm_utils.get_sub_dict_names(params, 'test_list')
+    if params.get('test_list'):
+        test_list = params.get('test_list').split()
         logging.info('Original test list overriden by user')
-        logging.info('User defined unit test list: %s' % test_list)
+        logging.info('User defined unit test list: %s', test_list)
 
     nfail = 0
     tests_failed = []
@@ -51,23 +51,23 @@
     for t in test_list:
         logging.info('Running %s', t)
 
-        file = None
+        flat_file = None
         if parser.has_option(t, 'file'):
-            file = parser.get(t, 'file')
+            flat_file = parser.get(t, 'file')
 
-        if file is None:
+        if flat_file is None:
             nfail += 1
             tests_failed.append(t)
             logging.error('Unittest config file %s has section %s but no '
-                          'mandatory option file' % (unittest_cfg, t))
+                          'mandatory option file', unittest_cfg, t)
             continue
 
-        if file not in unittest_list:
+        if flat_file not in unittest_list:
             nfail += 1
             tests_failed.append(t)
             logging.error('Unittest file %s referenced in config file %s but '
-                          'was not find under the unittest dir' %
-                          (file, unittest_cfg))
+                          'was not find under the unittest dir', flat_file,
+                          unittest_cfg)
             continue
 
         smp = None
@@ -81,14 +81,14 @@
             params['extra_params'] += ' %s' % extra_params
 
         vm_name = params.get("main_vm")
-        params['kernel'] = os.path.join(unittest_dir, file)
+        params['kernel'] = os.path.join(unittest_dir, flat_file)
         testlog_path = os.path.join(test.debugdir, "%s.log" % t)
 
         try:
             try:
                 vm_name = params.get('main_vm')
                 kvm_preprocessing.preprocess_vm(test, params, env, vm_name)
-                vm = kvm_utils.env_get_vm(env, vm_name)
+                vm = env.get_vm(vm_name)
                 vm.create()
                 vm.monitor.cmd("cont")
                 logging.info("Waiting for unittest %s to complete, timeout %s, "
@@ -111,7 +111,7 @@
                 shutil.copy(vm.get_testlog_filename(), testlog_path)
                 logging.info("Unit test log collected and available under %s",
                              testlog_path)
-            except NameError, IOError:
+            except (NameError, IOError):
                 logging.error("Not possible to collect logs")
 
         # Restore the extra params so other tests can run normally
diff --git a/client/tests/kvm/tests/unittest_kvmctl.py b/client/tests/kvm/tests/unittest_kvmctl.py
index 4afd862..dd72cb2 100644
--- a/client/tests/kvm/tests/unittest_kvmctl.py
+++ b/client/tests/kvm/tests/unittest_kvmctl.py
@@ -21,7 +21,7 @@
     cmd = "./kvmctl test/x86/bootstrap test/x86/%s.flat" % case
     try:
         results = utils.system_output(cmd)
-    except error.CmdError, e:
+    except error.CmdError:
         raise error.TestFail("Unit test %s failed" % case)
 
     result_file = os.path.join(test.resultsdir, case)
diff --git a/client/tests/kvm/tests/virtio_console.py b/client/tests/kvm/tests/virtio_console.py
index 008ec63..af32bf2 100644
--- a/client/tests/kvm/tests/virtio_console.py
+++ b/client/tests/kvm/tests/virtio_console.py
@@ -1,15 +1,16 @@
 """
 virtio_console test
 
-@copyright: Red Hat 2010
+@copyright: 2010 Red Hat, Inc.
 """
 import array, logging, os, random, re, select, shutil, socket, sys, tempfile
-import threading, time
+import threading, time, traceback
 from collections import deque
 from threading import Thread
 
-import kvm_subprocess, kvm_test_utils, kvm_utils, kvm_preprocessing
 from autotest_lib.client.common_lib import error
+from autotest_lib.client.bin import utils
+import kvm_subprocess, kvm_test_utils, kvm_preprocessing
 
 
 def run_virtio_console(test, params, env):
@@ -30,7 +31,228 @@
     @param params: Dictionary with the test parameters
     @param env: Dictionary with test environment
     """
-    class th_send(Thread):
+    class SubTest(object):
+        """
+        Collect result of subtest of main test.
+        """
+        def __init__(self):
+            """
+            Initialize object
+            """
+            self.result = []
+            self.passed = 0
+            self.failed = 0
+            self.cleanup_func = None
+            self.cleanup_args = None
+
+
+        def set_cleanup_func(self, func, args):
+            """
+            Set cleanup function which is called when subtest fails.
+
+            @param func: Function which should be called when test fails.
+            @param args: Arguments of cleanup function.
+            """
+            self.cleanup_func = func
+            self.cleanup_args = args
+
+
+        def do_test(self, function, args=None, fatal=False, cleanup=True):
+            """
+            Execute subtest function.
+
+            @param function: Object of function.
+            @param args: List of arguments of function.
+            @param fatal: If true exception is forwarded to main test.
+            @param cleanup: If true call cleanup function after crash of test.
+            @return: Whatever the executed subtest function returned.
+            @raise TestError: If the subtest failure is fatal, the
+                        exception from the subtest is re-raised.
+            """
+            if args == None:
+                args = []
+            res = [None, function.func_name, args]
+            try:
+                logging.debug("Start test %s.", function.func_name)
+                ret = function(*args)
+                res[0] = True
+                logging.info(self.result_to_string(res))
+                self.result.append(res)
+                self.passed += 1
+                return ret
+            except:
+                exc_type, exc_value, exc_traceback = sys.exc_info()
+                logging.error("In function (" + function.func_name + "):")
+                logging.error("Call from:\n" +
+                              traceback.format_stack()[-2][:-1])
+                logging.error("Exception from:\n" +
+                              "".join(traceback.format_exception(
+                                                        exc_type, exc_value,
+                                                        exc_traceback.tb_next)))
+                # Clean up environment after subTest crash
+                res[0] = False
+                logging.info(self.result_to_string(res))
+                self.result.append(res)
+                self.failed += 1
+
+                if cleanup:
+                    try:
+                        self.cleanup_func(*self.cleanup_args)
+                    except:
+                        error.TestFail("Cleanup function crash too.")
+                if fatal:
+                    raise
+
+
+        def is_failed(self):
+            """
+            @return: True if any subtest failed, False otherwise.
+            """
+            if self.failed > 0:
+                return True
+            else:
+                return False
+
+
+        def get_result(self):
+            """
+            @return: Result of subtests.
+               Format:
+                 tuple(pass/fail,function_name,call_arguments)
+            """
+            return self.result
+
+
+        def result_to_string_debug(self, result):
+            """
+            @param result: Result of test.
+            """
+            sargs = ""
+            for arg in result[2]:
+                sargs += str(arg) + ","
+            sargs = sargs[:-1]
+            if result[0]:
+                status = "PASS"
+            else:
+                status = "FAIL"
+            return ("Subtest (%s(%s)): --> %s") % (result[1], sargs, status)
+
+
+        def result_to_string(self, result):
+            """
+            @param result: Result of test.
+            """
+            if result[0]:
+                status = "PASS"
+            else:
+                status = "FAIL"
+            return ("Subtest (%s): --> %s") % (result[1], status)
+
+
+        def headline(self, msg):
+            """
+            Add headline to result output.
+
+            @param msg: Text of the headline.
+            """
+            self.result.append([msg])
+
+
+        def _gen_res(self, format_func):
+            """
+            Format result with a formatting function.
+
+            @param format_func: Function used to format a single result.
+            """
+            result = ""
+            for res in self.result:
+                if (len(res) == 3):
+                    result += format_func(res) + "\n"
+                else:
+                    result += res[0] + "\n"
+            return result
+
+
+        def get_full_text_result(self):
+            """
+            @return string with text form of result
+            """
+            return self._gen_res(lambda str: self.result_to_string_debug(str))
+
+
+        def get_text_result(self):
+            """
+            @return string with text form of result
+            """
+            return self._gen_res(lambda str: self.result_to_string(str))
+
+
+    class Port(object):
+        """
+        Define structure to keep information about used port.
+        """
+        def __init__(self, sock, name, port_type, path):
+            """
+            @param vm: Virtual machine object that owns the port.
+            @param sock: Socket of port if port is open.
+            @param name: Name of port for guest side.
+            @param port_type: Type of port: yes = console, no = serialport.
+            @param path: Path to port on host side.
+            """
+            self.sock = sock
+            self.name = name
+            self.port_type = port_type
+            self.path = path
+            self.is_open = False
+
+
+        def for_guest(self):
+            """
+            Format data for communication with guest side.
+            """
+            return [self.name, self.port_type]
+
+
+        def open(self):
+            """
+            Open port on host side.
+            """
+            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+            self.sock.connect(self.path)
+            self.is_open = True
+
+
+        def clean_port(self):
+            """
+            Clean all data from opened port on host side.
+            """
+            if self.is_open:
+                self.close()
+            self.open()
+            ret = select.select([self.sock], [], [], 1.0)
+            if ret[0]:
+                buf = self.sock.recv(1024)
+                logging.debug("Rest in socket: " + buf)
+
+
+        def close(self):
+            """
+            Close port.
+            """
+            self.sock.shutdown(socket.SHUT_RDWR)
+            self.sock.close()
+            self.is_open = False
+
+
+        def __str__(self):
+            """
+            Convert to text.
+            """
+            return ("%s,%s,%s,%s,%d" % ("Socket", self.name, self.port_type,
+                                        self.path, self.is_open))
+
+
+    class ThSend(Thread):
         """
         Random data sender thread.
         """
@@ -53,14 +275,14 @@
 
 
         def run(self):
-            logging.debug("th_send %s: run", self.getName())
+            logging.debug("ThSend %s: run", self.getName())
             while not self.exitevent.isSet():
                 self.idx += self.port.send(self.data)
-            logging.debug("th_send %s: exit(%d)", self.getName(),
+            logging.debug("ThSend %s: exit(%d)", self.getName(),
                           self.idx)
 
 
-    class th_send_check(Thread):
+    class ThSendCheck(Thread):
         """
         Random data sender thread.
         """
@@ -85,7 +307,7 @@
 
 
         def run(self):
-            logging.debug("th_send_check %s: run", self.getName())
+            logging.debug("ThSendCheck %s: run", self.getName())
             too_much_data = False
             while not self.exitevent.isSet():
                 # FIXME: workaround the problem with qemu-kvm stall when too
@@ -109,14 +331,14 @@
                         idx = self.port.send(buf)
                         buf = buf[idx:]
                         self.idx += idx
-            logging.debug("th_send_check %s: exit(%d)", self.getName(),
+            logging.debug("ThSendCheck %s: exit(%d)", self.getName(),
                           self.idx)
             if too_much_data:
-                logging.error("th_send_check: workaround the 'too_much_data'"
+                logging.error("ThSendCheck: workaround the 'too_much_data'"
                               "bug")
 
 
-    class th_recv(Thread):
+    class ThRecv(Thread):
         """
         Recieves data and throws it away.
         """
@@ -134,7 +356,7 @@
             self.blocklen = blocklen
             self.idx = 0
         def run(self):
-            logging.debug("th_recv %s: run", self.getName())
+            logging.debug("ThRecv %s: run", self.getName())
             while not self.exitevent.isSet():
                 # TODO: Workaround, it didn't work with select :-/
                 try:
@@ -142,33 +364,33 @@
                 except socket.timeout:
                     pass
             self.port.settimeout(self._port_timeout)
-            logging.debug("th_recv %s: exit(%d)", self.getName(), self.idx)
+            logging.debug("ThRecv %s: exit(%d)", self.getName(), self.idx)
 
 
-    class th_recv_check(Thread):
+    class ThRecvCheck(Thread):
         """
         Random data receiver/checker thread.
         """
-        def __init__(self, port, buffer, event, blocklen=1024):
+        def __init__(self, port, buf, event, blocklen=1024):
             """
             @param port: Source port.
-            @param buffer: Control data buffer (FIFO).
+            @param buf: Control data buffer (FIFO).
             @param length: Amount of data we want to receive.
             @param blocklen: Block length.
             """
             Thread.__init__(self)
             self.port = port
-            self.buffer = buffer
+            self.buffer = buf
             self.exitevent = event
             self.blocklen = blocklen
             self.idx = 0
 
 
         def run(self):
-            logging.debug("th_recv_check %s: run", self.getName())
+            logging.debug("ThRecvCheck %s: run", self.getName())
             while not self.exitevent.isSet():
                 ret = select.select([self.port], [], [], 1.0)
-                if ret and (not self.exitevent.isSet()):
+                if ret[0] and (not self.exitevent.isSet()):
                     buf = self.port.recv(self.blocklen)
                     if buf:
                         # Compare the recvd data with the control data
@@ -186,156 +408,13 @@
                                 for buf in self.buffer:
                                     ch_ += buf
                                 logging.error("Queue = %s", repr(ch_))
-                                raise error.TestFail("th_recv_check: incorrect "
+                                raise error.TestFail("ThRecvCheck: incorrect "
                                                      "data")
                         self.idx += len(buf)
-            logging.debug("th_recv_check %s: exit(%d)", self.getName(),
+            logging.debug("ThRecvCheck %s: exit(%d)", self.getName(),
                           self.idx)
 
 
-    class cpu_load():
-        """
-        Get average cpu load between start and get_load.
-        """
-        def __init__ (self):
-            self.old_load = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
-            self.startTime = 0
-            self.endTime = 0
-
-
-        def _get_cpu_load(self):
-            # Let's see if we can calc system load.
-            try:
-                f = open("/proc/stat", "r")
-                tmp = f.readlines(200)
-                f.close()
-            except:
-                logging.critical("Error reading /proc/stat")
-                error.TestFail("average_cpu_load: Error reading /proc/stat")
-
-            # 200 bytes should be enough because the information we need
-            # is typically stored in the first line
-            # Info about individual processors (not yet supported) is in
-            # the second (third, ...?) line
-            for line in tmp:
-                if line[0:4] == "cpu ":
-                    reg = re.compile('[0-9]+')
-                    load_values = reg.findall(line)
-                    # extract values from /proc/stat
-                    load = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
-                    for i in range(8):
-                        load[i] = int(load_values[i]) - self.old_load[i]
-
-                    for i in range(8):
-                        self.old_load[i] = int(load_values[i])
-                    return load
-
-
-        def start (self):
-            """
-            Start CPU usage measurement
-            """
-            self.old_load = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
-            self.startTime = time.time()
-            self._get_cpu_load()
-
-
-        def get_load(self):
-            """
-            Get and reset CPU usage
-
-            @return: return group cpu (user[%], system[%], sum[%], testTime[s])
-            """
-            self.endTime = time.time()
-            testTime = self.endTime - self.startTime
-            load = self._get_cpu_load()
-
-            user = load[0] / testTime
-            system = load[2] / testTime
-            sum = user + system
-
-            return (user, system, sum, testTime)
-
-
-    class pid_load():
-        """
-        Get average process cpu load between start and get_load
-        """
-        def __init__ (self, pid, name):
-            self.old_load = [0, 0]
-            self.startTime = 0
-            self.endTime = 0
-            self.pid = pid
-            self.name = name
-
-
-        def _get_cpu_load(self, pid):
-            # Let's see if we can calc system load.
-            try:
-                f = open("/proc/%d/stat" % (pid), "r")
-                line = f.readline()
-                f.close()
-            except:
-                logging.critical("Error reading /proc/%d/stat", pid)
-                error.TestFail("average_process_cpu_load: Error reading "
-                               "/proc/stat")
-            else:
-                reg = re.compile('[0-9]+')
-                load_values = reg.findall(line)
-                del load_values[0:11]
-                # extract values from /proc/stat
-                load = [0, 0]
-                for i in range(2):
-                    load[i] = int(load_values[i]) - self.old_load[i]
-
-                for i in range(2):
-                    self.old_load[i] = int(load_values[i])
-                return load
-
-
-        def start (self):
-            """
-            Start CPU usage measurement
-            """
-            self.old_load = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
-            self.startTime = time.time()
-            self._get_cpu_load(self.pid)
-
-
-        def get_load(self):
-            """
-            Get and reset CPU usage.
-
-            @return: Group cpu
-                    (pid, user[%], system[%], sum[%], testTime[s])
-            """
-            self.endTime = time.time()
-            testTime = self.endTime - self.startTime
-            load = self._get_cpu_load(self.pid)
-
-            user = load[0] / testTime
-            system = load[1] / testTime
-            sum = user + system
-
-            return (self.name, self.pid, user, system, sum, testTime)
-
-
-    def print_load(process, system):
-        """
-        Print load in tabular mode.
-
-        @param process: List of process statistic tuples.
-        @param system: Tuple of system cpu usage.
-        """
-
-        logging.info("%-10s %6s %5s %5s %5s %11s",
-                     "NAME", "PID", "USER", "SYS", "SUM", "TIME")
-        for pr in process:
-            logging.info("%-10s %6d %4.0f%% %4.0f%% %4.0f%% %10.3fs" % pr)
-        logging.info("TOTAL:     ------ %4.0f%% %4.0f%% %4.0f%% %10.3fs" %
-                     system)
-
-
     def process_stats(stats, scale=1.0):
         """
         Process and print the statistic.
@@ -352,31 +431,32 @@
         return stats
 
 
-    def init_guest(vm, timeout=2):
+    def _init_guest(vm, timeout=2):
         """
-        Execute virtio_guest.py on guest, wait until it is initialized.
+        Execute virtio_console_guest.py on guest, wait until it is initialized.
 
         @param vm: Informations about the guest.
         @param timeout: Timeout that will be used to verify if the script
                 started properly.
         """
-        logging.debug("compile virtio_guest.py on guest %s", vm[0].name)
-        vm[1].sendline("python -OO /tmp/virtio_guest.py -c &&"
+        logging.debug("compile virtio_console_guest.py on guest %s", vm[0].name)
+
+        (match, data) = _on_guest("python -OO /tmp/virtio_console_guest.py -c &&"
                        "echo -n 'PASS: Compile virtio_guest finished' ||"
-                       "echo -n 'FAIL: Compile virtio_guest failed'")
-        (match, data) = vm[1].read_until_last_line_matches(["PASS:", "FAIL:"],
-                                                           timeout)
-        if match == 1 or match is None:
+                       "echo -n 'FAIL: Compile virtio_guest failed'",
+                        vm, timeout)
+
+        if match != 0:
             raise error.TestFail("Command console_switch.py on guest %s failed."
                                  "\nreturn code: %s\n output:\n%s" %
                                  (vm[0].name, match, data))
-        logging.debug("Starting virtio_guest.py on guest %s", vm[0].name)
-        vm[1].sendline("python /tmp/virtio_guest.pyo &&"
+        logging.debug("Starting virtio_console_guest.py on guest %s", vm[0].name)
+        vm[1].sendline()
+        (match, data) = _on_guest("python /tmp/virtio_console_guest.pyo &&"
                        "echo -n 'PASS: virtio_guest finished' ||"
-                       "echo -n 'FAIL: virtio_guest failed'")
-        (match, data) = vm[1].read_until_last_line_matches(["PASS:", "FAIL:"],
-                                                           timeout)
-        if match == 1 or match is None:
+                       "echo -n 'FAIL: virtio_guest failed'",
+                       vm, timeout)
+        if match != 0:
             raise error.TestFail("Command console_switch.py on guest %s failed."
                                  "\nreturn code: %s\n output:\n%s" %
                                  (vm[0].name, match, data))
@@ -384,6 +464,47 @@
         time.sleep(2)
 
 
+    def init_guest(vm, consoles):
+        """
+        Prepares guest, executes virtio_console_guest.py and initializes test.
+
+        @param vm: Informations about the guest.
+        @param consoles: Informations about consoles.
+        """
+        conss = []
+        for mode in consoles:
+            for cons in mode:
+                conss.append(cons.for_guest())
+        _init_guest(vm, 10)
+        on_guest("virt.init(%s)" % (conss), vm, 10)
+
+
+    def _search_kernel_crashlog(vm, timeout = 2):
+        """
+        Find kernel crash message.
+
+        @param vm: Informations about the guest.
+        @param timeout: Timeout used to verify expected output.
+
+        @return: Kernel crash log or None.
+        """
+        data = vm[3].read_nonblocking()
+        match = re.search("^BUG:", data, re.MULTILINE)
+        if match == None:
+            return None
+
+        match = re.search(r"^BUG:.*^---\[ end trace .* \]---",
+                  data, re.DOTALL |re.MULTILINE)
+        if match == None:
+            data += vm[3].read_until_last_line_matches(
+                                            ["---\[ end trace .* \]---"],timeout)
+
+        match = re.search(r"(^BUG:.*^---\[ end trace .* \]---)",
+                  data, re.DOTALL |re.MULTILINE)
+        return match.group(0)
+
+
+
     def _on_guest(command, vm, timeout=2):
         """
         Execute given command inside the script's main loop, indicating the vm
@@ -395,12 +516,23 @@
 
         @return: Tuple (match index, data)
         """
-        logging.debug("Executing '%s' on virtio_guest.py loop, vm: %s," +
+        logging.debug("Executing '%s' on virtio_console_guest.py loop, vm: %s," +
                       "timeout: %s", command, vm[0].name, timeout)
         vm[1].sendline(command)
-        (match, data) = vm[1].read_until_last_line_matches(["PASS:", 
-                                                    "FAIL:[Failed to execute]"],
-                                                    timeout)
+        try:
+            (match, data) = vm[1].read_until_last_line_matches(["PASS:",
+                                                                "FAIL:"],
+                                                               timeout)
+
+        except (kvm_subprocess.ExpectError):
+            match = None
+            data = "Timeout."
+
+        kcrash_data = _search_kernel_crashlog(vm)
+        if (kcrash_data != None):
+            logging.error(kcrash_data)
+            vm[4] = True
+
         return (match, data)
 
 
@@ -418,36 +550,13 @@
         """
         match, data = _on_guest(command, vm, timeout)
         if match == 1 or match is None:
-            raise error.TestFail("Failed to execute '%s' on virtio_guest.py, "
+            raise error.TestFail("Failed to execute '%s' on virtio_console_guest.py, "
                                  "vm: %s, output:\n%s" %
                                  (command, vm[0].name, data))
 
         return (match, data)
 
 
-    def socket_readall(sock, read_timeout, mesagesize):
-        """
-        Read everything from the socket.
-
-        @param sock: Socket.
-        @param read_timeout: Read timeout.
-        @param mesagesize: Size of message.
-        """
-        sock_decriptor = sock.fileno()
-        sock.settimeout(read_timeout)
-        message = ""
-        try:
-            while (len(message) < mesagesize):
-                message += sock.recv(mesagesize)
-        except Exception as inst:
-            if (inst.args[0] == "timed out"):
-                logging.debug("Reading timeout")
-            else:
-                logging.debug(inst)
-        sock.setblocking(1)
-        return message
-
-
     def _guest_exit_threads(vm, send_pts, recv_pts):
         """
         Safely executes on_guest("virt.exit_threads()") using workaround of
@@ -463,7 +572,7 @@
             logging.debug("Workaround the stuck thread on guest")
             # Thread is stucked in read/write
             for send_pt in send_pts:
-                send_pt[0].sendall(".")
+                send_pt.sock.sendall(".")
         elif match != 0:
             # Something else
             raise error.TestFail("Unexpected fail\nMatch: %s\nData:\n%s"
@@ -471,8 +580,8 @@
 
         # Read-out all remaining data
         for recv_pt in recv_pts:
-            while select.select([recv_pt[0]], [], [], 0.1)[0]:
-                recv_pt[0].recv(1024)
+            while select.select([recv_pt.sock], [], [], 0.1)[0]:
+                recv_pt.sock.recv(1024)
 
         # This will cause fail in case anything went wrong.
         on_guest("print 'PASS: nothing'", vm, 10)
@@ -482,11 +591,18 @@
         """
         Creates the VM and connects the specified number of consoles and serial
         ports.
+        Ports are allocated by 2 per 1 virtio-serial-pci device starting with
+        console. (3+2 => CC|CS|S; 0+2 => SS; 3+4 => CC|CS|SS|S, ...) This way
+        it's easy to test communication on the same or different
+        virtio-serial-pci device.
+        Further in tests the consoles are being picked always from the first
+        available one (3+2: 2xC => CC|cs|s <communication on the same PCI>;
+        2xC,1xS => CC|cS|s <communication between 2 PCI devs>)
 
         @param no_console: Number of desired virtconsoles.
         @param no_serialport: Number of desired virtserialports.
         @return: Tuple with (guest information, consoles information)
-                guest informations = [vm, session, tmp_dir]
+                guest informations = [vm, session, tmp_dir, kcrash]
                 consoles informations = [consoles[], serialports[]]
         """
         consoles = []
@@ -494,204 +610,326 @@
         tmp_dir = tempfile.mkdtemp(prefix="virtio-console-", dir="/tmp/")
         if not params.get('extra_params'):
             params['extra_params'] = ''
-        params['extra_params'] += " -device virtio-serial"
 
-        for i in  range(0, no_console):
+        for i in range(0, no_console):
+            # Spread consoles between multiple PCI devices (2 per dev)
+            if not i % 2:
+                pci = "virtio-serial-pci%d" % (i / 2)
+                params['extra_params'] += (" -device virtio-serial-pci,id="
+                                           + pci)
+                pci += ".0"
             params['extra_params'] += (" -chardev socket,path=%s/%d,id=vc%d,"
                                        "server,nowait" % (tmp_dir, i, i))
             params['extra_params'] += (" -device virtconsole,chardev=vc%d,"
-                                      "name=console-%d,id=c%d" % (i, i, i))
+                                      "name=console-%d,id=c%d,bus=%s"
+                                      % (i, i, i, pci))
 
         for i in  range(no_console, no_console + no_serialport):
+            # Spread serial ports between multiple PCI devices (2 per dev)
+            if not i % 2:
+                pci = "virtio-serial-pci%d" % (i / 2)
+                params['extra_params'] += (" -device virtio-serial-pci,id="
+                                           + pci)
+                pci += ".0"
             params['extra_params'] += (" -chardev socket,path=%s/%d,id=vs%d,"
                                        "server,nowait" % (tmp_dir, i, i))
             params['extra_params'] += (" -device virtserialport,chardev=vs%d,"
-                                       "name=serialport-%d,id=p%d" % (i, i, i))
-
+                                       "name=serialport-%d,id=p%d,bus=%s"
+                                       % (i, i, i, pci))
 
         logging.debug("Booting first guest %s", params.get("main_vm"))
         kvm_preprocessing.preprocess_vm(test, params, env,
                                         params.get("main_vm"))
 
+        vm = env.get_vm(params.get("main_vm"))
 
-        vm = kvm_utils.env_get_vm(env, params.get("main_vm"))
+        session = vm.wait_for_login(timeout=float(params.get("boot_timeout", 240)))
 
-        session = kvm_test_utils.wait_for_login(vm, 0,
+        sserial = kvm_test_utils.wait_for_login(vm, 0,
                                          float(params.get("boot_timeout", 240)),
-                                         0, 2)
+                                         0, 2, serial=True)
 
         # connect the sockets
         for i in range(0, no_console):
-            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-            sock.connect("%s/%d" % (tmp_dir, i))
-            consoles.append([sock, "console-%d" % i, "yes"])
+            consoles.append(Port(None ,"console-%d" % i,
+                                 "yes", "%s/%d" % (tmp_dir, i)))
         for i in range(no_console, no_console + no_serialport):
-            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-            sock.connect("%s/%d" % (tmp_dir, i))
-            serialports.append([sock, "serialport-%d" % i, "no"])
+            serialports.append(Port(None ,"serialport-%d" % i,
+                                    "no", "%s/%d" % (tmp_dir, i)))
 
-        return [vm, session, tmp_dir], [consoles, serialports]
+        kcrash = False
+
+        return [vm, session, tmp_dir, sserial, kcrash], [consoles, serialports]
 
 
-    def test_smoke(vm, consoles, params):
+    def topen(vm, port):
         """
-        Virtio console smoke test.
+        Open virtioconsole port.
 
-        Tests the basic functionalities (poll, read/write with and without
-        connected host, etc.
-
-        @param vm: target virtual machine [vm, session, tmp_dir]
-        @param consoles: a field of virtio ports with the minimum of 2 items
-        @param params: test parameters '$console_type:$data;...'
+        @param vm: Target virtual machine [vm, session, tmp_dir, ser_session].
+        @param port: Port identifier.
         """
-        logging.info("Smoke test: Tests the basic capabilities of "
-                     "virtio_consoles.")
-        # PREPARE
-        for param in params.split(';'):
-            if not param:
-                continue
-            logging.info("test_smoke: params: %s", param)
-            param = param.split(':')
-            if len(param) > 1:
-                data = param[1]
-            else:
-                data = "Smoke test data"
-            param = (param[0] == 'serialport')
-            send_pt = consoles[param][0]
-            recv_pt = consoles[param][1]
-
-            # TEST
-            # Poll (OUT)
-            on_guest("virt.poll('%s', %s)" % (send_pt[1], select.POLLOUT), vm,
-                     2)
-
-            # Poll (IN, OUT)
-            send_pt[0].sendall("test")
-            for test in [select.POLLIN, select.POLLOUT]:
-                on_guest("virt.poll('%s', %s)" % (send_pt[1], test), vm, 2)
-
-            # Poll (IN HUP)
-            # I store the socket informations and close the socket
-            sock = send_pt[0]
-            send_pt[0] = sock.getpeername()
-            sock.shutdown(2)
-            sock.close()
-            del sock
-            for test in [select.POLLIN, select.POLLHUP]:
-                on_guest("virt.poll('%s', %s)" % (send_pt[1], test), vm, 2)
-
-            # Poll (HUP)
-            on_guest("virt.recv('%s', 4, 1024, False)" % (send_pt[1]), vm, 2)
-            on_guest("virt.poll('%s', %s)" % (send_pt[1], select.POLLHUP), vm,
-                     2)
-
-            # Reconnect the socket
-            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-            sock.connect(send_pt[0])
-            send_pt[0] = sock
-            # Redefine socket in consoles
-            consoles[param][0] = send_pt
-            on_guest("virt.poll('%s', %s)" % (send_pt[1], select.POLLOUT), vm,
-                     2)
-
-            # Read/write without host connected
-            # I store the socket informations and close the socket
-            sock = send_pt[0]
-            send_pt[0] = sock.getpeername()
-            sock.shutdown(2)
-            sock.close()
-            del sock
-            # Read should pass
-            on_guest("virt.recv('%s', 0, 1024, False)" % send_pt[1], vm, 2)
-            # Write should timed-out
-            match, tmp = _on_guest("virt.send('%s', 10, False)"
-                                    % send_pt[1], vm, 2)
-            if match != None:
-                raise error.TestFail("Read on guest while host disconnected "
-                                     "didn't timed out.\nOutput:\n%s"
-                                     % tmp)
-            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-            sock.connect(send_pt[0])
-            send_pt[0] = sock
-
-            # Redefine socket in consoles
-            consoles[param][0] = send_pt
-            if (send_pt[0].recv(1024) < 10):
-                raise error.TestFail("Didn't received data from guest")
-            # Now the _on_guest("virt.send('%s'... command should be finished
-            on_guest("print 'PASS: nothing'", vm, 2)
-
-            # Non-blocking mode
-            on_guest("virt.blocking('%s', False)" % send_pt[1], vm, 2)
-            # Recv should return FAIL with 0 received data
-            match, tmp = _on_guest("virt.recv('%s', 10, 1024, False)" %
-                                   send_pt[1], vm, 2)
-            if match == 0:
-                raise error.TestFail("Received data even when non were sent\n"
-                                     "Data:\n%s" % tmp)
-            elif match == None:
-                raise error.TestFail("Timed out, probably in blocking mode\n"
-                                     "Data:\n%s" % tmp)
-            elif match != 1:
-                raise error.TestFail("Unexpected fail\nMatch: %s\nData:\n%s" %
-                                     (match, tmp))
-            send_pt[0].sendall("1234567890")
-            on_guest("virt.recv('%s', 10, 1024, False)" % send_pt[1], vm, 2)
-
-            # Blocking mode
-            on_guest("virt.blocking('%s', True)" % send_pt[1], vm, 2)
-            # Recv should timed out
-            match, tmp = _on_guest("virt.recv('%s', 10, 1024, False)" %
-                                   send_pt[1], vm, 2)
-            if match == 0:
-                raise error.TestFail("Received data even when non were sent\n"
-                                     "Data:\n%s" % tmp)
-            elif match != None:
-                raise error.TestFail("Unexpected fail\nMatch: %s\nData:\n%s" %
-                                     (match, tmp))
-            send_pt[0].sendall("1234567890")
-            # Now guest received the data end escaped from the recv()
-            on_guest("print 'PASS: nothing'", vm, 2)
-
-            # Basic loopback test
-            on_guest("virt.loopback(['%s'], ['%s'], 1024, virt.LOOP_NONE)" %
-                     (send_pt[1], recv_pt[1]), vm, 2)
-            send_pt[0].sendall(data)
-            tmp = ""
-            i = 0
-            while i <= 10:
-                i += 1
-                ret = select.select([recv_pt[0]], [], [], 1.0)
-                if ret:
-                    tmp += recv_pt[0].recv(1024)
-                if len(tmp) >= len(data):
-                    break
-            if tmp != data:
-                raise error.TestFail("Incorrect data: '%s' != '%s'",
-                                     data, tmp)
-            _guest_exit_threads(vm, [send_pt], [recv_pt])
-
-        return consoles
+        on_guest("virt.open('%s')" % (port.name), vm, 10)
+        port.open()
 
 
-    def test_loopback(vm, consoles, params):
+    def tmulti_open(vm, port):
         """
-        Virtio console loopback test.
+        Multiopen virtioconsole port.
+
+        @param vm: Target virtual machine [vm, session, tmp_dir, ser_session].
+        @param port: Port identifier.
+        """
+        on_guest("virt.close('%s')" % (port.name), vm, 10)
+        on_guest("virt.open('%s')" % (port.name), vm, 10)
+        (match, data) = _on_guest("virt.open('%s')" % (port.name), vm, 10)
+        # Console is permitted to open the device multiple times
+        if port.port_type == "yes": #is console?
+            if match != 0: # console multi-open should have passed
+                raise error.TestFail("Unexpected fail of opening the console"
+                                     " device for the 2nd time.\n%s" % data)
+        else:
+            if match != 1: # serialport multi-open should have failed
+                raise error.TestFail("Unexpetded pass of opening the"
+                                     " serialport device for the 2nd time.")
+            elif not "[Errno 24]" in data:
+                raise error.TestFail("Multiple opening fail but with another"
+                                     " exception %s" % data)
+        port.open()
+
+    def tclose(vm, port):
+        """
+        Close socket.
+
+        @param vm: Target virtual machine [vm, session, tmp_dir, ser_session].
+        @param port: Port to close.
+        """
+        on_guest("virt.close('%s')" % (port.name), vm, 10)
+        port.close()
+
+
+    def tpooling(vm, port):
+        """
+        Test the polling functionality of the port.
+
+        @param vm: Target virtual machine [vm, session, tmp_dir, ser_session].
+        @param port: Port used in test.
+        """
+        # Poll (OUT)
+        on_guest("virt.poll('%s', %s)" % (port.name, select.POLLOUT), vm,
+                 2)
+
+        # Poll (IN, OUT)
+        port.sock.sendall("test")
+        for test in [select.POLLIN, select.POLLOUT]:
+            on_guest("virt.poll('%s', %s)" % (port.name, test), vm, 10)
+
+        # Poll (IN HUP)
+        # I store the socket informations and close the socket
+        port.close()
+        for test in [select.POLLIN, select.POLLHUP]:
+            on_guest("virt.poll('%s', %s)" % (port.name, test), vm, 10)
+
+        # Poll (HUP)
+        on_guest("virt.recv('%s', 4, 1024, False)" % (port.name), vm, 10)
+        on_guest("virt.poll('%s', %s)" % (port.name, select.POLLHUP), vm,
+                 2)
+
+        # Reconnect the socket
+        port.open()
+        # Redefine socket in consoles
+        on_guest("virt.poll('%s', %s)" % (port.name, select.POLLOUT), vm,
+                 2)
+
+
+    def tsigio(vm, port):
+        """
+        Test try sigio function.
+
+        @param vm: Target virtual machine [vm, session, tmp_dir, ser_session].
+        @param port: Port used in test.
+        """
+        if port.is_open:
+            port.close()
+
+        # Enable sigio on specific port
+        on_guest("virt.async('%s', True, 0)" %
+                 (port.name) , vm, 10)
+        on_guest("virt.get_sigio_poll_return('%s')" % (port.name) , vm, 10)
+
+        #Test sigio when port open
+        on_guest("virt.set_pool_want_return('%s', select.POLLOUT)" %
+                 (port.name), vm, 10)
+        port.open()
+        match = _on_guest("virt.get_sigio_poll_return('%s')" %
+                          (port.name) , vm, 10)[0]
+        if match == 1:
+            raise error.TestFail("Problem with HUP on console port.")
+
+        #Test sigio when port receive data
+        on_guest("virt.set_pool_want_return('%s', select.POLLOUT |"
+                 " select.POLLIN)" % (port.name), vm, 10)
+        port.sock.sendall("0123456789")
+        on_guest("virt.get_sigio_poll_return('%s')" % (port.name) , vm, 10)
+
+        #Test sigio port close event
+        on_guest("virt.set_pool_want_return('%s', select.POLLHUP |"
+                 " select.POLLIN)" % (port.name), vm, 10)
+        port.close()
+        on_guest("virt.get_sigio_poll_return('%s')" % (port.name) , vm, 10)
+
+        #Test sigio port open event and persistence of written data on port.
+        on_guest("virt.set_pool_want_return('%s', select.POLLOUT |"
+                 " select.POLLIN)" % (port.name), vm, 10)
+        port.open()
+        on_guest("virt.get_sigio_poll_return('%s')" % (port.name) , vm, 10)
+
+        #Test event when erase data.
+        on_guest("virt.clean_port('%s')" % (port.name), vm, 10)
+        port.close()
+        on_guest("virt.set_pool_want_return('%s', select.POLLOUT)"
+                 % (port.name), vm, 10)
+        port.open()
+        on_guest("virt.get_sigio_poll_return('%s')" % (port.name) , vm, 10)
+
+        # Disable sigio on specific port
+        on_guest("virt.async('%s', False, 0)" %
+                 (port.name) , vm, 10)
+
+
+    def tlseek(vm, port):
+        """
+        Tests the correct handling of lseek (expected fail)
+
+        @param vm: Target virtual machine [vm, session, tmp_dir, ser_session].
+        @param port: Port used in test.
+        """
+        # The virt.lseek returns PASS when the seek fails
+        on_guest("virt.lseek('%s', 0, 0)" % (port.name), vm, 10)
+
+
+    def trw_host_offline(vm, port):
+        """
+        Guest read/write from host when host is disconnected.
+
+        @param vm: Target virtual machine [vm, session, tmp_dir, ser_session].
+        @param port: Port used in test.
+        """
+        if port.is_open:
+            port.close()
+
+        on_guest("virt.recv('%s', 0, 1024, False)" % port.name, vm, 10)
+        match, tmp = _on_guest("virt.send('%s', 10, False)"
+                                % port.name, vm, 10)
+        if match != None:
+            raise error.TestFail("Write on guest while host disconnected "
+                                 "didn't timed out.\nOutput:\n%s"
+                                 % tmp)
+
+        port.open()
+
+        if (port.sock.recv(1024) < 10):
+            raise error.TestFail("Didn't received data from guest")
+        # Now the _on_guest("virt.send('%s'... command should be finished
+        on_guest("print 'PASS: nothing'", vm, 10)
+
+
+    def trw_blocking_mode(vm, port):
+        """
+        Guest read/write data in blocking mode.
+
+        @param vm: Target virtual machine [vm, session, tmp_dir, ser_session].
+        @param port: Port used in test.
+        """
+        # Blocking mode
+        if not port.is_open:
+            port.open()
+        on_guest("virt.blocking('%s', True)" % port.name, vm, 10)
+        # Recv should time out
+        match, tmp = _on_guest("virt.recv('%s', 10, 1024, False)" %
+                               port.name, vm, 10)
+        if match == 0:
+            raise error.TestFail("Received data even when non were sent\n"
+                                 "Data:\n%s" % tmp)
+        elif match != None:
+            raise error.TestFail("Unexpected fail\nMatch: %s\nData:\n%s" %
+                                 (match, tmp))
+        port.sock.sendall("1234567890")
+        # Now guest received the data end escaped from the recv()
+        on_guest("print 'PASS: nothing'", vm, 10)
+
+
+    def trw_nonblocking_mode(vm, port):
+        """
+        Guest read/write data in nonblocking mode.
+
+        @param vm: Target virtual machine [vm, session, tmp_dir, ser_session].
+        @param port: Port used in test.
+        """
+        # Non-blocking mode
+        if not port.is_open:
+            port.open()
+        on_guest("virt.blocking('%s', False)" % port.name, vm, 10)
+        # Recv should return FAIL with 0 received data
+        match, tmp = _on_guest("virt.recv('%s', 10, 1024, False)" %
+                              port.name, vm, 10)
+        if match == 0:
+            raise error.TestFail("Received data even when non were sent\n"
+                                 "Data:\n%s" % tmp)
+        elif match == None:
+            raise error.TestFail("Timed out, probably in blocking mode\n"
+                                 "Data:\n%s" % tmp)
+        elif match != 1:
+            raise error.TestFail("Unexpected fail\nMatch: %s\nData:\n%s" %
+                                 (match, tmp))
+        port.sock.sendall("1234567890")
+        on_guest("virt.recv('%s', 10, 1024, False)" % port.name, vm, 10)
+
+
+    def tbasic_loopback(vm, send_port, recv_port, data="Smoke test data"):
+        """
+        Simple loopback test between only two ports.
+
+        @param vm: Target virtual machine [vm, session, tmp_dir, ser_session].
+        @param send_port, recv_port: Ports used in the loopback test.
+        """
+        if not send_port.is_open:
+            send_port.open()
+        if not recv_port.is_open:
+            recv_port.open()
+        on_guest("virt.loopback(['%s'], ['%s'], 1024, virt.LOOP_NONE)" %
+                     (send_port.name, recv_port.name), vm, 10)
+        send_port.sock.sendall(data)
+        tmp = ""
+        i = 0
+        while i <= 10:
+            i += 1
+            ret = select.select([recv_port.sock], [], [], 1.0)
+            if ret:
+                tmp += recv_port.sock.recv(1024)
+            if len(tmp) >= len(data):
+                break
+        if tmp != data:
+            raise error.TestFail("Incorrect data: '%s' != '%s'",
+                                 data, tmp)
+        _guest_exit_threads(vm, [send_port], [recv_port])
+
+
+    def tloopback(vm, consoles, params):
+        """
+        Virtio console loopback subtest.
 
         Creates loopback on the vm machine between send_pt and recv_pts
         ports and sends length amount of data through this connection.
         It validates the correctness of the data sent.
 
-        @param vm: target virtual machine [vm, session, tmp_dir]
-        @param consoles: a field of virtio ports with the minimum of 2 items
+        @param vm: Target virtual machine [vm, session, tmp_dir, ser_session].
+        @param consoles: Field of virtio ports with the minimum of 2 items.
+        @param params: test parameters, multiple receivers allowed.
             '$source_console_type@buffer_length:
              $destination_console_type1@$buffer_length:...:
              $loopback_buffer_length;...'
         """
-        logging.info("Loopback test: Creates a loopback between sender port "
-                     "and receiving port, send data through this connection, "
-                     "verify data correctness.")
         # PREPARE
         for param in params.split(';'):
             if not param:
@@ -730,6 +968,13 @@
             if len(buf_len) == (idx_console + idx_serialport):
                 buf_len.append(1024)
 
+            for p in recv_pts:
+                if not p.is_open:
+                    p.open()
+
+            if not send_pt.is_open:
+                send_pt.open()
+
             if len(recv_pts) == 0:
                 raise error.TestFail("test_loopback: incorrect recv consoles"
                                      "definition")
@@ -739,21 +984,22 @@
             for i in range(0, len(recv_pts)):
                 queues.append(deque())
 
-            tmp = "'%s'" % recv_pts[0][1]
+            tmp = "'%s'" % recv_pts[0].name
             for recv_pt in recv_pts[1:]:
-                tmp += ", '%s'" % (recv_pt[1])
+                tmp += ", '%s'" % (recv_pt.name)
             on_guest("virt.loopback(['%s'], [%s], %d, virt.LOOP_POLL)"
-                     % (send_pt[1], tmp, buf_len[-1]), vm, 2)
+                     % (send_pt.name, tmp, buf_len[-1]), vm, 10)
 
             exit_event = threading.Event()
 
             # TEST
-            thread = th_send_check(send_pt[0], exit_event, queues, buf_len[0])
+            thread = ThSendCheck(send_pt.sock, exit_event, queues,
+                                   buf_len[0])
             thread.start()
             threads.append(thread)
 
             for i in range(len(recv_pts)):
-                thread = th_recv_check(recv_pts[i][0], queues[i], exit_event,
+                thread = ThRecvCheck(recv_pts[i].sock, queues[i], exit_event,
                                        buf_len[i + 1])
                 thread.start()
                 threads.append(thread)
@@ -770,8 +1016,8 @@
 
             # Read-out all remaining data
             for recv_pt in recv_pts:
-                while select.select([recv_pt[0]], [], [], 0.1)[0]:
-                    recv_pt[0].recv(1024)
+                while select.select([recv_pt.sock], [], [], 0.1)[0]:
+                    recv_pt.sock.recv(1024)
 
             _guest_exit_threads(vm, [send_pt], recv_pts)
 
@@ -779,19 +1025,17 @@
             del threads[:]
 
 
-    def test_perf(vm, consoles, params):
+    def tperf(vm, consoles, params):
         """
         Tests performance of the virtio_console tunel. First it sends the data
         from host to guest and than back. It provides informations about
         computer utilisation and statistic informations about the troughput.
 
-        @param vm: target virtual machine [vm, session, tmp_dir]
-        @param consoles: a field of virtio ports with the minimum of 2 items
+        @param vm: Target virtual machine [vm, session, tmp_dir, ser_session].
+        @param consoles: Field of virtio ports with the minimum of 2 items.
         @param params: test parameters:
                 '$console_type@$buffer_length:$test_duration;...'
         """
-        logging.info("Performance test: Measure performance for the "
-                     "virtio console tunnel")
         for param in params.split(';'):
             if not param:
                 continue
@@ -811,111 +1055,233 @@
             param = (param[0] == 'serialport')
             port = consoles[param][0]
 
+            if not port.is_open:
+                port.open()
+
             data = ""
             for i in range(buf_len):
                 data += "%c" % random.randrange(255)
 
             exit_event = threading.Event()
-            slice = float(duration)/100
+            time_slice = float(duration) / 100
 
             # HOST -> GUEST
             on_guest('virt.loopback(["%s"], [], %d, virt.LOOP_NONE)' %
-                     (port[1], buf_len), vm, 2)
-            thread = th_send(port[0], data, exit_event)
+                     (port.name, buf_len), vm, 10)
+            thread = ThSend(port.sock, data, exit_event)
             stats = array.array('f', [])
-            loads = []
-            loads.append(cpu_load())
-            loads.append(pid_load(os.getpid(), 'autotest'))
-            loads.append(pid_load(vm[0].get_pid(), 'VM'))
-
-            for load in loads:
-                load.start()
+            loads = utils.SystemLoad([(os.getpid(), 'autotest'),
+                                      (vm[0].get_pid(), 'VM'), 0])
+            loads.start()
             _time = time.time()
             thread.start()
             for i in range(100):
                 stats.append(thread.idx)
-                time.sleep(slice)
+                time.sleep(time_slice)
             _time = time.time() - _time - duration
-            print_load([loads[1].get_load(), loads[2].get_load()],
-                       loads[0].get_load())
+            logging.info("\n" + loads.get_cpu_status_string()[:-1])
+            logging.info("\n" + loads.get_mem_status_string()[:-1])
             exit_event.set()
             thread.join()
 
             # Let the guest read-out all the remaining data
             while not _on_guest("virt.poll('%s', %s)" %
-                                (port[1], select.POLLIN), vm, 2)[0]:
+                                (port.name, select.POLLIN), vm, 10)[0]:
                 time.sleep(1)
 
             _guest_exit_threads(vm, [port], [])
 
-            if (_time > slice):
+            if (_time > time_slice):
                 logging.error(
-                "Test ran %fs longer which is more than one slice", _time)
+                "Test ran %fs longer which is more than one time slice", _time)
             else:
                 logging.debug("Test ran %fs longer", _time)
-            stats = process_stats(stats[1:], slice*1048576)
+            stats = process_stats(stats[1:], time_slice * 1048576)
             logging.debug("Stats = %s", stats)
             logging.info("Host -> Guest [MB/s] (min/med/max) = %.3f/%.3f/%.3f",
-                         stats[0], stats[len(stats)/2], stats[-1])
+                        stats[0], stats[len(stats) / 2], stats[-1])
 
             del thread
 
             # GUEST -> HOST
             exit_event.clear()
             stats = array.array('f', [])
-            on_guest("virt.send_loop_init('%s', %d)" % (port[1], buf_len),
+            on_guest("virt.send_loop_init('%s', %d)" % (port.name, buf_len),
                      vm, 30)
-            thread = th_recv(port[0], exit_event, buf_len)
+            thread = ThRecv(port.sock, exit_event, buf_len)
             thread.start()
-            for load in loads:
-                load.start()
-            on_guest("virt.send_loop()", vm, 2)
+            loads.start()
+            on_guest("virt.send_loop()", vm, 10)
             _time = time.time()
             for i in range(100):
                 stats.append(thread.idx)
-                time.sleep(slice)
+                time.sleep(time_slice)
             _time = time.time() - _time - duration
-            print_load([loads[1].get_load(), loads[2].get_load()],
-                       loads[0].get_load())
-            on_guest("virt.exit_threads()", vm, 2)
+            logging.info("\n" + loads.get_cpu_status_string()[:-1])
+            logging.info("\n" + loads.get_mem_status_string()[:-1])
+            on_guest("virt.exit_threads()", vm, 10)
             exit_event.set()
             thread.join()
-            if (_time > slice): # Deviation is higher than 1 slice
+            if (_time > time_slice): # Deviation is higher than 1 time_slice
                 logging.error(
-                "Test ran %fs longer which is more than one slice", _time)
+                "Test ran %fs longer which is more than one time slice", _time)
             else:
-                logging.debug("Test ran %fs longer" % _time)
-            stats = process_stats(stats[1:], slice*1048576)
+                logging.debug("Test ran %fs longer", _time)
+            stats = process_stats(stats[1:], time_slice * 1048576)
             logging.debug("Stats = %s", stats)
             logging.info("Guest -> Host [MB/s] (min/med/max) = %.3f/%.3f/%.3f",
-                         stats[0], stats[len(stats)/2], stats[-1])
+                         stats[0], stats[len(stats) / 2], stats[-1])
 
             del thread
-
             del exit_event
-            del loads[:]
+
+
+    def _clean_ports(vm, consoles):
+        """
+        Read all data all port from both side of port.
+
+        @param vm: Target virtual machine [vm, session, tmp_dir, ser_session].
+        @param consoles: Consoles which should be clean.
+        """
+        for ctype in consoles:
+            for port in ctype:
+                openned = port.is_open
+                port.clean_port()
+                #on_guest("virt.blocking('%s', True)" % port.name, vm, 10)
+                on_guest("virt.clean_port('%s'),1024" % port.name, vm, 10)
+                if not openned:
+                    port.close()
+                    on_guest("virt.close('%s'),1024" % port.name, vm, 10)
+
+
+    def clean_ports(vm, consoles):
+        """
+        Clean state of all ports and set port to default state.
+        Default state:
+           No data on port or in port buffer.
+           Read mode = blocking.
+
+        @param vm: Target virtual machine [vm, session, tmp_dir, ser_session].
+        @param consoles: Consoles which should be clean.
+        """
+        # Check if python is still alive
+        print "CLEANING"
+        match, tmp = _on_guest("is_alive()", vm, 10)
+        if (match == None) or (match != 0):
+            logging.error("Python died/is stuck/has remaining threads")
+            logging.debug(tmp)
+            try:
+                if vm[4] == True:
+                    raise error.TestFail("Kernel crash.")
+                match, tmp = _on_guest("guest_exit()", vm, 10)
+                if (match == None) or (match == 0):
+                    vm[1].close()
+                vm[1] = vm[0].wait_for_login(timeout=float(params.get("boot_timeout", 240)))
+                on_guest("killall -9 python "
+                         "&& echo -n PASS: python killed"
+                         "|| echo -n PASS: python died",
+                         vm, 10)
+
+                init_guest(vm, consoles)
+                _clean_ports(vm, consoles)
+
+            except (error.TestFail, kvm_subprocess.ExpectError,
+                    Exception), inst:
+                logging.error(inst)
+                logging.error("Virtio-console driver is irreparably"
+                              " blocked. Every comd end with sig KILL."
+                              "Try reboot vm for continue in testing.")
+                vm[1] = vm[0].reboot(vm[1], "system_reset")
+                init_guest(vm, consoles)
+                match = _on_guest("virt.clean_port('%s'),1024" %
+                                      consoles[0][0].name, vm, 10)[0]
+
+                if (match == None) or (match != 0):
+                    raise error.TestFail("Virtio-console driver is irrepar"
+                                         "ably blocked. Every comd end"
+                                         " with sig KILL. Neither the "
+                                         "restart did not help.")
+                _clean_ports(vm, consoles)
+
+
+    def test_smoke(test, vm, consoles, params):
+        """
+        Virtio console smoke test.
+
+        Tests the basic functionalities (poll, read/write with and without
+        connected host, etc.
+
+        @param test: Main test object.
+        @param vm: Target virtual machine [vm, session, tmp_dir, ser_session].
+        @param consoles: Field of virtio ports with the minimum of 2 items.
+        @param params: Test parameters '$console_type:$data;...'
+        """
+        # PREPARE
+        for param in params.split(';'):
+            if not param:
+                continue
+            headline = "test_smoke: params: %s" % (param)
+            logging.info(headline)
+            param = param.split(':')
+            if len(param) > 1:
+                data = param[1]
+            else:
+                data = "Smoke test data"
+            param = (param[0] == 'serialport')
+            send_pt = consoles[param][0]
+            recv_pt = consoles[param][1]
+            test.headline(headline)
+            test.do_test(topen, [vm, send_pt], True)
+            test.do_test(tclose, [vm, send_pt], True)
+            test.do_test(tmulti_open, [vm, send_pt])
+            test.do_test(tpooling, [vm, send_pt])
+            test.do_test(tsigio, [vm, send_pt])
+            test.do_test(tlseek, [vm, send_pt])
+            test.do_test(trw_host_offline, [vm, send_pt])
+            test.do_test(trw_nonblocking_mode, [vm, send_pt])
+            test.do_test(trw_blocking_mode, [vm, send_pt])
+            test.do_test(tbasic_loopback, [vm, send_pt, recv_pt, data], True)
+
+
+    def test_multiport(test, vm, consoles, params):
+        """
+        This is group of test which test virtio_console in maximal load and
+        with multiple ports.
+
+        @param test: Main test object.
+        @param vm: Target virtual machine [vm, session, tmp_dir, ser_session].
+        @param consoles: Field of virtio ports with the minimum of 2 items.
+        @param params: Test parameters '$console_type:$data;...'
+        """
+        test.headline("test_multiport:")
+        #Test Loopback
+        test.do_test(tloopback, [vm, consoles, params[0]])
+
+        #Test Performance
+        test.do_test(tperf, [vm, consoles, params[1]])
 
 
     # INITIALIZE
-    test_smoke_params = params.get('virtio_console_smoke', '')
-    test_loopback_params = params.get('virtio_console_loopback', '')
-    test_perf_params = params.get('virtio_console_perf', '')
+
+    tsmoke_params = params.get('virtio_console_smoke', '')
+    tloopback_params = params.get('virtio_console_loopback', '')
+    tperf_params = params.get('virtio_console_perf', '')
 
     no_serialports = 0
     no_consoles = 0
     # consoles required for Smoke test
-    if (test_smoke_params.count('serialport')):
+    if (tsmoke_params.count('serialport')):
         no_serialports = max(2, no_serialports)
-    if (test_smoke_params.count('console')):
+    if (tsmoke_params.count('console')):
         no_consoles = max(2, no_consoles)
     # consoles required for Loopback test
-    for param in test_loopback_params.split(';'):
+    for param in tloopback_params.split(';'):
         no_serialports = max(no_serialports, param.count('serialport'))
         no_consoles = max(no_consoles, param.count('console'))
     # consoles required for Performance test
-    if (test_perf_params.count('serialport')):
+    if (tperf_params.count('serialport')):
         no_serialports = max(1, no_serialports)
-    if (test_perf_params.count('console')):
+    if (tperf_params.count('console')):
         no_consoles = max(1, no_consoles)
 
     if (no_serialports + no_consoles) == 0:
@@ -924,28 +1290,34 @@
 
     vm, consoles = _vm_create(no_consoles, no_serialports)
 
-    # Copy allocator.py into guests
+    # Copy virtio_console_guest.py into guests
     pwd = os.path.join(os.environ['AUTODIR'], 'tests/kvm')
-    vksmd_src = os.path.join(pwd, "scripts/virtio_guest.py")
+    vksmd_src = os.path.join(pwd, "scripts/virtio_console_guest.py")
     dst_dir = "/tmp"
-    if not vm[0].copy_files_to(vksmd_src, dst_dir):
-        raise error.TestFail("copy_files_to failed %s" % vm[0].name)
+    vm[0].copy_files_to(vksmd_src, dst_dir)
 
     # ACTUAL TESTING
     # Defines all available consoles; tests udev and sysfs
-    conss = []
-    for mode in consoles:
-        for cons in mode:
-            conss.append(cons[1:3])
-    init_guest(vm, 10)
-    on_guest("virt.init(%s)" % (conss), vm, 10)
 
-    consoles = test_smoke(vm, consoles, test_smoke_params)
-    test_loopback(vm, consoles, test_loopback_params)
-    test_perf(vm, consoles, test_perf_params)
+    test = SubTest()
+    try:
+        init_guest(vm, consoles)
+
+        test.set_cleanup_func(clean_ports, [vm, consoles])
+        #Test Smoke
+        test_smoke(test, vm, consoles, tsmoke_params)
+
+        #Test multiport functionality and performance.
+        test_multiport(test, vm, consoles, [tloopback_params, tperf_params])
+    finally:
+        logging.info(("Summary: %d tests passed  %d test failed :\n" %
+                      (test.passed, test.failed)) + test.get_text_result())
+
+    if test.is_failed():
+        raise error.TestFail("Virtio_console test FAILED.")
+
 
     # CLEANUP
     vm[1].close()
     vm[0].destroy(gracefully=False)
     shutil.rmtree(vm[2])
-
diff --git a/client/tests/kvm/tests/vlan.py b/client/tests/kvm/tests/vlan.py
index f41ea6a..b7cfda2 100644
--- a/client/tests/kvm/tests/vlan.py
+++ b/client/tests/kvm/tests/vlan.py
@@ -1,6 +1,7 @@
 import logging, time, re
 from autotest_lib.client.common_lib import error
-import kvm_test_utils, kvm_utils
+import kvm_test_utils, kvm_utils, kvm_subprocess
+
 
 def run_vlan(test, params, env):
     """
@@ -18,7 +19,6 @@
     @param params: Dictionary with the test parameters.
     @param env: Dictionary with test environment.
     """
-
     vm = []
     session = []
     ifname = []
@@ -31,28 +31,26 @@
     maximal = int(params.get("maximal"))
     file_size = params.get("file_size")
 
-    vm.append(kvm_test_utils.get_living_vm(env, params.get("main_vm")))
-    vm.append(kvm_test_utils.get_living_vm(env, "vm2"))
+    vm.append(env.get_vm(params["main_vm"]))
+    vm.append(env.get_vm("vm2"))
+    for vm_ in vm:
+        vm_.verify_alive()
 
-    def add_vlan(session, id, iface="eth0"):
-        if session.get_command_status("vconfig add %s %s" % (iface, id)) != 0:
-            raise error.TestError("Fail to add %s.%s" % (iface, id))
+    def add_vlan(session, v_id, iface="eth0"):
+        session.cmd("vconfig add %s %s" % (iface, v_id))
 
-    def set_ip_vlan(session, id, ip, iface="eth0"):
-        iface = "%s.%s" % (iface, id)
-        if session.get_command_status("ifconfig %s %s" % (iface, ip)) != 0:
-            raise error.TestError("Fail to configure ip for %s" % iface)
+    def set_ip_vlan(session, v_id, ip, iface="eth0"):
+        iface = "%s.%s" % (iface, v_id)
+        session.cmd("ifconfig %s %s" % (iface, ip))
 
     def set_arp_ignore(session, iface="eth0"):
         ignore_cmd = "echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore"
-        if session.get_command_status(ignore_cmd) != 0:
-            raise error.TestError("Fail to set arp_ignore of %s" % session)
+        session.cmd(ignore_cmd)
 
-    def rem_vlan(session, id, iface="eth0"):
+    def rem_vlan(session, v_id, iface="eth0"):
         rem_vlan_cmd = "if [[ -e /proc/net/vlan/%s ]];then vconfig rem %s;fi"
-        iface = "%s.%s" % (iface, id)
-        s = session.get_command_status(rem_vlan_cmd % (iface, iface))
-        return s
+        iface = "%s.%s" % (iface, v_id)
+        return session.cmd_status(rem_vlan_cmd % (iface, iface))
 
     def nc_transfer(src, dst):
         nc_port = kvm_utils.find_free_port(1025, 5334, vm_ip[dst])
@@ -65,27 +63,26 @@
         time.sleep(2)
         #send file from src to dst
         send_cmd = send_cmd % (vlan_ip[dst], str(nc_port), "file")
-        if session[src].get_command_status(send_cmd, timeout = 60) != 0:
-            raise error.TestFail ("Fail to send file"
-                                    " from vm%s to vm%s" % (src+1, dst+1))
-        s, o = session[dst].read_up_to_prompt(timeout=60)
-        if s != True:
+        session[src].cmd(send_cmd, timeout=60)
+        try:
+            session[dst].read_up_to_prompt(timeout=60)
+        except kvm_subprocess.ExpectError:
             raise error.TestFail ("Fail to receive file"
                                     " from vm%s to vm%s" % (src+1, dst+1))
         #check MD5 message digest of receive file in dst
-        output = session[dst].get_command_output("md5sum receive").strip()
+        output = session[dst].cmd_output("md5sum receive").strip()
         digest_receive = re.findall(r'(\w+)', output)[0]
         if digest_receive == digest_origin[src]:
-            logging.info("file succeed received in vm %s" % vlan_ip[dst])
+            logging.info("file succeed received in vm %s", vlan_ip[dst])
         else:
-            logging.info("digest_origin is  %s" % digest_origin[src])
-            logging.info("digest_receive is %s" % digest_receive)
+            logging.info("digest_origin is  %s", digest_origin[src])
+            logging.info("digest_receive is %s", digest_receive)
             raise error.TestFail("File transfered differ from origin")
-        session[dst].get_command_status("rm -f receive")
+        session[dst].cmd_output("rm -f receive")
 
     for i in range(2):
-        session.append(kvm_test_utils.wait_for_login(vm[i],
-                       timeout=int(params.get("login_timeout", 360))))
+        session.append(vm[i].wait_for_login(
+            timeout=int(params.get("login_timeout", 360))))
         if not session[i] :
             raise error.TestError("Could not log into guest(vm%d)" % i)
         logging.info("Logged in")
@@ -97,22 +94,16 @@
 
         #produce sized file in vm
         dd_cmd = "dd if=/dev/urandom of=file bs=1024k count=%s"
-        if session[i].get_command_status(dd_cmd % file_size) != 0:
-            raise error.TestFail("File producing failed")
+        session[i].cmd(dd_cmd % file_size)
         #record MD5 message digest of file
-        s, output =session[i].get_command_status_output("md5sum file",
-                                                        timeout=60)
-        if s != 0:
-            raise error.TestFail("File MD5_checking failed" )
+        output = session[i].cmd("md5sum file", timeout=60)
         digest_origin.append(re.findall(r'(\w+)', output)[0])
 
         #stop firewall in vm
-        session[i].get_command_status("/etc/init.d/iptables stop")
+        session[i].cmd_output("/etc/init.d/iptables stop")
 
         #load 8021q module for vconfig
-        load_8021q_cmd = "modprobe 8021q"
-        if session[i].get_command_status(load_8021q_cmd) != 0:
-            raise error.TestError("Fail to load 8021q module on VM%s" % i)
+        session[i].cmd("modprobe 8021q")
 
     try:
         for i in range(2):
@@ -123,7 +114,7 @@
             set_arp_ignore(session[i], ifname[i])
 
         for vlan in range(1, vlan_num+1):
-            logging.info("Test for vlan %s" % vlan)
+            logging.info("Test for vlan %s", vlan)
 
             logging.info("Ping between vlans")
             interface = ifname[0] + '.' + str(vlan)
@@ -146,15 +137,14 @@
                 # we must use a dedicated session becuase the kvm_subprocess
                 # does not have the other method to interrupt the process in
                 # the guest rather than close the session.
-                session_flood = kvm_test_utils.wait_for_login(vm[src],
-                                                              timeout = 60)
+                session_flood = vm[src].wait_for_login(timeout=60)
                 kvm_test_utils.ping(vlan_ip[dst], flood=True,
                                    interface=ifname[src],
                                    session=session_flood, timeout=10)
                 session_flood.close()
 
-            flood_ping(0,1)
-            flood_ping(1,0)
+            flood_ping(0, 1)
+            flood_ping(1, 0)
 
             logging.info("Transfering data through nc")
             nc_transfer(0, 1)
@@ -164,7 +154,7 @@
         for vlan in range(1, vlan_num+1):
             rem_vlan(session[0], vlan, ifname[0])
             rem_vlan(session[1], vlan, ifname[1])
-            logging.info("rem vlan: %s" % vlan)
+            logging.info("rem vlan: %s", vlan)
 
     # Plumb/unplumb maximal unber of vlan interfaces
     i = 1
diff --git a/client/tests/kvm/tests/vmstop.py b/client/tests/kvm/tests/vmstop.py
new file mode 100644
index 0000000..74ecb23
--- /dev/null
+++ b/client/tests/kvm/tests/vmstop.py
@@ -0,0 +1,83 @@
+import logging, time, os
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.bin import utils
+import kvm_utils
+
+
+def run_vmstop(test, params, env):
+    """
+    KVM guest stop test:
+    1) Log into a guest
+    2) Copy a file into guest
+    3) Stop guest
+    4) Check the status through monitor
+    5) Check the session
+    6) Migrat the vm to a file twice and compare them.
+
+    @param test: kvm test object
+    @param params: Dictionary with the test parameters
+    @param env: Dictionary with test environment.
+    """
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+    timeout = float(params.get("login_timeout", 240))
+    session = vm.wait_for_login(timeout=timeout)
+
+    save_path = params.get("save_path", "/tmp")
+    clean_save = params.get("clean_save") == "yes"
+    save1 = os.path.join(save_path, "save1")
+    save2 = os.path.join(save_path, "save2")
+
+    guest_path = params.get("guest_path", "/tmp")
+    file_size = params.get("file_size", "1000")
+
+    try:
+        utils.run("dd if=/dev/zero of=/tmp/file bs=1M count=%s" % file_size)
+        # Transfer file from host to guest, we didn't expect the finish of
+        # transfer, we just let it to be a kind of stress in guest.
+        bg = kvm_utils.Thread(vm.copy_files_to, ("/tmp/file", guest_path),
+                              dict(verbose=True, timeout=60))
+        logging.info("Start the background transfer")
+        bg.start()
+
+        try:
+            # wait for the transfer start
+            time.sleep(5)
+            logging.info("Stop the VM")
+            vm.monitor.cmd("stop")
+
+            # check with monitor
+            logging.info("Check the status through monitor")
+            if "paused" not in vm.monitor.info("status"):
+                raise error.TestFail("Guest did not pause after sending stop")
+
+            # check through session
+            logging.info("Check the session")
+            if session.is_responsive():
+                raise error.TestFail("Session still alive after sending stop")
+
+            # Check with the migration file
+            logging.info("Save and check the state files")
+            for p in [save1, save2]:
+                vm.save_to_file(p)
+                time.sleep(1)
+                if not os.path.isfile(p):
+                    raise error.TestFail("VM failed to save state file %s" % p)
+
+            # Fail if we see deltas
+            md5_save1 = utils.hash_file(save1)
+            md5_save2 = utils.hash_file(save2)
+            if md5_save1 != md5_save2:
+                raise error.TestFail("The produced state files differ")
+        finally:
+            bg.join(suppress_exception=True)
+
+    finally:
+        session.close()
+        if clean_save:
+            logging.debug("Clean the state files")
+            if os.path.isfile(save1):
+                os.remove(save1)
+            if os.path.isfile(save2):
+                os.remove(save2)
+        vm.monitor.cmd("cont")
diff --git a/client/tests/kvm/tests/whql_client_install.py b/client/tests/kvm/tests/whql_client_install.py
index 84b91bc..f5d725d 100644
--- a/client/tests/kvm/tests/whql_client_install.py
+++ b/client/tests/kvm/tests/whql_client_install.py
@@ -1,6 +1,6 @@
-import logging, time, os, re
+import logging, time, os
 from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils, rss_file_transfer
+import kvm_test_utils, kvm_utils, rss_file_transfer
 
 
 def run_whql_client_install(test, params, env):
@@ -13,13 +13,17 @@
     5) Move the client machine into the server's workgroup
     6) Reboot the client machine
     7) Install the DTM client software
+    8) Setup auto logon for the user created by the installation
+       (normally DTMLLUAdminUser)
+    9) Reboot again
 
     @param test: kvm test object
     @param params: Dictionary with the test parameters
     @param env: Dictionary with test environment.
     """
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
-    session = kvm_test_utils.wait_for_login(vm, 0, 240)
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))
 
     # Collect test params
     server_address = params.get("server_address")
@@ -29,6 +33,8 @@
                                     "Microsoft Driver Test Manager\\Studio")
     server_username = params.get("server_username")
     server_password = params.get("server_password")
+    client_username = params.get("client_username")
+    client_password = params.get("client_password")
     dsso_delete_machine_binary = params.get("dsso_delete_machine_binary",
                                             "deps/whql_delete_machine_15.exe")
     dsso_delete_machine_binary = kvm_utils.get_path(test.bindir,
@@ -50,52 +56,50 @@
     server_session = kvm_utils.remote_login("nc", server_address,
                                             server_shell_port, "", "",
                                             session.prompt, session.linesep)
+    server_session.set_status_test_command(session.status_test_command)
 
     # Get server and client information
     cmd = "echo %computername%"
-    server_name = server_session.get_command_output(cmd).strip()
-    client_name = session.get_command_output(cmd).strip()
+    server_name = server_session.cmd_output(cmd).strip()
+    client_name = session.cmd_output(cmd).strip()
     cmd = "wmic computersystem get domain"
-    server_workgroup = server_session.get_command_output(cmd).strip()
+    server_workgroup = server_session.cmd_output(cmd).strip()
     server_workgroup = server_workgroup.splitlines()[-1]
     regkey = r"HKLM\SYSTEM\CurrentControlSet\Services\Tcpip\Parameters"
     cmd = "reg query %s /v Domain" % regkey
-    o = server_session.get_command_output(cmd).strip().splitlines()[-1]
+    o = server_session.cmd_output(cmd).strip().splitlines()[-1]
     try:
         server_dns_suffix = o.split(None, 2)[2]
     except IndexError:
         server_dns_suffix = ""
 
     # Delete the client machine from the server's data store (if it's there)
-    server_session.get_command_output("cd %s" % server_studio_path)
+    server_session.cmd("cd %s" % server_studio_path)
     cmd = "%s %s %s" % (os.path.basename(dsso_delete_machine_binary),
                         server_name, client_name)
-    server_session.get_command_output(cmd, print_func=logging.info)
+    server_session.cmd(cmd, print_func=logging.info)
     server_session.close()
 
     # Rename the client machine
     client_name = "autotest_%s" % kvm_utils.generate_random_string(4)
-    logging.info("Renaming client machine to '%s'" % client_name)
+    logging.info("Renaming client machine to '%s'", client_name)
     cmd = ('wmic computersystem where name="%%computername%%" rename name="%s"'
            % client_name)
-    if session.get_command_status(cmd, timeout=600) != 0:
-        raise error.TestError("Could not rename the client machine")
+    session.cmd(cmd, timeout=600)
 
     # Join the server's workgroup
-    logging.info("Joining workgroup '%s'" % server_workgroup)
+    logging.info("Joining workgroup '%s'", server_workgroup)
     cmd = ('wmic computersystem where name="%%computername%%" call '
            'joindomainorworkgroup name="%s"' % server_workgroup)
-    if session.get_command_status(cmd, timeout=600) != 0:
-        raise error.TestError("Could not change the client's workgroup")
+    session.cmd(cmd, timeout=600)
 
     # Set the client machine's DNS suffix
-    logging.info("Setting DNS suffix to '%s'" % server_dns_suffix)
+    logging.info("Setting DNS suffix to '%s'", server_dns_suffix)
     cmd = 'reg add %s /v Domain /d "%s" /f' % (regkey, server_dns_suffix)
-    if session.get_command_status(cmd, timeout=300) != 0:
-        raise error.TestError("Could not set the client's DNS suffix")
+    session.cmd(cmd, timeout=300)
 
     # Reboot
-    session = kvm_test_utils.reboot(vm, session)
+    session = vm.reboot(session)
 
     # Access shared resources on the server machine
     logging.info("Attempting to access remote share on server")
@@ -103,9 +107,11 @@
                                          server_password)
     end_time = time.time() + 120
     while time.time() < end_time:
-        s = session.get_command_status(cmd)
-        if s == 0:
+        try:
+            session.cmd(cmd)
             break
+        except:
+            pass
         time.sleep(5)
     else:
         raise error.TestError("Could not access server share from client "
@@ -114,7 +120,17 @@
     # Install
     logging.info("Installing DTM client (timeout=%ds)", install_timeout)
     install_cmd = r"cmd /c \\%s\%s" % (server_name, install_cmd.lstrip("\\"))
-    if session.get_command_status(install_cmd, timeout=install_timeout) != 0:
-        raise error.TestError("Client installation failed")
+    session.cmd(install_cmd, timeout=install_timeout)
 
+    # Setup auto logon
+    logging.info("Setting up auto logon for user '%s'", client_username)
+    cmd = ('reg add '
+           '"HKLM\\Software\\Microsoft\\Windows NT\\CurrentVersion\\winlogon" '
+           '/v "%s" /d "%s" /t REG_SZ /f')
+    session.cmd(cmd % ("AutoAdminLogon", "1"))
+    session.cmd(cmd % ("DefaultUserName", client_username))
+    session.cmd(cmd % ("DefaultPassword", client_password))
+
+    # Reboot one more time
+    session = vm.reboot(session)
     session.close()
diff --git a/client/tests/kvm/tests/whql_submission.py b/client/tests/kvm/tests/whql_submission.py
index 1fe27c9..c3621c4 100644
--- a/client/tests/kvm/tests/whql_submission.py
+++ b/client/tests/kvm/tests/whql_submission.py
@@ -1,25 +1,38 @@
-import logging, time, os, re
+import logging, os, re
 from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils, rss_file_transfer
+import kvm_subprocess, kvm_utils, rss_file_transfer
 
 
 def run_whql_submission(test, params, env):
     """
     WHQL submission test:
-    1) Log into the guest (the client machine) and into a DTM server machine
+    1) Log into the client machines and into a DTM server machine
     2) Copy the automation program binary (dsso_test_binary) to the server machine
     3) Run the automation program
     4) Pass the program all relevant parameters (e.g. device_data)
     5) Wait for the program to terminate
     6) Parse and report job results
-    (logs and HTML reports are placed in test.bindir)
+    (logs and HTML reports are placed in test.debugdir)
 
     @param test: kvm test object
     @param params: Dictionary with the test parameters
     @param env: Dictionary with test environment.
     """
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
-    session = kvm_test_utils.wait_for_login(vm, 0, 240)
+    # Log into all client VMs
+    login_timeout = int(params.get("login_timeout", 360))
+    vms = []
+    sessions = []
+    for vm_name in params.objects("vms"):
+        vms.append(env.get_vm(vm_name))
+        vms[-1].verify_alive()
+        sessions.append(vms[-1].wait_for_login(timeout=login_timeout))
+
+    # Make sure all NICs of all client VMs are up
+    for vm in vms:
+        nics = vm.params.objects("nics")
+        for nic_index in range(len(nics)):
+            s = vm.wait_for_login(nic_index, 600)
+            s.close()
 
     # Collect parameters
     server_address = params.get("server_address")
@@ -30,41 +43,61 @@
     dsso_test_binary = params.get("dsso_test_binary",
                                   "deps/whql_submission_15.exe")
     dsso_test_binary = kvm_utils.get_path(test.bindir, dsso_test_binary)
-    test_device = params.get("test_device")
-    job_filter = params.get("job_filter", ".*")
+    dsso_delete_machine_binary = params.get("dsso_delete_machine_binary",
+                                            "deps/whql_delete_machine_15.exe")
+    dsso_delete_machine_binary = kvm_utils.get_path(test.bindir,
+                                                    dsso_delete_machine_binary)
     test_timeout = float(params.get("test_timeout", 600))
-    wtt_services = params.get("wtt_services")
 
-    # Restart WTT service(s) on the client
-    logging.info("Restarting WTT services on client")
-    for svc in wtt_services.split():
-        kvm_test_utils.stop_windows_service(session, svc)
-    for svc in wtt_services.split():
-        kvm_test_utils.start_windows_service(session, svc)
-
-    # Copy dsso_test_binary to the server
-    rss_file_transfer.upload(server_address, server_file_transfer_port,
-                             dsso_test_binary, server_studio_path, timeout=60)
+    # Copy dsso binaries to the server
+    for filename in dsso_test_binary, dsso_delete_machine_binary:
+        rss_file_transfer.upload(server_address, server_file_transfer_port,
+                                 filename, server_studio_path, timeout=60)
 
     # Open a shell session with the server
     server_session = kvm_utils.remote_login("nc", server_address,
                                             server_shell_port, "", "",
-                                            session.prompt, session.linesep)
+                                            sessions[0].prompt,
+                                            sessions[0].linesep)
+    server_session.set_status_test_command(sessions[0].status_test_command)
 
-    # Get the computer names of the server and client
+    # Get the computer names of the server and clients
     cmd = "echo %computername%"
-    server_name = server_session.get_command_output(cmd).strip()
-    client_name = session.get_command_output(cmd).strip()
-    session.close()
+    server_name = server_session.cmd_output(cmd).strip()
+    client_names = [session.cmd_output(cmd).strip() for session in sessions]
+
+    # Delete all client machines from the server's data store
+    server_session.cmd("cd %s" % server_studio_path)
+    for client_name in client_names:
+        cmd = "%s %s %s" % (os.path.basename(dsso_delete_machine_binary),
+                            server_name, client_name)
+        server_session.cmd(cmd, print_func=logging.debug)
+
+    # Reboot the client machines
+    sessions = kvm_utils.parallel((vm.reboot, (session,))
+                                  for vm, session in zip(vms, sessions))
+
+    # Check the NICs again
+    for vm in vms:
+        nics = vm.params.objects("nics")
+        for nic_index in range(len(nics)):
+            s = vm.wait_for_login(nic_index, 600)
+            s.close()
+
+    # Run whql_pre_command and close the sessions
+    if params.get("whql_pre_command"):
+        for session in sessions:
+            session.cmd(params.get("whql_pre_command"),
+                        int(params.get("whql_pre_command_timeout", 600)))
+            session.close()
 
     # Run the automation program on the server
-    server_session.get_command_output("cd %s" % server_studio_path)
+    pool_name = "%s_pool" % client_names[0]
+    submission_name = "%s_%s" % (client_names[0],
+                                 params.get("submission_name"))
     cmd = "%s %s %s %s %s %s" % (os.path.basename(dsso_test_binary),
-                                 server_name,
-                                 client_name,
-                                 "%s_pool" % client_name,
-                                 "%s_submission" % client_name,
-                                 test_timeout)
+                                 server_name, pool_name, submission_name,
+                                 test_timeout, " ".join(client_names))
     server_session.sendline(cmd)
 
     # Helper function: wait for a given prompt and raise an exception if an
@@ -78,40 +111,69 @@
             if errors:
                 raise error.TestError(errors[0])
             else:
-                raise error.TestError("Error running automation program: could "
-                                      "not find '%s' prompt" % prompt)
+                raise error.TestError("Error running automation program: "
+                                      "could not find '%s' prompt" % prompt)
 
     # Tell the automation program which device to test
     find_prompt("Device to test:")
-    server_session.sendline(test_device)
+    server_session.sendline(params.get("test_device"))
 
     # Tell the automation program which jobs to run
     find_prompt("Jobs to run:")
-    server_session.sendline(job_filter)
+    server_session.sendline(params.get("job_filter", ".*"))
 
-    # Give the automation program all the device data supplied by the user
+    # Set submission DeviceData
     find_prompt("DeviceData name:")
-    for dd in kvm_utils.get_sub_dict_names(params, "device_data"):
-        dd_params = kvm_utils.get_sub_dict(params, dd)
+    for dd in params.objects("device_data"):
+        dd_params = params.object_params(dd)
         if dd_params.get("dd_name") and dd_params.get("dd_data"):
             server_session.sendline(dd_params.get("dd_name"))
             server_session.sendline(dd_params.get("dd_data"))
     server_session.sendline()
 
-    # Give the automation program all the descriptor information supplied by
-    # the user
+    # Set submission descriptors
     find_prompt("Descriptor path:")
-    for desc in kvm_utils.get_sub_dict_names(params, "descriptors"):
-        desc_params = kvm_utils.get_sub_dict(params, desc)
+    for desc in params.objects("descriptors"):
+        desc_params = params.object_params(desc)
         if desc_params.get("desc_path"):
             server_session.sendline(desc_params.get("desc_path"))
     server_session.sendline()
 
+    # Set machine dimensions for each client machine
+    for vm_name in params.objects("vms"):
+        vm_params = params.object_params(vm_name)
+        find_prompt(r"Dimension name\b.*:")
+        for dp in vm_params.objects("dimensions"):
+            dp_params = vm_params.object_params(dp)
+            if dp_params.get("dim_name") and dp_params.get("dim_value"):
+                server_session.sendline(dp_params.get("dim_name"))
+                server_session.sendline(dp_params.get("dim_value"))
+        server_session.sendline()
+
+    # Set extra parameters for tests that require them (e.g. NDISTest)
+    for vm_name in params.objects("vms"):
+        vm_params = params.object_params(vm_name)
+        find_prompt(r"Parameter name\b.*:")
+        for dp in vm_params.objects("device_params"):
+            dp_params = vm_params.object_params(dp)
+            if dp_params.get("dp_name") and dp_params.get("dp_regex"):
+                server_session.sendline(dp_params.get("dp_name"))
+                server_session.sendline(dp_params.get("dp_regex"))
+                # Make sure the prompt appears again (if the device isn't found
+                # the automation program will terminate)
+                find_prompt(r"Parameter name\b.*:")
+        server_session.sendline()
+
     # Wait for the automation program to terminate
-    m, o = server_session.read_up_to_prompt(print_func=logging.info,
-                                            timeout=test_timeout + 300)
-    # (test_timeout + 300 is used here because the automation program is
-    # supposed to terminate cleanly on its own when test_timeout expires)
+    try:
+        o = server_session.read_up_to_prompt(print_func=logging.info,
+                                             timeout=test_timeout + 300)
+        # (test_timeout + 300 is used here because the automation program is
+        # supposed to terminate cleanly on its own when test_timeout expires)
+        done = True
+    except kvm_subprocess.ExpectError, e:
+        o = e.output
+        done = False
     server_session.close()
 
     # Look for test results in the automation program's output
@@ -151,38 +213,63 @@
                 except (KeyError, OSError):
                     pass
 
-    # Print result summary
-    logging.info("")
-    logging.info("Result summary:")
-    name_length = max(len(r.get("job", "")) for r in results)
-    fmt = "%%-6s %%-%ds %%-15s %%-8s %%-8s %%-8s %%-15s" % name_length
-    logging.info(fmt % ("ID", "Job", "Status", "Pass", "Fail", "NotRun",
-                        "NotApplicable"))
-    logging.info(fmt % ("--", "---", "------", "----", "----", "------",
-                        "-------------"))
-    for r in results:
-        logging.info(fmt % (r.get("id"), r.get("job"), r.get("status"),
-                            r.get("pass"), r.get("fail"), r.get("notrun"),
-                            r.get("notapplicable")))
-    logging.info("(see logs and HTML reports in %s)" % test.debugdir)
+    # Print result summary (both to the regular logs and to a file named
+    # 'summary' in test.debugdir)
+    def print_summary_line(f, line):
+        logging.info(line)
+        f.write(line + "\n")
+    if results:
+        # Make sure all results have the required keys
+        for r in results:
+            r["id"] = str(r.get("id"))
+            r["job"] = str(r.get("job"))
+            r["status"] = str(r.get("status"))
+            r["pass"] = int(r.get("pass", 0))
+            r["fail"] = int(r.get("fail", 0))
+            r["notrun"] = int(r.get("notrun", 0))
+            r["notapplicable"] = int(r.get("notapplicable", 0))
+        # Sort the results by failures and total test count in descending order
+        results = [(r["fail"],
+                    r["pass"] + r["fail"] + r["notrun"] + r["notapplicable"],
+                    r) for r in results]
+        results.sort(reverse=True)
+        results = [r[-1] for r in results]
+        # Print results
+        logging.info("")
+        logging.info("Result summary:")
+        name_length = max(len(r["job"]) for r in results)
+        fmt = "%%-6s %%-%ds %%-15s %%-8s %%-8s %%-8s %%-15s" % name_length
+        f = open(os.path.join(test.debugdir, "summary"), "w")
+        print_summary_line(f, fmt % ("ID", "Job", "Status", "Pass", "Fail",
+                                     "NotRun", "NotApplicable"))
+        print_summary_line(f, fmt % ("--", "---", "------", "----", "----",
+                                     "------", "-------------"))
+        for r in results:
+            print_summary_line(f, fmt % (r["id"], r["job"], r["status"],
+                                         r["pass"], r["fail"], r["notrun"],
+                                         r["notapplicable"]))
+        f.close()
+        logging.info("(see logs and HTML reports in %s)", test.debugdir)
 
-    # Kill the VM and fail if the automation program did not terminate on time
-    if not m:
-        vm.destroy()
+    # Kill the client VMs and fail if the automation program did not terminate
+    # on time
+    if not done:
+        kvm_utils.parallel(vm.destroy for vm in vms)
         raise error.TestFail("The automation program did not terminate "
                              "on time")
 
-    # Fail if there are failed or incomplete jobs (kill the VM if there are
-    # incomplete jobs)
-    failed_jobs = [r.get("job") for r in results
-                   if r.get("status", "").lower() == "investigate"]
-    running_jobs = [r.get("job") for r in results
-                    if r.get("status", "").lower() == "inprogress"]
+    # Fail if there are failed or incomplete jobs (kill the client VMs if there
+    # are incomplete jobs)
+    failed_jobs = [r["job"] for r in results
+                   if r["status"].lower() == "investigate"]
+    running_jobs = [r["job"] for r in results
+                    if r["status"].lower() == "inprogress"]
     errors = []
     if failed_jobs:
         errors += ["Jobs failed: %s." % failed_jobs]
     if running_jobs:
-        vm.destroy()
+        for vm in vms:
+            vm.destroy()
         errors += ["Jobs did not complete on time: %s." % running_jobs]
     if errors:
         raise error.TestFail(" ".join(errors))
diff --git a/client/tests/kvm/tests/yum_update.py b/client/tests/kvm/tests/yum_update.py
index 1a2bfae..7c9b96c 100644
--- a/client/tests/kvm/tests/yum_update.py
+++ b/client/tests/kvm/tests/yum_update.py
@@ -1,6 +1,4 @@
 import logging, time
-from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils
 
 
 def internal_yum_update(session, command, prompt, timeout):
@@ -16,8 +14,9 @@
     session.sendline(command)
     end_time = time.time() + timeout
     while time.time() < end_time:
-        (match, text) = session.read_until_last_line_matches(
-                        ["[Ii]s this [Oo][Kk]", prompt], timeout=timeout)
+        match = session.read_until_last_line_matches(
+                                                ["[Ii]s this [Oo][Kk]", prompt],
+                                                timeout=timeout)[0]
         if match == 0:
             logging.info("Got 'Is this ok'; sending 'y'")
             session.sendline("y")
@@ -38,9 +37,10 @@
     @param params: Dictionary with test parameters.
     @param env: Dictionary with the test environment.
     """
-    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
     timeout = int(params.get("login_timeout", 360))
-    session = kvm_test_utils.wait_for_login(vm, timeout=timeout)
+    session = vm.wait_for_login(timeout=timeout)
 
     internal_yum_update(session, "yum update", params.get("shell_prompt"), 600)
     internal_yum_update(session, "yum update kernel",
diff --git a/client/tests/kvm/tests_base.cfg.sample b/client/tests/kvm/tests_base.cfg.sample
index eddc02b..c076963 100644
--- a/client/tests/kvm/tests_base.cfg.sample
+++ b/client/tests/kvm/tests_base.cfg.sample
@@ -61,7 +61,10 @@
 # Misc
 profilers = kvm_stat
 login_timeout = 360
+image_raw_device = no
 
+# NFS directory of guests' images
+images_good = 0.0.0.0:/autotest/images_good
 
 # Tests
 variants:
@@ -75,6 +78,12 @@
         kill_vm_timeout = 60
         kill_vm_timeout_on_error = 0
 
+    - image_copy:
+        type = image_copy
+        vms = ''
+        parallel = no
+        profilers =
+
     - setup:        install
         type = steps
         fail_if_stuck_for = 300
@@ -88,35 +97,33 @@
         kill_vm_gracefully = yes
         kill_vm_on_error = yes
         force_create_image = yes
-        pre_command += " scripts/unattended.py;"
         extra_params += " -boot d"
         guest_port_unattended_install = 12323
         kernel = vmlinuz
         initrd = initrd.img
         nic_mode = tap
+        # uncomment the following line to test the migration in parallel
+        # migrate_background = yes
+
         variants:
             # Install guest from cdrom 
             - cdrom:
                 medium = cdrom
                 nic_mode = user
                 redirs += " unattended_install"
-                kernel =
-                initrd = 
             # Install guest from http/ftp url
             - url:
                 medium = url
-                extra_params += " --append ks=floppy"
                 url = REPLACE_THIS_WITH_TREE_URL
             # Install guest from nfs nfs_server:nfs_dir
             - nfs:
                 medium = nfs
-                extra_params += " --append ks=floppy"
                 nfs_server = REPLACE_THIS_WITH_NFS_SERVER
                 nfs_dir = REPLACE_THIS_WITH_NFS_DIRECTORY
             # Install guest with a remote kickstart
             - remote_ks:
                 medium = url
-                extra_params += " --append ks=REPLACE_THIS_WITH_URL_OF_KS"
+                extra_params = " --append ks=REPLACE_THIS_WITH_URL_OF_KS"
                 url = REPLACE_THIS_WITH_TREE_URL
 
     - boot:         install setup unattended_install.cdrom
@@ -141,6 +148,9 @@
         iterations = 2
         used_mem = 1024
         mig_timeout = 3600
+        # you can uncomment the following line to enable the state
+        # check
+        # vmstate_check = yes
         variants:
             - tcp:
                 migration_protocol = "tcp"
@@ -150,7 +160,39 @@
                 migration_protocol = "exec"
             - mig_cancel:
                 migration_protocol = "tcp"
-                mig_cancel = True
+                mig_cancel = yes
+        variants:
+            - @default:
+            - with_reboot:
+                iterations = 1
+                type = migration_with_reboot
+            - with_file_transfer:
+                iterations = 1
+                type = migration_with_file_transfer
+            - with_autotest:
+                type = autotest
+                migrate_background = yes
+                test_timeout = 1800
+                variants:
+                    - dbench:
+                        test_control_file = dbench.control
+                    - stress:
+                        test_control_file = stress.control
+                    - monotonic_time:
+                        test_control_file = monotonic_time.control
+
+    - migrate_multi_host:      install setup unattended_install.cdrom
+        type = migration_multi_host
+        migration_test_command = help
+        migration_bg_command = "cd /tmp; nohup tcpdump -q -t ip host localhost"
+        migration_bg_check_command = pgrep tcpdump
+        migration_bg_kill_command = pkill tcpdump
+        kill_vm_on_error = yes
+        iterations = 2
+        used_mem = 1024
+        mig_timeout = 3600
+        comm_port = 13234
+        regain_ip_cmd = dhclient
 
     - boot_savevm: install setup unattended_install.cdrom
         type = boot_savevm
@@ -199,6 +241,9 @@
                 test_control_file = hwclock.control
             - rtc:
                 test_control_file = rtc.control
+            - iozone:
+                test_control_file = iozone.control
+
 
     - linux_s3:     install setup unattended_install.cdrom
         type = linux_s3
@@ -291,7 +336,7 @@
         reboot = yes
         variants:
             - autoit:
-                interpreter = D:\AutoIt3.exe
+                interpreter = "cmd /c D:\AutoIt3.exe"
                 variants:
                     - notepad:
                         guest_script = autoit/notepad1.au3
@@ -303,7 +348,7 @@
                         dst_rsc_dir = "C:\"
                         dst_rsc_path = "C:\autoit\stub\stub.au3"
             - powershell:
-                interpreter = "powershell.exe -File"
+                interpreter = "cmd /c powershell.exe -File"
                 variants:
                     - stub:
                         download = yes
@@ -317,7 +362,7 @@
         iozone_cmd = "D:\IOzone\iozone.exe -a"
         iozone_timeout = 3600
 
-    - @whql:         install setup unattended_install.cdrom
+    - whql:         install setup unattended_install.cdrom
         nic_mode = tap
         # Replace this with the address of an installed DTM server
         server_address = 10.20.30.40
@@ -327,11 +372,25 @@
         server_shell_port = 10022
         server_file_transfer_port = 10023
         server_studio_path = %programfiles%\Microsoft Driver Test Manager\Studio
+        dsso_test_binary = deps/whql_submission_15.exe
+        dsso_delete_machine_binary = deps/whql_delete_machine_15.exe
         wtt_services = wttsvc
         variants:
-            - whql_client_install:
+            - support_vm_install:
+                # The support VM is identical to the tested VM in every way
+                # except for the image name which ends with '-supportvm'.
+                type = unattended_install
+                extra_params += " -boot d"
+                force_create_image = yes
+                kill_vm = yes
+                nic_mode = user
+                redirs += " unattended_install"
+                guest_port_unattended_install = 12323
+                medium = cdrom
+                kernel =
+                initrd = 
+            - client_install:    support_vm_install
                 type = whql_client_install
-                dsso_delete_machine_binary = deps/whql_delete_machine_15.exe
                 # The username and password are required for accessing the DTM client
                 # installer binary shared by the server
                 server_username = administrator
@@ -340,12 +399,22 @@
                 # (the final cmd will be something like \\servername\DTMInstall\...)
                 install_cmd = \DTMInstall\Client\Setup.exe /passive
                 install_timeout = 3600
-            - whql_submission:    whql_client_install
+                # The test will setup auto logon on the client machine using the
+                # following username and password:
+                client_username = DTMLLUAdminUser
+                client_password = Testpassword,1
+                # (These are created by the DTM client installer and should probably not
+                # be changed.)
+                variants:
+                    - @original:
+                    - support_vm:
+            - submission:    client_install support_vm_install
                 type = whql_submission
                 extra_params += " -snapshot"
-                dsso_test_binary = deps/whql_submission_15.exe
+                restart_vm = yes
+                cdroms =
                 test_timeout = 3600
-                device_data = cat0 cat1 cat2 cat3 logoarch logoos whqlos whqlqual prog desc filter virt
+                device_data = cat0 cat1 cat2 cat3 prog desc virt filter logoarch logoos whqlos whqlqual
                 descriptors = desc1 desc2 desc3
                 # DeviceData names
                 dd_name_cat0     = Category
@@ -363,43 +432,105 @@
                 # Common DeviceData data
                 dd_data_filter   = FilterIfNoInf
                 dd_data_virt     = True
+                # Exclude jobs that have '(Manual)' in their names
+                job_filter = ^((?!\(Manual\)).)*$
                 variants:
-                    - keyboard:
-                        # test_device is a regular expression that should match a device's
-                        # name as it appears in device manager.  The first device that matches
-                        # is used.
-                        test_device = keyboard
-                        # Set timeout to 10 hours
-                        test_timeout = 36000
-                        dd_data_cat0 = Input\Keyboard
-                        dd_data_cat1 = Device Fundamentals
-                        dd_data_cat2 = System Fundamentals\Dynamic Partitioning
-                        dd_data_prog = InputKbd
-                        dd_data_desc = Input > Keyboard
-                    - hdd:
-                        test_device = qemu harddisk
-                        device_data += " ex0 ex1 ex2 ex3"
-                        dd_data_cat0 = Storage\Device Class\Disk\Disk
-                        dd_data_cat1 = Storage\Device Class\Disk\Fixed
-                        dd_data_cat2 = Storage\Device Class\Disk\Bus\ATA
-                        dd_data_cat3 = Device Fundamentals
-                        dd_data_prog = StorHDD
-                        dd_data_desc = Storage > Hard Disk Drive (HDD)
-                        dd_name_ex0 = Storage_bus_type
-                        dd_data_ex0 = ATA/ATAPI
-                        dd_name_ex1 = Hybrid_HDD_Support
-                        dd_data_ex1 = 0
-                        dd_name_ex2 = Non_Rotating_Media
-                        dd_data_ex2 = 0
-                        dd_name_ex3 = Secure_Storage
-                        dd_data_ex3 = 0
+                    - unclassified:
+                        dd_data_cat0 = Device Fundamentals
+                        dd_data_cat1 = System Fundamentals\Dynamic Partitioning
+                        dd_data_prog = Unclassified
+                        dd_data_desc = Unclassified
+                        dd_data_whqlqual = Unclassified Signature
                         variants:
-                            - full:
-                                # Yes, 100 hours, this is not a mistake
-                                test_timeout = 360000
-                            - syscache_test:
-                                job_filter = syscache test
-                                test_timeout = 7200
+                            - tablet:
+                                submission_name = tablet
+                                extra_params += " -usbdevice tablet"
+                                test_device = HID-compliant mouse
+                                test_timeout = 36000
+                    - device:
+                        variants:
+                            - keyboard:
+                                submission_name = keyboard
+                                # test_device is a regular expression that should match a device's
+                                # name as it appears in device manager.  The first device that matches
+                                # is used.
+                                test_device = keyboard
+                                # Set timeout to 10 hours
+                                test_timeout = 36000
+                                dd_data_cat0 = Input\Keyboard
+                                dd_data_cat1 = Device Fundamentals
+                                dd_data_cat2 = System Fundamentals\Dynamic Partitioning
+                                dd_data_prog = InputKbd
+                                dd_data_desc = Input > Keyboard
+                            - net:
+                                submission_name = net
+                                # Add a support machine and extra NICs
+                                vms += " supportvm"
+                                nics += " nic2 nic3"
+                                test_device = RTL8139.*NIC$
+                                test_timeout = 86400
+                                dd_data_cat0 = Network\LAN (Ethernet)
+                                dd_data_cat1 = Device Fundamentals
+                                dd_data_cat2 = System Fundamentals\Dynamic Partitioning
+                                dd_data_prog = NetLan
+                                dd_data_desc = Network > LAN (Ethernet)
+                                # Machine dimensions
+                                dimensions = testrole
+                                dim_name_testrole = NetDevice\TestRole
+                                dim_value_testrole_vm1 = NdistestLanClient
+                                dim_value_testrole_supportvm = NdistestLanServer
+                                # Device selection for the NDISTest client machine
+                                device_params_vm1 = testdev clientmsgdev clientsupportdev
+                                dp_name_testdev = NdistestLanClientTestDevice
+                                dp_regex_testdev = RTL8139.*NIC$
+                                dp_name_clientmsgdev = NdistestLanClientMessageDevice
+                                dp_regex_clientmsgdev = RTL8139.*NIC #2$
+                                dp_name_clientsupportdev = NdistestLanClientSupportDevice0
+                                dp_regex_clientsupportdev = RTL8139.*NIC #3$
+                                # Device selection for the NDISTest server machine
+                                device_params_supportvm = servermsgdev serversupportdev
+                                dp_name_servermsgdev = NdistestLanServerMessageDevice
+                                dp_regex_servermsgdev = RTL8139.*NIC$
+                                dp_name_serversupportdev = NdistestLanServerSupportDevice0
+                                dp_regex_serversupportdev = RTL8139.*NIC #2$
+                            - hdd:
+                                submission_name = hdd
+                                # Run the tests on a non-system drive
+                                # (match device names that contain 'QEMU HARDDISK' and do not contain '[C]')
+                                test_device = ^(?=.*?\bQEMU HARDDISK\b)((?!\[C\]).)*$
+                                device_data += " ex0 ex1 ex2 ex3"
+                                dd_data_cat0 = Storage\Device Class\Disk\Disk
+                                dd_data_cat1 = Storage\Device Class\Disk\Fixed
+                                dd_data_cat2 = Storage\Device Class\Disk\Bus\ATA
+                                dd_data_cat3 = Device Fundamentals
+                                dd_data_prog = StorHDD
+                                dd_data_desc = Storage > Hard Disk Drive (HDD)
+                                dd_name_ex0 = Storage_bus_type
+                                dd_data_ex0 = ATA/ATAPI
+                                dd_name_ex1 = Hybrid_HDD_Support
+                                dd_data_ex1 = 0
+                                dd_name_ex2 = Non_Rotating_Media
+                                dd_data_ex2 = 0
+                                dd_name_ex3 = Secure_Storage
+                                dd_data_ex3 = 0
+                                # Add a 2nd disk which will become D:
+                                images += " tmp"
+                                image_name_tmp = tmp
+                                image_size_tmp = 4G
+                                force_create_image_tmp = yes
+                                # Run diskpart to partition the 2nd disk
+                                whql_pre_command = "echo select disk=1 > dp.txt && "
+                                whql_pre_command += "echo create partition primary >> dp.txt && "
+                                whql_pre_command += "echo assign letter=d >> dp.txt && "
+                                whql_pre_command += "diskpart /s dp.txt & "
+                                whql_pre_command += "format d: /fs:ntfs /q /y"
+                                variants:
+                                    - full:
+                                        # Yes, 100 hours, this is not a mistake
+                                        test_timeout = 360000
+                                    - syscache_test:
+                                        job_filter = syscache test
+                                        test_timeout = 7200
 
     - guest_s4:     install setup unattended_install.cdrom
         type = guest_s4
@@ -412,7 +543,6 @@
         relogin_timeout = 240
 
     - nic_hotplug:  install setup unattended_install.cdrom
-        type = pci_hotplug
         pci_type = nic
         reference_cmd = lspci
         find_pci_cmd = 'lspci | tail -n1'
@@ -428,6 +558,12 @@
             - nic_e1000:
                 pci_model = e1000
                 match_string = "Gigabit Ethernet Controller"
+        variants:
+            - default:
+                type = pci_hotplug
+            - additional:
+                type = nic_hotplug
+
 
     - block_hotplug: install setup unattended_install.cdrom
         type = pci_hotplug
@@ -449,16 +585,34 @@
                 match_string = "Virtio block device"
             - block_scsi:
                 pci_model = scsi
-                match_string = "SCSI"
+                match_string = "LSI Logic"
         variants:
             - fmt_qcow2:
                 image_format_stg = qcow2
             - fmt_raw:
                 image_format_stg = raw
 
+    - enospc:
+        type = enospc
+        images += " stg"
+        drive_werror = stop
+        drive_cache = none
+        image_name_stg = enospc
+        image_format_stg = qcow2
+        image_boot_stg = no
+        image_snapshot_stg = no
+        check_image_stg = no
+        vgtest_name = vg_kvm_test_enospc
+        lvtest_name = lv_kvm_test_enospc
+        background_cmd = "nohup dd if=/dev/zero of=%s bs=1024 &"
+        kill_vm = yes
+
     - qmp_basic: install setup unattended_install.cdrom
         type = qmp_basic
 
+    - qmp_basic_rhel6: install setup unattended_install.cdrom
+        type = qmp_basic_rhel6
+
     - vlan:  install setup unattended_install.cdrom
         type = vlan
         # subnet should not be used by host
@@ -530,17 +684,41 @@
         type = netperf
         nic_mode = tap
         netperf_files = netperf-2.4.5.tar.bz2 wait_before_data.patch
+        packet_size = 1500
         setup_cmd = "cd %s && tar xvfj netperf-2.4.5.tar.bz2 && cd netperf-2.4.5 && patch -p0 < ../wait_before_data.patch && ./configure && make"
         netserver_cmd =  %s/netperf-2.4.5/src/netserver
-        # test time is 60 seconds, set the buffer size to 1 for more hardware interrupt
-        netperf_cmd = %s/netperf-2.4.5/src/netperf -t %s -H %s -l 60 -- -m 1
-        protocols = "TCP_STREAM TCP_MAERTS TCP_RR TCP_CRR UDP_RR TCP_SENDFILE UDP_STREAM"
+        variants:
+            - stream:
+                netperf_cmd = %s/netperf-2.4.5/src/netperf -t %s -H %s -l 60 -- -m %s
+                protocols = "TCP_STREAM TCP_MAERTS TCP_SENDFILE UDP_STREAM"
+            - rr:
+                netperf_cmd = %s/netperf-2.4.5/src/netperf -t %s -H %s -l 60 -- -r %s
+                protocols = "TCP_RR TCP_CRR UDP_RR"
 
     - ethtool: install setup unattended_install.cdrom
         type = ethtool
         filesize = 512
         nic_mode = tap
 
+    - nic_bonding:
+        type = nic_bonding
+        nics += ' nic2 nic3 nic4'
+        image_snapshot = yes
+        serial_login = yes
+        test_timeout = 1000
+        filesize = 4000
+        transfer_timeout = 1000
+        transfer_type = remote
+        kill_vm = yes
+
+    - set_link:
+        type = set_link
+        test_timeout = 1000
+        filesize = 4000
+        transfer_timeout = 1000
+        transfer_type = remote
+        kill_vm = yes
+
     - physical_resources_check: install setup unattended_install.cdrom
         type = physical_resources_check
         catch_uuid_cmd = dmidecode | awk -F: '/UUID/ {print $2}'
@@ -616,9 +794,18 @@
             - vmexit:
                 case = vmexit
 
+    - module_probe:
+        type = module_probe
+        # You can specify your own module list, though it is usually not needed.
+        # mod_list = kvm
+        load_count = 100
+        vms = ''
+        profilers = ''
+        take_regular_screendumps = no
+
     - ioquit:
         type = ioquit
-        background_cmd = "for i in 1 2 3 4; do (nohup dd if=/dev/urandom of=/tmp/file bs=102400 count=10000000 &) done"
+        background_cmd = "for i in 1 2 3 4; do (dd if=/dev/urandom of=/tmp/file bs=102400 count=10000000 &); done"
         check_cmd = ps -a |grep dd
         login_timeout = 360
 
@@ -664,6 +851,31 @@
                 image_name_snapshot1 = sn1
                 image_name_snapshot2 = sn2
 
+    - clock_getres: install setup unattended_install.cdrom
+        type = clock_getres
+
+    - yum_update:    install setup unattended_install.cdrom
+        type = yum_update
+        shell_prompt = "Is this ok"
+
+    - kdump: unattended_install.cdrom
+        type = kdump
+        # time waited for the completion of crash dump
+        # crash_timeout = 360
+        # command to add the crashkernel=X@Y to kernel cmd line
+        # kernel_param_cmd = "grubby --update-kernel=`grubby --default-kernel` --args=crashkernel=128M@64M"
+        # command to enable kdump service
+        # kdump_enable_cmd = chkconfig kdump on && service kdump start
+        # command to probe the crash kernel
+        # crash_kernel_prob_cmd = "grep -q 1 /sys/kernel/kexec_crash_loaded"
+
+    - vmstop:
+        type = vmstop
+        # the path used to store the saved vm state
+        # save_path = /tmp
+        # clean the state file?
+        clean_save = yes
+
     # system_powerdown, system_reset and shutdown *must* be the last ones
     # defined (in this order), since the effect of such tests can leave
     # the VM on a bad state.
@@ -704,13 +916,24 @@
         nic_model = virtio
         # You can add advanced attributes on nic_extra_params such as mrg_rxbuf
         #nic_extra_params =
-        # You can set vhost = yes to enable the vhost kernel backend
-        # (This only works if nic_mode=tap)
-        vhost = no
+        # You can add advanced attributes through netdev_extra_params
+        # such as sndbuf, as an example, you can uncomment the
+        # following lines to enable the vhost support ( only available
+        # for tap )
+        #netdev_extra_params = "vhost=on"
         jumbo:
             mtu = 65520
         ethtool:
             supported_features = "tx sg tso gso"
+        whql.submission.device.net:
+            test_device = VirtIO Ethernet Adapter$
+            # Device selection for the NDISTest client machine
+            dp_regex_testdev = VirtIO Ethernet Adapter$
+            dp_regex_clientmsgdev = VirtIO Ethernet Adapter #2$
+            dp_regex_clientsupportdev = VirtIO Ethernet Adapter #3$
+            # Device selection for the NDISTest server machine
+            dp_regex_servermsgdev = VirtIO Ethernet Adapter$
+            dp_regex_serversupportdev = VirtIO Ethernet Adapter #2$
 
 # Guests
 variants:
@@ -729,7 +952,7 @@
         mem_chk_cmd = dmidecode -t 17 | awk -F: '/Size/ {print $2}'
         mem_chk_cur_cmd = grep MemTotal /proc/meminfo
         cpu_chk_cmd = grep -c processor /proc/cpuinfo
-        unattended_install.cdrom:
+        unattended_install:
             # If you want to use floppy to hold kickstarts,
             # comment the 3 lines below
             cdroms += " unattended"
@@ -761,162 +984,222 @@
             - Fedora:
                 no setup
                 shell_prompt = "^\[.*\][\#\$]\s*$"
-                unattended_install.cdrom:
-                    pxe_dir = "images/pxeboot"
-                    pxe_image = "vmlinuz"
-                    pxe_initrd = "initrd.img"
-                    tftp = "images/tftpboot"
-                    bootp = "/pxelinux.0"
-                    extra_params += " -boot cn"
+                unattended_install:
+                    boot_path = "images/pxeboot"
                     # You have to use ks=floppy if you want to use floppies to
                     # hold your kickstart file
-                    #kernel_args = "ks=floppy nicdelay=60 console=ttyS0,115200 console=tty0"
-                    kernel_args = "ks=cdrom nicdelay=60 console=ttyS0,115200 console=tty0"
+                    #extra_params += " --append 'ks=floppy nicdelay=60 console=ttyS0,115200 console=tty0'"
+                    extra_params += " --append 'ks=cdrom nicdelay=60 console=ttyS0,115200 console=tty0'"
 
                 variants:
                     - 8.32:
                         no setup
                         image_name = fc8-32
-                        cdrom_cd1 = linux/Fedora-8-i386-DVD.iso
-                        md5sum = dd6c79fddfff36d409d02242e7b10189
-                        md5sum_1m = dabae451bb69fbbad0e505b25144b1f9
                         install:
                             steps = Fedora-8-i386.steps
-                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/Fedora-8-i386-DVD.iso
+                            md5sum_cd1 = dd6c79fddfff36d409d02242e7b10189
+                            md5sum_1m_cd1 = dabae451bb69fbbad0e505b25144b1f9
+                        unattended_install:
                             unattended_file = unattended/Fedora-8.ks
-                            tftp = images/f8-32/tftpboot
                             #floppy = images/f8-32/ks.vfd
                             cdrom_unattended = images/f8-32/ks.iso
+                            kernel = images/f8-32/vmlinuz
+                            initrd = images/f8-32/initrd.img
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/Fedora-8-i386-DVD.iso
+                            md5sum_cd1 = dd6c79fddfff36d409d02242e7b10189
+                            md5sum_1m_cd1 = dabae451bb69fbbad0e505b25144b1f9
 
                     - 8.64:
                         no setup
                         image_name = f8-64
-                        cdrom_cd1 = linux/Fedora-8-x86_64-DVD.iso
-                        md5sum = 2cb231a86709dec413425fd2f8bf5295
-                        md5sum_1m = 145f6414e19492649a56c89f0a45e719
                         install:
                             steps = Fedora-8-64.steps
-                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/Fedora-8-x86_64-DVD.iso
+                            md5sum_cd1 = 2cb231a86709dec413425fd2f8bf5295
+                            md5sum_1m_cd1 = 145f6414e19492649a56c89f0a45e719
+                        unattended_install:
                             unattended_file = unattended/Fedora-8.ks
-                            tftp = images/f8-64/tftpboot
                             #floppy = images/f8-64/ks.vfd
                             cdrom_unattended = images/f8-64/ks.iso
+                            kernel = images/f8-64/vmlinuz
+                            initrd = images/f8-64/initrd.img
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/Fedora-8-x86_64-DVD.iso
+                            md5sum_cd1 = 2cb231a86709dec413425fd2f8bf5295
+                            md5sum_1m_cd1 = 145f6414e19492649a56c89f0a45e719
 
                     - 9.32:
                         image_name = f9-32
-                        cdrom_cd1 = linux/Fedora-9-i386-DVD.iso
-                        md5sum = 72601f685ea8c808c303353d8bf4d307
-                        md5sum_1m = f24fa25689e5863f1b99984c6feb787f
                         install:
                             steps = Fedora-9-i386.steps
-                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/Fedora-9-i386-DVD.iso
+                            md5sum_cd1 = 72601f685ea8c808c303353d8bf4d307
+                            md5sum_1m_cd1 = f24fa25689e5863f1b99984c6feb787f
+                        unattended_install:
                             unattended_file = unattended/Fedora-9.ks
-                            tftp = images/f9-32/tftpboot
                             #floppy = images/f9-32/ks.vfd
                             cdrom_unattended = images/f9-32/ks.iso
+                            kernel = images/f9-32/vmlinuz
+                            initrd = images/f9-32/initrd.img
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/Fedora-9-i386-DVD.iso
+                            md5sum_cd1 = 72601f685ea8c808c303353d8bf4d307
+                            md5sum_1m_cd1 = f24fa25689e5863f1b99984c6feb787f
+
 
                     - 9.64:
                         image_name = f9-64
-                        cdrom_cd1 = linux/Fedora-9-x86_64-DVD.iso
-                        md5sum = 05b2ebeed273ec54d6f9ed3d61ea4c96
-                        md5sum_1m = 9822ab5097e37e8fe306ef2192727db4
                         install:
                             steps = Fedora-9-64.steps
-                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/Fedora-9-x86_64-DVD.iso
+                            md5sum_cd1 = 05b2ebeed273ec54d6f9ed3d61ea4c96
+                            md5sum_1m_cd1 = 9822ab5097e37e8fe306ef2192727db4
+                        unattended_install:
                             unattended_file = unattended/Fedora-9.ks
-                            tftp = images/f9-64/tftpboot
                             #floppy = images/f9-64/ks.vfd
                             cdrom_unattended = images/f9-64/ks.iso
+                            kernel = images/f9-64/vmlinuz
+                            initrd = images/f9-64/initrd.img
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/Fedora-9-x86_64-DVD.iso
+                            md5sum_cd1 = 05b2ebeed273ec54d6f9ed3d61ea4c96
+                            md5sum_1m_cd1 = 9822ab5097e37e8fe306ef2192727db4
+
 
                     - 10.32:
                         image_name = f10-32
-                        cdrom_cd1 = linux/Fedora-10-i386-DVD.iso
-                        md5sum = 27e581edb392728c4a07d00d3fc5ced0
-                        md5sum_1m = bd67c68bdf595e4ba7131ec702159181
-                        unattended_install.cdrom:
+                        unattended_install:
                             unattended_file = unattended/Fedora-10.ks
-                            tftp = images/f10-32/tftpboot
                             #floppy = images/f10-32/ks.vfd
                             cdrom_unattended = images/f10-32/ks.iso
+                            kernel = images/f10-32/vmlinuz
+                            initrd = images/f10-32/initrd.img
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/Fedora-10-i386-DVD.iso
+                            md5sum_cd1 = 27e581edb392728c4a07d00d3fc5ced0
+                            md5sum_1m_cd1 = bd67c68bdf595e4ba7131ec702159181
 
                     - 10.64:
                         image_name = f10-64
-                        cdrom_cd1 = linux/Fedora-10-x86_64-DVD.iso
-                        sha1sum = f1e5ae7db6a1ba227de7294c4112385922388648
-                        md5sum_1m = 732857cbf40c80c34683e874601d982c
-                        unattended_install.cdrom:
+                        unattended_install:
                             unattended_file = unattended/Fedora-10.ks
-                            tftp = images/f10-64/tftpboot
                             #floppy = images/f10-64/ks.vfd
                             cdrom_unattended = images/f10-64/ks.iso
+                            kernel = images/f10-64/vmlinuz
+                            initrd = images/f10-64/initrd.img
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/Fedora-10-x86_64-DVD.iso
+                            sha1sum_cd1 = f1e5ae7db6a1ba227de7294c4112385922388648
+                            md5sum_1m_cd1 = 732857cbf40c80c34683e874601d982c
 
                     - 11.32:
                         image_name = f11-32
-                        cdrom_cd1 = linux/Fedora-11-i386-DVD.iso
-                        md5sum = e3b1e2d1ba42aa4705fa5f41771b3927
-                        md5sum_1m = dc8ddf90648c247339c721395aa49714
                         install:
                             steps = Fedora-11-32.steps
-                        unattended_install.cdrom:
+                        unattended_install:
                             unattended_file = unattended/Fedora-11.ks
-                            tftp = images/f11-32/tftpboot
                             #floppy = images/f11-32/ks.vfd
                             cdrom_unattended = images/f11-32/ks.iso
+                            kernel = images/f11-32/vmlinuz
+                            initrd = images/f11-32/initrd.img
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/Fedora-11-i386-DVD.iso
+                            md5sum_cd1 = e3b1e2d1ba42aa4705fa5f41771b3927
+                            md5sum_1m_cd1 = dc8ddf90648c247339c721395aa49714
 
                     - 11.64:
                         image_name = f11-64
-                        cdrom_cd1 = linux/Fedora-11-x86_64-DVD.iso
-                        md5sum = 9d419844adeb93120215fe7505c9bce8
-                        md5sum_1m = 405ee05e2387a2e4328b008d5bcbdd1e
-                        unattended_install.cdrom:
+                        unattended_install:
                             unattended_file = unattended/Fedora-11.ks
-                            tftp = images/f11-64/tftpboot
                             #floppy = images/f11-64/ks.vfd
                             cdrom_unattended = images/f11-64/ks.iso
+                            kernel = images/f11-64/vmlinuz
+                            initrd = images/f11-64/initrd.img
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/Fedora-11-x86_64-DVD.iso
+                            md5sum_cd1 = 9d419844adeb93120215fe7505c9bce8
+                            md5sum_1m_cd1 = 405ee05e2387a2e4328b008d5bcbdd1e
 
                     - 12.32:
                         image_name = f12-32
-                        cdrom_cd1 = linux/Fedora-12-i386-DVD.iso
-                        md5sum = 2c4c1c0d09f2fbcfd8ee6a0c5542eeb2
-                        md5sum_1m = eee935d7f0cf2ef03f6ddce3a2a50050
-                        unattended_install.cdrom:
+                        unattended_install:
                             unattended_file = unattended/Fedora-12.ks
-                            tftp = images/f12-32/tftpboot
                             #floppy = images/f12-32/ks.vfd
                             cdrom_unattended = images/f12-32/ks.iso
+                            kernel = images/f12-32/vmlinuz
+                            initrd = images/f12-32/initrd.img
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/Fedora-12-i386-DVD.iso
+                            md5sum_cd1 = 2c4c1c0d09f2fbcfd8ee6a0c5542eeb2
+                            md5sum_1m_cd1 = eee935d7f0cf2ef03f6ddce3a2a50050
 
                     - 12.64:
                         image_name = f12-64
-                        cdrom_cd1 = linux/Fedora-12-x86_64-DVD.iso
-                        md5sum = 6dd31e292cc2eb1140544e9b1ba61c56
-                        md5sum_1m = 514efbd7698b55ff6768c8605438bfc5
-                        unattended_install.cdrom:
+                        unattended_install:
                             unattended_file = unattended/Fedora-12.ks
-                            tftp = images/f12-64/tftpboot
                             #floppy = images/f12-64/ks.vfd
                             cdrom_unattended = images/f12-64/ks.iso
+                            kernel = images/f12-64/vmlinuz
+                            initrd = images/f12-64/initrd.img
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/Fedora-12-x86_64-DVD.iso
+                            md5sum_cd1 = 6dd31e292cc2eb1140544e9b1ba61c56
+                            md5sum_1m_cd1 = 514efbd7698b55ff6768c8605438bfc5
 
                     - 13.32:
                         image_name = f13-32
-                        cdrom_cd1 = linux/Fedora-13-i386-DVD.iso
-                        md5sum = 212fec517c2629b4b5eaf3662ac13136
-                        md5sum_1m = 4e1578a6ed5a6e7cd03b8fb074030746
-                        unattended_install.cdrom:
+                        unattended_install:
                             unattended_file = unattended/Fedora-13.ks
-                            tftp = images/f13-32/tftpboot
                             #floppy = images/f13-32/ks.vfd
                             cdrom_unattended = images/f13-32/ks.iso
+                            kernel = images/f13-32/vmlinuz
+                            initrd = images/f13-32/initrd.img
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/Fedora-13-i386-DVD.iso
+                            md5sum_cd1 = 212fec517c2629b4b5eaf3662ac13136
+                            md5sum_1m_cd1 = 4e1578a6ed5a6e7cd03b8fb074030746
 
                     - 13.64:
                         image_name = f13-64
-                        cdrom_cd1 = linux/Fedora-13-x86_64-DVD.iso
-                        md5sum = 6fbae6379cf27f36e1f2c7827ba7dc35
-                        md5sum_1m = 68821b9de4d3b5975d6634334e7f47a6
-                        unattended_install.cdrom:
+                        unattended_install:
                             unattended_file = unattended/Fedora-13.ks
-                            tftp = images/f13-64/tftpboot
                             #floppy = images/f13-64/ks.vfd
                             cdrom_unattended = images/f13-64/ks.iso
+                            kernel = images/f13-64/vmlinuz
+                            initrd = images/f13-64/initrd.img
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/Fedora-13-x86_64-DVD.iso
+                            md5sum_cd1 = 6fbae6379cf27f36e1f2c7827ba7dc35
+                            md5sum_1m_cd1 = 68821b9de4d3b5975d6634334e7f47a6
+
+                    - 14.32:
+                        image_name = f14-32
+                        unattended_install:
+                            unattended_file = unattended/Fedora-14.ks
+                            #floppy = images/f14-32/ks.vfd
+                            cdrom_unattended = images/f14-32/ks.iso
+                            kernel = images/f14-32/vmlinuz
+                            initrd = images/f14-32/initrd.img
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/Fedora-14-i386-DVD.iso
+                            md5sum_cd1 = 1cc67641506d2f931d669b8d3528dded
+                            md5sum_1m_cd1 = d314ab126dabab686111e6a0d71d2e67
+
+                    - 14.64:
+                        image_name = f14-64
+                        unattended_install:
+                            unattended_file = unattended/Fedora-14.ks
+                            #floppy = images/f14-64/ks.vfd
+                            cdrom_unattended = images/f14-64/ks.iso
+                            kernel = images/f14-64/vmlinuz
+                            initrd = images/f14-64/initrd.img
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/Fedora-14-x86_64-DVD.iso
+                            md5sum_cd1 = f2ebf941dc45f99ee3e8a457c9544552
+                            md5sum_1m_cd1 = df029f9cffbc3517937a91124a1e0c3a
+
 
 
             - DSL-4.2.5:
@@ -924,199 +1207,232 @@
                 image_name = dsl-4.2.5
                 install:
                     steps = DSL-4.2.5.steps
-                    cdrom_cd1 = linux/dsl-4.2.5.iso
-                    md5sum = 61694888aede3e01229865b8e6acd4a1
-                    md5sum_1m = 527f2481bd25310f2e3a6e5345ff3d12
+                    cdrom_cd1 = isos/linux/dsl-4.2.5.iso
+                    md5sum_cd1 = 61694888aede3e01229865b8e6acd4a1
+                    md5sum_1m_cd1 = 527f2481bd25310f2e3a6e5345ff3d12
 
             - Mandriva-One-2007:
                 only install
                 image_name = mandriva-one-2007
                 steps = Mandriva-One-2007-32.steps
-                cdrom_cd1 = linux/mandriva-one-2007-i386.iso
-                md5sum = 7e9e183dc11b9d39f480238e4e12bb05
-                md5sum_1m = dc7865a75db665efc86d59bca7c1fe07
+                cdrom_cd1 = isos/linux/mandriva-one-2007-i386.iso
+                md5sum_cd1 = 7e9e183dc11b9d39f480238e4e12bb05
+                md5sum_1m_cd1 = dc7865a75db665efc86d59bca7c1fe07
 
             - OpenSUSE:
                 no setup
                 shell_prompt = ".*:.*\s#"
-                unattended_install.cdrom:
-                    pxe_image = "linux"
-                    pxe_initrd = "initrd"
-                    tftp = "images/tftpboot"
-                    bootp = "/pxelinux.0"
-                    extra_params += " -boot cn"
+                unattended_install:
                     # You have to use autoyast=floppy if you want to use floppies to
                     # hold your autoyast file
-                    kernel_args = "autoyast=floppy console=ttyS0,115200 console=tty0"
-                    #kernel_args = "autoyast=cdrom console=ttyS0,115200 console=tty0"
+                    extra_params += " --append 'autoyast=floppy console=ttyS0,115200 console=tty0'"
+                    #extra_params += " --append 'autoyast=cdrom console=ttyS0,115200 console=tty0'"
                     post_install_delay = 10
 
                 variants:
                     - 11.0.32:
                         image_name = openSUSE-11.0-32
-                        cdrom_cd1 = linux/openSUSE-11.0-DVD-i386.iso
-                        md5sum = ed6a5b3feb668866df812b1c2aed9d7f
-                        md5sum_1m = c720b30557af758e69de450409516369
                         install:
                             steps = openSUSE-11.0-32.steps
-                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/openSUSE-11.0-DVD-i386.iso
+                            md5sum_cd1 = ed6a5b3feb668866df812b1c2aed9d7f
+                            md5sum_1m_cd1 = c720b30557af758e69de450409516369
+                        unattended_install:
                             unattended_file = unattended/OpenSUSE-11.xml
-                            tftp = images/opensuse-11-0-32/tftpboot
                             floppy = images/opensuse-11-0-32/autoyast.vfd
                             #cdrom_unattended = images/opensuse-11-0-32/autoyast.iso
-                            pxe_dir = boot/i386/loader
+                            kernel = images/opensuse-11-0-32/linux
+                            initrd = images/opensuse-11-0-32/initrd
+                            boot_path = boot/i386/loader
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/openSUSE-11.0-DVD-i386.iso
+                            md5sum_cd1 = ed6a5b3feb668866df812b1c2aed9d7f
+                            md5sum_1m_cd1 = c720b30557af758e69de450409516369
+
 
                     - 11.0.64:
                         image_name = openSUSE-11.0-64
-                        cdrom_cd1 = linux/openSUSE-11.0-DVD-x86_64.iso
-                        md5sum = 512c8346b0f8eb35f28c4eb96454d391
-                        md5sum_1m = 661aa4cd031df2f25ea0102318a3f4d1
-                        unattended_install.cdrom:
+                        unattended_install:
                             unattended_file = unattended/OpenSUSE-11.xml
-                            tftp = images/opensuse-11-0-64/tftpboot
                             floppy = images/opensuse-11-0-64/autoyast.vfd
                             #cdrom_unattended = images/opensuse-11-0-64/autoyast.iso
-                            pxe_dir = boot/x86_64/loader
+                            kernel = images/opensuse-11-0-64/linux
+                            initrd = images/opensuse-11-0-64/initrd
+                            boot_path = boot/x86_64/loader
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/openSUSE-11.0-DVD-x86_64.iso
+                            md5sum_cd1 = 512c8346b0f8eb35f28c4eb96454d391
+                            md5sum_1m_cd1 = 661aa4cd031df2f25ea0102318a3f4d1
 
                     - 11.1.32:
                         image_name = openSUSE-11.1-32
-                        cdrom_cd1 = linux/openSUSE-11.1-DVD-i586.iso
-                        md5sum = 8f51b278c0415be28c5699e465444bd3
-                        md5sum_1m = b70217417468389083429f81ba7ce2bd
                         install:
-                            steps=openSUSE-11.1-32-and-64.steps
-                        unattended_install.cdrom:
+                            steps = openSUSE-11.1-32-and-64.steps
+                            cdrom_cd1 = isos/linux/openSUSE-11.1-DVD-i586.iso
+                            md5sum_cd1 = 8f51b278c0415be28c5699e465444bd3
+                            md5sum_1m_cd1 = b70217417468389083429f81ba7ce2bd
+                        unattended_install:
                             unattended_file = unattended/OpenSUSE-11.xml
-                            tftp = images/opensuse-11-1-32/tftpboot
                             floppy = images/opensuse-11-1-32/autoyast.vfd
                             #cdrom_unattended = images/opensuse-11-1-32/autoyast.iso
-                            pxe_dir = boot/i386/loader
+                            kernel = images/opensuse-11-1-32/linux
+                            initrd = images/opensuse-11-1-32/initrd
+                            boot_path = boot/i386/loader
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/openSUSE-11.1-DVD-i586.iso
+                            md5sum_cd1 = 8f51b278c0415be28c5699e465444bd3
+                            md5sum_1m_cd1 = b70217417468389083429f81ba7ce2bd
 
                     - 11.1.64:
                         image_name = openSUSE-11.1-64
-                        cdrom_cd1 = linux/openSUSE-11.1-DVD-x86_64.iso
-                        md5sum = 2afee1b8a87175e6dee2b8dbbd1ad8e8
-                        md5sum_1m = 768ca32503ef92c28f2d144f2a87e4d0
                         install:
                             steps=openSUSE-11.1-32-and-64.steps
-                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/openSUSE-11.1-DVD-x86_64.iso
+                            md5sum_cd1 = 2afee1b8a87175e6dee2b8dbbd1ad8e8
+                            md5sum_1m_cd1 = 768ca32503ef92c28f2d144f2a87e4d0
+                        unattended_install:
                             unattended_file = unattended/OpenSUSE-11.xml
-                            tftp = images/opensuse-11-1-64/tftpboot
                             floppy = images/opensuse-11-1-64/autoyast.vfd
                             #cdrom_unattended = images/opensuse-11-1-64/autoyast.iso
-                            pxe_dir = boot/x86_64/loader
+                            kernel = images/opensuse-11-1-64/linux
+                            initrd = images/opensuse-11-1-64/initrd
+                            boot_path = boot/x86_64/loader
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/openSUSE-11.1-DVD-x86_64.iso
+                            md5sum_cd1 = 2afee1b8a87175e6dee2b8dbbd1ad8e8
+                            md5sum_1m_cd1 = 768ca32503ef92c28f2d144f2a87e4d0
+
 
                     - 11.2.32:
                         image_name = openSUSE-11.2-32
-                        cdrom_cd1 = linux/openSUSE-11.2-DVD-i586.iso
-                        md5sum = 295d713314a30ad017948f0d542c6d92
-                        md5sum_1m = 1f8767d00acb492be5a5627c834e543f
-                        unattended_install.cdrom:
+                        unattended_install:
                             unattended_file = unattended/OpenSUSE-11.xml
-                            tftp = images/opensuse-11-2-32/tftpboot
                             floppy = images/opensuse-11-2-32/autoyast.vfd
                             #cdrom_unattended = images/opensuse-11-2-32/autoyast.iso
-                            pxe_dir = boot/i386/loader
+                            kernel = images/opensuse-11-2-32/linux
+                            initrd = images/opensuse-11-2-32/initrd
+                            boot_path = boot/i386/loader
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/openSUSE-11.2-DVD-i586.iso
+                            md5sum_cd1 = 295d713314a30ad017948f0d542c6d92
+                            md5sum_1m_cd1 = 1f8767d00acb492be5a5627c834e543f
+
 
                     - 11.2.64:
                         image_name = openSUSE-11.2-64
-                        cdrom_cd1 = linux/openSUSE-11.2-DVD-x86_64.iso
-                        md5sum = 6a09295e34dc030319d040f67f4742c6
-                        md5sum_1m = 11fd11d39744450b898f04c371dde2e7
-                        unattended_install.cdrom:
+                        unattended_install:
                             unattended_file = unattended/OpenSUSE-11.xml
-                            tftp = images/opensuse-11-2-64/tftpboot
                             floppy = images/opensuse11-2-64/autoyast.vfd
                             #cdrom_unattended = images/opensuse11-2-64/autoyast.iso
-                            pxe_dir = boot/x86_64/loader
+                            kernel = images/opensuse-11-2-64/linux
+                            initrd = images/opensuse-11-2-64/initrd
+                            boot_path = boot/x86_64/loader
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/openSUSE-11.2-DVD-x86_64.iso
+                            md5sum_cd1 = 6a09295e34dc030319d040f67f4742c6
+                            md5sum_1m_cd1 = 11fd11d39744450b898f04c371dde2e7
 
                     - 11.3.32:
                         image_name = openSUSE-11.3-32
-                        cdrom_cd1 = linux/openSUSE-11.3-DVD-i586.iso
-                        md5sum = 1a1da28c84e3cdad750d5cfa21c4fd17
-                        md5sum_1m = 4dd26906ce6cb3946519cb0b0de4b0f8
-                        unattended_install.cdrom:
+                        unattended_install:
                             unattended_file = unattended/OpenSUSE-11.xml
-                            tftp = images/opensuse-11-3-32/tftpboot
                             floppy = images/opensuse-11-3-32/autoyast.vfd
                             #cdrom_unattended = images/opensuse-11-3-32/autoyast.iso
-                            pxe_dir = boot/i386/loader
+                            kernel = images/opensuse-11-3-32/linux
+                            initrd = images/opensuse-11-3-32/initrd
+                            boot_path = boot/i386/loader
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/openSUSE-11.3-DVD-i586.iso
+                            md5sum_cd1 = 1a1da28c84e3cdad750d5cfa21c4fd17
+                            md5sum_1m_cd1 = 4dd26906ce6cb3946519cb0b0de4b0f8
 
                     - 11.3.64:
                         image_name = openSUSE-11.3-64
-                        cdrom_cd1 = linux/openSUSE-11.3-DVD-x86_64.iso
-                        md5sum = adf5d2a0a03c1e3aaf102fd6a4771b87
-                        md5sum_1m = e0dd12dac30d296417256775e1234c6e
-                        unattended_install.cdrom:
+                        unattended_install:
                             unattended_file = unattended/OpenSUSE-11.xml
-                            tftp = images/opensuse-11-3-64/tftpboot
                             floppy = images/opensuse-11-3-64/autoyast.vfd
                             #cdrom_unattended = images/opensuse-11-3-64/autoyast.iso
-                            pxe_dir = boot/x86_64/loader
+                            kernel = images/opensuse-11-3-64/linux
+                            initrd = images/opensuse-11-3-64/initrd
+                            boot_path = boot/x86_64/loader
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/openSUSE-11.3-DVD-x86_64.iso
+                            md5sum_cd1 = adf5d2a0a03c1e3aaf102fd6a4771b87
+                            md5sum_1m_cd1 = e0dd12dac30d296417256775e1234c6e
 
             - SLES:
                 shell_prompt = "^root@.*[\#\$]\s*$|#"
-                unattended_install.cdrom:
-                    pxe_image = "linux"
-                    pxe_initrd = "initrd"
-                    bootp = "/pxelinux.0"
-                    extra_params += " -boot cn"
+                unattended_install:
                     # You have to use autoyast=floppy if you want to use floppies to
                     # hold your autoyast file
-                    kernel_args = "autoyast=floppy console=ttyS0,115200 console=tty0"
-                    #kernel_args = "autoyast=cdrom console=ttyS0,115200 console=tty0"
+                    extra_params += " --append 'autoyast=floppy console=ttyS0,115200 console=tty0'"
+                    #extra_params += " --append 'autoyast=cdrom console=ttyS0,115200 console=tty0'"
                     post_install_delay = 10
+                    kernel = linux
+                    initrd = initrd
 
                 variants:
                     - 11.0.32:
                         image_name = sles11-32
-                        cdrom_cd1 = linux/SLES-11-DVD-i586-GM-DVD1.iso
-                        md5sum = 4958d4dde2575666355c8a1c5858bab0
-                        md5sum_1m = 1f19d4eff5bcead2a3e5b8b4212b6796
-                        unattended_install.cdrom:
+                        unattended_install:
                             unattended_file = unattended/SLES-11.xml
-                            tftp = images/sles-11-0-32/tftpboot
-                            #floppy = images/sles-11-0-32/autoyast.vfd
-                            cdrom_unattended = images/sles-11-0-32/autoyast.iso
                             floppy = images/sles-11-0-32/autoyast.vfd
                             #cdrom_unattended = images/sles-11-0-32/autoyast.iso
-                            pxe_dir = boot/i386/loader
+                            kernel = images/sles-11-0-32/linux
+                            initrd = images/sles-11-0-32/initrd
+                            boot_path = boot/i386/loader
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/SLES-11-DVD-i586-GM-DVD1.iso
+                            md5sum_cd1 = 4958d4dde2575666355c8a1c5858bab0
+                            md5sum_1m_cd1 = 1f19d4eff5bcead2a3e5b8b4212b6796
+
 
                     - 11.0.64:
                         image_name = sles11-64
-                        cdrom_cd1 = linux/SLES-11-DVD-x86_64-GM-DVD1.iso
-                        md5sum = 50a2bd45cd12c3808c3ee48208e2586b
-                        md5sum_1m = 00000951cab7c32e332362fc424c1054
-                        unattended_install.cdrom:
+                        cdrom_cd1 = isos/linux/SLES-11-DVD-x86_64-GM-DVD1.iso
+                        md5sum_cd1 = 50a2bd45cd12c3808c3ee48208e2586b
+                        md5sum_1m_cd1 = 00000951cab7c32e332362fc424c1054
+                        unattended_install:
                             unattended_file = unattended/SLES-11.xml
-                            tftp = images/sles-11-0-64/tftpboot
                             floppy = images/sles-11-0-64/autoyast.vfd
                             #cdrom_unattended = images/sles-11-0-64/autoyast.iso
-                            pxe_dir = boot/x86_64/loader
+                            kernel = images/sles-11-0-64/linux
+                            initrd = images/sles-11-0-64/initrd
+                            boot_path = boot/x86_64/loader
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/SLES-11-DVD-x86_64-GM-DVD1.iso
+                            md5sum_cd1 = 50a2bd45cd12c3808c3ee48208e2586b
+                            md5sum_1m_cd1 = 00000951cab7c32e332362fc424c1054
+
 
                     - 11.1.32:
                         image_name = sles11sp1-32
-                        cdrom_cd1 = linux/SLES-11-SP1-DVD-i586-GM-DVD1.iso
-                        md5sum = 0dd6886858d93501c38854552b9b1b0d
-                        md5sum_1m = a626a3d50813410e3ac42794e05773bb
                         unattended_install:
                             unattended_file = unattended/SLES-11.xml
-                            tftp = images/sles-11-1-32/tftpboot
                             floppy = images/sles-11-1-32/autoyast.vfd
                             #cdrom_unattended = images/sles-11-1-32/autoyast.iso
-                            pxe_dir = boot/i386/loader
+                            kernel = images/sles-11-1-32/linux
+                            initrd = images/sles-11-1-32/initrd
+                            boot_path = boot/i386/loader
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/SLES-11-SP1-DVD-i586-GM-DVD1.iso
+                            md5sum_cd1 = 0dd6886858d93501c38854552b9b1b0d
+                            md5sum_1m_cd1 = a626a3d50813410e3ac42794e05773bb
 
                     - 11.1.64:
                         image_name = sles11sp1-64
-                        cdrom_cd1 = linux/SLES-11-SP1-DVD-x86_64-GM-DVD1.iso
-                        md5sum = d2e10420f3689faa49a004b60fb396b7
-                        md5sum_1m = f7f67b5da46923a9f01da8a2b6909654
                         unattended_install:
                             unattended_file = unattended/SLES-11.xml
-                            tftp = images/sles-11-1-64/tftpboot
                             floppy = images/sles-11-1-64/autoyast.vfd
                             #cdrom_unattended = images/sles-11-1-64/autoyast.iso
-                            pxe_dir = boot/x86_64/loader
+                            kernel = images/sles-11-1-64/linux
+                            initrd = images/sles-11-1-64/initrd
+                            boot_path = boot/x86_64/loader
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/SLES-11-SP1-DVD-x86_64-GM-DVD1.iso
+                            md5sum_cd1 = d2e10420f3689faa49a004b60fb396b7
+                            md5sum_1m_cd1 = f7f67b5da46923a9f01da8a2b6909654
+
 
             - @Ubuntu:
                 shell_prompt = "^root@.*[\#\$]\s*$"
@@ -1126,16 +1442,16 @@
                         only install
                         image_name = ubuntu-6.10-32
                         steps = Ubuntu-6.10-32.steps
-                        cdrom_cd1 = linux/ubuntu-6.10-desktop-i386.iso
-                        md5sum = 17fb825641571ce5888a718329efd016
-                        md5sum_1m = 7531d0a84e7451d17c5d976f1c3f8509
+                        cdrom_cd1 = isos/linux/ubuntu-6.10-desktop-i386.iso
+                        md5sum_cd1 = 17fb825641571ce5888a718329efd016
+                        md5sum_1m_cd1 = 7531d0a84e7451d17c5d976f1c3f8509
 
                     - Ubuntu-8.04-32:
                         skip = yes
                         image_name = ubuntu-8.04-32
                         install:
                             steps = Ubuntu-8.04-32.steps
-                            cdrom_cd1 = linux/ubuntu-8.04.1-desktop-i386.iso
+                            cdrom_cd1 = isos/linux/ubuntu-8.04.1-desktop-i386.iso
                         setup:
                             steps = Ubuntu-8.04-32-setupssh.steps
 
@@ -1143,9 +1459,9 @@
                         image_name = ubuntu-8.10-server-32
                         install:
                             steps = Ubuntu-8.10-server-32.steps
-                            cdrom_cd1 = linux/ubuntu-8.10-server-i386.iso
-                            md5sum = a2ec9975a91e1228c8292ed9799dc302
-                            md5sum_1m = ea493eb8ef7722ead693492fd9f8a13f
+                            cdrom_cd1 = isos/linux/ubuntu-8.10-server-i386.iso
+                            md5sum_cd1 = a2ec9975a91e1228c8292ed9799dc302
+                            md5sum_1m_cd1 = ea493eb8ef7722ead693492fd9f8a13f
                         setup:
                             steps = Ubuntu-8.10-server-32-gcc.steps
 
@@ -1156,181 +1472,259 @@
                     modprobe_module = acpiphp
                 block_hotplug:
                     modprobe_module = acpiphp
-                unattended_install.cdrom:
-                    pxe_dir = "images/pxeboot"
-                    pxe_image = "vmlinuz"
-                    pxe_initrd = "initrd.img"
-                    tftp = "images/tftpboot"
-                    bootp = "/pxelinux.0"
-                    extra_params += " -boot cn"
+                unattended_install:
+                    boot_path = images/pxeboot
                     # You have to use ks=floppy if you want to use floppies to
                     # hold your kickstart file
-                    #kernel_args = "ks=floppy nicdelay=60 console=ttyS0,115200 console=tty0"
-                    kernel_args = "ks=cdrom nicdelay=60 console=ttyS0,115200 console=tty0"
+                    #extra_params += " --append 'ks=floppy nicdelay=60 console=ttyS0,115200 console=tty0'"
+                    extra_params += " --append 'ks=cdrom nicdelay=60 console=ttyS0,115200 console=tty0'"
 
                 variants:
                     - 3.9.i386:
                         no setup autotest linux_s3 guest_s4 shutdown
                         image_name = rhel3-32
                         mem_chk_cmd = dmidecode | awk -F: '/Maximum Capacity/ {print $2}'
-                        cdrom_cd1 = linux/RHEL-3.9-i386-DVD.iso
-                        md5sum = ddd11a1cb104119039b0fa05df6d52b8
-                        md5sum_1m = 5f10c9417c7b8372b3456c1b5f3f9ed0
                         install:
                             steps=RHEL-3.9-i386.steps
-                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/RHEL-3.9-i386-DVD.iso
+                            md5sum_cd1 = ddd11a1cb104119039b0fa05df6d52b8
+                            md5sum_1m_cd1 = 5f10c9417c7b8372b3456c1b5f3f9ed0
+                        unattended_install:
                             unattended_file = unattended/RHEL-3-series.ks
-                            tftp = images/rhel39-32/tftpboot
                             #floppy = images/rhel39-32/ks.vfd
                             cdrom_unattended = images/rhel39-32/ks.iso
+                            kernel = images/rhel39-32/vmlinuz
+                            initrd = images/rhel39-32/initrd.img
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/RHEL-3.9-i386-DVD.iso
+                            md5sum_cd1 = ddd11a1cb104119039b0fa05df6d52b8
+                            md5sum_1m_cd1 = 5f10c9417c7b8372b3456c1b5f3f9ed0
 
                     - 3.9.x86_64:
                         no setup autotest linux_s3 guest_s4 shutdown
                         image_name = rhel3-64
                         mem_chk_cmd = dmidecode | awk -F: '/Maximum Capacity/ {print $2}'
-                        cdrom_cd1 = linux/RHEL-3.9-x86_64-DVD.iso
-                        md5sum = bf4635e4a4bd3b43838e72bc8c329d55
-                        md5sum_1m = 18ecd37b639109f1b2af05cfb57dfeaf
                         install:
                             steps=RHEL-3.9-x86_64.steps
-                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/RHEL-3.9-x86_64-DVD.iso
+                            md5sum_cd1 = bf4635e4a4bd3b43838e72bc8c329d55
+                            md5sum_1m_cd1 = 18ecd37b639109f1b2af05cfb57dfeaf
+                        unattended_install:
                             unattended_file = unattended/RHEL-3-series.ks
-                            tftp = images/rhel39-64/tftpboot
                             #floppy = images/rhel39-64/ks.vfd
                             cdrom_unattended = images/rhel39-64/ks.iso
+                            kernel = images/rhel39-64/vmlinuz
+                            initrd = images/rhel39-64/initrd.img
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/RHEL-3.9-x86_64-DVD.iso
+                            md5sum_cd1 = bf4635e4a4bd3b43838e72bc8c329d55
+                            md5sum_1m_cd1 = 18ecd37b639109f1b2af05cfb57dfeaf
+
 
                     - 4.7.i386:
                         no setup autotest
                         image_name = rhel4-32
-                        cdrom_cd1 = linux/RHEL-4.7-i386-DVD.iso
-                        md5sum = ee5092653732a88ddbaf8eef2484c500
-                        md5sum_1m = 127081cbed825d7232331a2083975528
                         install:
                             steps = RHEL-4.7-i386.steps
-                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/RHEL-4.7-i386-DVD.iso
+                            md5sum_cd1 = ee5092653732a88ddbaf8eef2484c500
+                            md5sum_1m_cd1 = 127081cbed825d7232331a2083975528
+                        unattended_install:
                             unattended_file = unattended/RHEL-4-series.ks
-                            tftp = images/rhel47-32/tftpboot
                             #floppy = images/rhel47-32/ks.vfd
                             cdrom_unattended = images/rhel47-32/ks.iso
+                            kernel = images/rhel47-32/vmlinuz
+                            initrd = images/rhel47-32/initrd.img
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/RHEL-4.7-i386-DVD.iso
+                            md5sum_cd1 = ee5092653732a88ddbaf8eef2484c500
+                            md5sum_1m_cd1 = 127081cbed825d7232331a2083975528
 
                     - 4.7.x86_64:
                         no setup autotest
                         image_name = rhel4-64
-                        cdrom_cd1 = linux/RHEL-4.7-x86_64-DVD.iso
-                        md5sum = ea9dae16dd86f7d94092d0e672333292
-                        md5sum_1m = 58fa63eaee68e269f4cb1d2edf479792
                         install:
                             steps = RHEL-4.7-x86_64.steps
-                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/RHEL-4.7-x86_64-DVD.iso
+                            md5sum_cd1 = ea9dae16dd86f7d94092d0e672333292
+                            md5sum_1m_cd1 = 58fa63eaee68e269f4cb1d2edf479792
+                        unattended_install:
                             unattended_file = unattended/RHEL-4-series.ks
-                            tftp = images/rhel47-64/tftpboot
                             #floppy = images/rhel47-64/ks.vfd
                             cdrom_unattended = images/rhel47-64/ks.iso
+                            kernel = images/rhel47-64/vmlinuz
+                            initrd = images/rhel47-64/initrd.img
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/RHEL-4.7-x86_64-DVD.iso
+                            md5sum_cd1 = ea9dae16dd86f7d94092d0e672333292
+                            md5sum_1m_cd1 = 58fa63eaee68e269f4cb1d2edf479792
 
                     - 4.8.i386:
                         no setup autotest
                         image_name = rhel4-32
-                        cdrom_cd1 = linux/RHEL-4.8-i386-DVD.iso
-                        md5sum = b024f0af5079539d3ef51f71fed0b194
-                        md5sum_1m = 969c197402b9058f28a278c1f807d15b
-                        unattended_install.cdrom:
+                        unattended_install:
                             unattended_file = unattended/RHEL-4-series.ks
-                            tftp = images/rhel48-32/tftpboot
                             #floppy = images/rhel48-32/ks.vfd
                             cdrom_unattended = images/rhel48-32/ks.iso
+                            kernel = images/rhel48-32/vmlinuz
+                            initrd = images/rhel48-32/initrd.img
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/RHEL-4.8-i386-DVD.iso
+                            md5sum_cd1 = b024f0af5079539d3ef51f71fed0b194
+                            md5sum_1m_cd1 = 969c197402b9058f28a278c1f807d15b
+
 
                     - 4.8.x86_64:
                         no setup autotest
                         image_name = rhel4-64
-                        cdrom_cd1 = linux/RHEL-4.8-x86_64-DVD.iso
-                        md5sum = 696bc877b0200cc942626673fcc3fc09
-                        md5sum_1m = b11ac0ef7fd345ad712966972db63886
-                        unattended_install.cdrom:
+                        unattended_install:
                             unattended_file = unattended/RHEL-4-series.ks
-                            tftp = images/rhel48-64/tftpboot
                             #floppy = images/rhel48-64/ks.vfd
                             cdrom_unattended = images/rhel48-64/ks.iso
+                            kernel = images/rhel48-64/vmlinuz
+                            initrd = images/rhel48-64/initrd.img
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/RHEL-4.8-x86_64-DVD.iso
+                            md5sum_cd1 = 696bc877b0200cc942626673fcc3fc09
+                            md5sum_1m_cd1 = b11ac0ef7fd345ad712966972db63886
+
 
                     - 5.3.i386:
                         no setup
                         image_name = rhel5-32
-                        cdrom_cd1 = linux/RHEL-5.3-i386-DVD.iso
-                        md5sum = 371c62851611fd32ead440df6f24a296
-                        md5sum_1m = 242318dd44152210f6ff6cdda1bfbf51
                         install:
                             steps = RHEL-5.3-i386.steps
-                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/RHEL-5.3-i386-DVD.iso
+                            md5sum_cd1 = 371c62851611fd32ead440df6f24a296
+                            md5sum_1m_cd1 = 242318dd44152210f6ff6cdda1bfbf51
+                        unattended_install:
                             unattended_file = unattended/RHEL-5-series.ks
-                            tftp = images/rhel53-32/tftpboot
                             #floppy = images/rhel53-32/ks.vfd
                             cdrom_unattended = images/rhel53-32/ks.iso
+                            kernel = images/rhel53-32/vmlinuz
+                            initrd = images/rhel53-32/initrd.img
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/RHEL-5.3-i386-DVD.iso
+                            md5sum_cd1 = 371c62851611fd32ead440df6f24a296
+                            md5sum_1m_cd1 = 242318dd44152210f6ff6cdda1bfbf51
+
 
                     - 5.3.x86_64:
                         no setup
                         image_name = rhel5-64
-                        cdrom_cd1 = linux/RHEL-5.3-x86_64-DVD.iso
-                        md5sum = c5ed6b284410f4d8212cafc78fd7a8c5
-                        md5sum_1m = b999f437583098ea5bbd56fb1de1d011
                         install:
                             steps=RHEL-5.3-x86_64.steps
-                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/RHEL-5.3-x86_64-DVD.iso
+                            md5sum_cd1 = c5ed6b284410f4d8212cafc78fd7a8c5
+                            md5sum_1m_cd1 = b999f437583098ea5bbd56fb1de1d011
+                        unattended_install:
                             unattended_file = unattended/RHEL-5-series.ks
-                            tftp = images/rhel53-64/tftpboot
                             #floppy = images/rhel53-64/ks.vfd
                             cdrom_unattended = images/rhel53-64/ks.iso
+                            kernel = images/rhel53-64/vmlinuz
+                            initrd = images/rhel53-64/initrd.img
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/RHEL-5.3-x86_64-DVD.iso
+                            md5sum_cd1 = c5ed6b284410f4d8212cafc78fd7a8c5
+                            md5sum_1m_cd1 = b999f437583098ea5bbd56fb1de1d011
+
 
                     - 5.4.i386:
                         no setup
                         image_name = rhel5-32
-                        cdrom_cd1 = linux/RHEL-5.4-i386-DVD.iso
-                        md5sum = 7a12ec6599527e4f3d1790b51eadbfed
-                        md5sum_1m = 0dbeb8f58d213752d8c029e8601abfbb
-                        unattended_install.cdrom:
+                        unattended_install:
                             unattended_file = unattended/RHEL-5-series.ks
-                            tftp = images/rhel54-32/tftpboot
                             #floppy = images/rhel54-32/ks.vfd
                             cdrom_unattended = images/rhel54-32/ks.iso
+                            kernel = images/rhel54-32/vmlinuz
+                            initrd = images/rhel54-32/initrd.img
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/RHEL-5.4-i386-DVD.iso
+                            md5sum_cd1 = 7a12ec6599527e4f3d1790b51eadbfed
+                            md5sum_1m_cd1 = 0dbeb8f58d213752d8c029e8601abfbb
+
 
                     - 5.4.x86_64:
                         no setup
                         image_name = rhel5-64
-                        cdrom_cd1 = linux/RHEL-5.4-x86_64-DVD.iso
-                        md5sum = 04fe3c10202402d7b389528d2bad0210
-                        md5sum_1m = 3e74112003e88a966754849dbb8f5c3f
-                        unattended_install.cdrom:
+                        unattended_install:
                             unattended_file = unattended/RHEL-5-series.ks
-                            tftp = images/rhel54-64/tftpboot
                             #floppy = images/rhel54-64/ks.vfd
                             cdrom_unattended = images/rhel54-64/ks.iso
+                            kernel = images/rhel54-64/vmlinuz
+                            initrd = images/rhel54-64/initrd.img
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/RHEL-5.4-x86_64-DVD.iso
+                            md5sum_cd1 = 04fe3c10202402d7b389528d2bad0210
+                            md5sum_1m_cd1 = 3e74112003e88a966754849dbb8f5c3f
+
 
                     - 5.5.i386:
                         no setup
                         image_name = rhel5-32
-                        cdrom_cd1 = linux/RHEL-5.5-i386-DVD.iso
-                        md5sum = 148858b157f275d9153797efddfc83c3
-                        md5sum_1m = 2502cc7ddb9d0684fe08c4a83d247902
-                        unattended_install.cdrom:
+                        unattended_install:
                             unattended_file = unattended/RHEL-5-series.ks
-                            tftp = images/rhel55-32/tftpboot
                             #floppy = images/rhel55-32/ks.vfd
                             cdrom_unattended = images/rhel55-32/ks.iso
+                            kernel = images/rhel55-32/vmlinuz
+                            initrd = images/rhel55-32/initrd.img
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/RHEL-5.5-i386-DVD.iso
+                            md5sum_cd1 = 148858b157f275d9153797efddfc83c3
+                            md5sum_1m_cd1 = 2502cc7ddb9d0684fe08c4a83d247902
+
 
                     - 5.5.x86_64:
                         no setup
                         image_name = rhel5-64
-                        cdrom_cd1 = linux/RHEL-5.5-x86_64-DVD.iso
-                        md5sum = f3119f883257ef9041234feda2f1cad0
-                        md5sum_1m = a744084a03f6a08627f71527fc107a1e
-                        unattended_install.cdrom:
+                        unattended_install:
                             unattended_file = unattended/RHEL-5-series.ks
-                            tftp = images/rhel55-64/tftpboot
                             #floppy = images/rhel55-64/ks.vfd
                             cdrom_unattended = images/rhel55-64/ks.iso
+                            kernel = images/rhel55-64/vmlinuz
+                            initrd = images/rhel55-64/initrd.img
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/RHEL-5.5-x86_64-DVD.iso
+                            md5sum_cd1 = f3119f883257ef9041234feda2f1cad0
+                            md5sum_1m_cd1 = a744084a03f6a08627f71527fc107a1e
+
+
+                    - 6.0.i386:
+                        no setup
+                        image_name = rhel6-32
+                        unattended_install:
+                            unattended_file = unattended/RHEL-6-series.ks
+                            #floppy = images/rhel60-32/ks.vfd
+                            cdrom_unattended = images/rhel60-32/ks.iso
+                            kernel = images/rhel60-32/vmlinuz
+                            initrd = images/rhel60-32/initrd.img
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/RHEL-6.0-i386-DVD.iso
+                            md5sum_cd1 = 291d234c93442405972689b4b41c14bc
+                            md5sum_1m_cd1 = ee2cc3d3babe91a1d581a07099c4318b
+
+
+                    - 6.0.x86_64:
+                        no setup
+                        image_name = rhel6-64
+                        unattended_install:
+                            unattended_file = unattended/RHEL-6-series.ks
+                            #floppy = images/rhel60-64/ks.vfd
+                            cdrom_unattended = images/rhel60-64/ks.iso
+                            kernel = images/rhel60-64/vmlinuz
+                            initrd = images/rhel60-64/initrd.img
+                        unattended_install.cdrom:
+                            cdrom_cd1 = isos/linux/RHEL-6.0-x86_64-DVD.iso
+                            md5sum_cd1 = f7141396c6a19399d63e8c195354317d
+                            md5sum_1m_cd1 = b060eeef63e2c8700db54ae02056e80c
+
 
 
     # Windows section
     - @Windows:
-        no autotest linux_s3 vlan ioquit unattended_install.(url|nfs|remote_ks) jumbo nicdriver_unload nic_promisc multicast mac_change ethtool
+        no autotest linux_s3 vlan ioquit unattended_install.(url|nfs|remote_ks)
+        no jumbo nicdriver_unload nic_promisc multicast mac_change ethtool clock_getres
+
         shutdown_command = shutdown /s /f /t 0
         reboot_command = shutdown /r /f /t 0
         status_test_command = echo %errorlevel%
@@ -1347,23 +1741,25 @@
         guest_port_file_transfer = 10023
 
         # This ISO will be used for all tests except install:
-        cdrom_cd1 = windows/winutils.iso
+        cdrom_cd1 = isos/windows/winutils.iso
 
         cpu_chk_cmd = echo %NUMBER_OF_PROCESSORS%
         mem_chk_cmd = wmic memphysical
         mem_chk_cur_cmd = wmic memphysical
 
-        unattended_install.cdrom:
+        unattended_install.cdrom|whql.support_vm_install:
             timeout = 7200
             finish_program = deps/finish.exe
             cdroms += " winutils"
-            cdrom_winutils = windows/winutils.iso
+            cdrom_winutils = isos/windows/winutils.iso
             drive_index_winutils = 2
+            kernel =
+            initrd =
             # Turn install_virtio = yes if you want to install the
             # Windows virtio drivers. It might be a lot of setup though :)
             #install_virtio = no
             #cdroms += " virtio"
-            #cdrom_virtio = windows/virtio-win.iso
+            #cdrom_virtio = isos/windows/virtio-win.iso
             #drive_index_virtio = 3
             #virtio_floppy = /usr/share/virtio-win/virtio-drivers.vfd
         migrate:
@@ -1371,6 +1767,8 @@
             migration_bg_command = start ping -t localhost
             migration_bg_check_command = tasklist | find /I "ping.exe"
             migration_bg_kill_command = taskkill /IM ping.exe /F
+        migrate.with_file_transfer:
+            guest_path = C:\tmpfile
         stress_boot:
             alive_test_cmd = systeminfo
         timedrift:
@@ -1398,7 +1796,6 @@
                 time_command = "echo TIME: %date% %time%"
                 time_filter_re = "(?<=TIME: \w\w\w ).{19}(?=\.\d\d)"
                 time_format = "%m/%d/%Y %H:%M:%S"
-
         guest_s4:
             check_s4_support_cmd = powercfg /hibernate on
             test_s4_cmd = start ping -t localhost
@@ -1420,11 +1817,13 @@
             find_pci_cmd = wmic diskdrive list brief
             pci_test_cmd = echo select disk 1 > dt && echo online >> dt && echo detail disk >> dt && echo exit >> dt && diskpart /s dt
         physical_resources_check:
-            catch_uuid_cmd = 
-
+            catch_uuid_cmd =
         file_transfer:
             tmp_dir = C:\
             clean_cmd = del
+        vmstop:
+            guest_path = C:\
+
         variants:
             - Win2000:
                 no reboot whql
@@ -1432,16 +1831,16 @@
                 kill_vm_gracefully = no
                 install:
                     steps = Win2000-32.steps
-                    cdrom_cd1 = windows/Windows2000_sp4.iso
-                    md5sum = dda6039f3a9173f0f6bfae40f5efdfea
-                    md5sum_1m = dd28fba196d366d56fe774bd93df5527
+                    cdrom_cd1 = isos/windows/Windows2000_sp4.iso
+                    md5sum_cd1 = dda6039f3a9173f0f6bfae40f5efdfea
+                    md5sum_1m_cd1 = dd28fba196d366d56fe774bd93df5527
                     user = user
                 setup:
                     steps = Win2000-32-rss.steps
                 unattended_install.cdrom:
-                    cdrom_cd1 = windows/Windows2000_sp4.iso
-                    md5sum = dda6039f3a9173f0f6bfae40f5efdfea
-                    md5sum_1m = dd28fba196d366d56fe774bd93df5527
+                    cdrom_cd1 = isos/windows/Windows2000_sp4.iso
+                    md5sum_cd1 = dda6039f3a9173f0f6bfae40f5efdfea
+                    md5sum_1m_cd1 = dd28fba196d366d56fe774bd93df5527
                     unattended_file = unattended/win2000-32.sif
                     floppy = images/win2000-32/answer.vfd
 
@@ -1451,17 +1850,17 @@
                     - 32:
                         image_name += -32
                         install:
-                            cdrom_cd1 = windows/WindowsXP-sp2-vlk.iso
-                            md5sum = 743450644b1d9fe97b3cf379e22dceb0
-                            md5sum_1m = b473bf75af2d1269fec8958cf0202bfd
+                            cdrom_cd1 = isos/windows/WindowsXP-sp2-vlk.iso
+                            md5sum_cd1 = 743450644b1d9fe97b3cf379e22dceb0
+                            md5sum_1m_cd1 = b473bf75af2d1269fec8958cf0202bfd
                             user = user
                             steps = WinXP-32.steps
                         setup:
                             steps = WinXP-32-rss.steps
-                        unattended_install.cdrom:
-                            cdrom_cd1 = windows/WindowsXP-sp2-vlk.iso
-                            md5sum = 743450644b1d9fe97b3cf379e22dceb0
-                            md5sum_1m = b473bf75af2d1269fec8958cf0202bfd
+                        unattended_install.cdrom|whql.support_vm_install:
+                            cdrom_cd1 = isos/windows/WindowsXP-sp2-vlk.iso
+                            md5sum_cd1 = 743450644b1d9fe97b3cf379e22dceb0
+                            md5sum_1m_cd1 = b473bf75af2d1269fec8958cf0202bfd
                             unattended_file = unattended/winxp32.sif
                             floppy = images/winXP-32/answer.vfd
                             # Uncomment virtio_network_installer_path line if
@@ -1470,28 +1869,31 @@
                             virtio_oemsetup_id = WXP32
                             virtio_network_path = 'F:\NetKVM\xp\x86'
                             #virtio_network_installer_path = 'F:\RHEV-Network32.msi'
-                        whql_submission:
+                        whql.submission:
                             desc_path_desc1 = $\WDK\Logo Type\Device Logo\Windows XP
                             desc_path_desc2 = $\WDK\Logo Type\Systems Logo\Windows XP
                             dd_data_logoarch = X86
                             dd_data_logoos = Windows XP
                             dd_data_whqlos = Windows XP
-                            dd_data_whqlqual = Basic
+                            device:
+                                dd_data_whqlqual = Basic
+                            device.net:
+                                image_name_supportvm = winXP-32-supportvm
 
                     - 64:
                         image_name += -64
                         install:
-                            cdrom_cd1 = windows/WindowsXP-64.iso
-                            md5sum = 8d3f007ec9c2060cec8a50ee7d7dc512
-                            md5sum_1m = e812363ff427effc512b7801ee70e513
+                            cdrom_cd1 = isos/windows/WindowsXP-64.iso
+                            md5sum_cd1 = 8d3f007ec9c2060cec8a50ee7d7dc512
+                            md5sum_1m_cd1 = e812363ff427effc512b7801ee70e513
                             user = user
                             steps = WinXP-64.steps
                         setup:
                             steps = WinXP-64-rss.steps
-                        unattended_install.cdrom:
-                            cdrom_cd1 = windows/WindowsXP-64.iso
-                            md5sum = 8d3f007ec9c2060cec8a50ee7d7dc512
-                            md5sum_1m = e812363ff427effc512b7801ee70e513
+                        unattended_install.cdrom|whql.support_vm_install:
+                            cdrom_cd1 = isos/windows/WindowsXP-64.iso
+                            md5sum_cd1 = 8d3f007ec9c2060cec8a50ee7d7dc512
+                            md5sum_1m_cd1 = e812363ff427effc512b7801ee70e513
                             unattended_file = unattended/winxp64.sif
                             floppy = images/winXP-64/answer.vfd
                             # Uncomment virtio_network_installer_path line if
@@ -1500,13 +1902,16 @@
                             virtio_oemsetup_id = WNET64
                             virtio_network_path = 'F:\NetKVM\xp\amd64'
                             #virtio_network_installer_path = 'F:\RHEV-Network64.msi'
-                        whql_submission:
+                        whql.submission:
                             desc_path_desc1 = $\WDK\Logo Type\Device Logo\Windows XP
                             desc_path_desc2 = $\WDK\Logo Type\Systems Logo\Windows XP
                             dd_data_logoarch = AMD64
                             dd_data_logoos = Windows XP 64-Bit Edition Version 2003
                             dd_data_whqlos = Windows XP x64
-                            dd_data_whqlqual = Basic
+                            device:
+                                dd_data_whqlqual = Basic
+                            device.net:
+                                image_name_supportvm = winXP-64-supportvm
 
             - Win2003:
                 image_name = win2003
@@ -1516,17 +1921,17 @@
                     - 32:
                         image_name += -32
                         install:
-                            cdrom_cd1 = windows/Windows2003_r2_VLK.iso
-                            md5sum = 03e921e9b4214773c21a39f5c3f42ef7
-                            md5sum_1m = 37c2fdec15ac4ec16aa10fdfdb338aa3
+                            cdrom_cd1 = isos/windows/Windows2003_r2_VLK.iso
+                            md5sum_cd1 = 03e921e9b4214773c21a39f5c3f42ef7
+                            md5sum_1m_cd1 = 37c2fdec15ac4ec16aa10fdfdb338aa3
                             user = user
                             steps = Win2003-32.steps
                         setup:
                             steps = Win2003-32-rss.steps
-                        unattended_install.cdrom:
-                            cdrom_cd1 = windows/Windows2003_r2_VLK.iso
-                            md5sum = 03e921e9b4214773c21a39f5c3f42ef7
-                            md5sum_1m = 37c2fdec15ac4ec16aa10fdfdb338aa3
+                        unattended_install.cdrom|whql.support_vm_install:
+                            cdrom_cd1 = isos/windows/Windows2003_r2_VLK.iso
+                            md5sum_cd1 = 03e921e9b4214773c21a39f5c3f42ef7
+                            md5sum_1m_cd1 = 37c2fdec15ac4ec16aa10fdfdb338aa3
                             unattended_file = unattended/win2003-32.sif
                             floppy = images/win2003-32/answer.vfd
                             # Uncomment virtio_network_installer_path line if
@@ -1535,27 +1940,30 @@
                             virtio_oemsetup_id = WNET32
                             virtio_network_path = 'F:\NetKVM\2k3\x86'
                             #virtio_network_installer_path = 'F:\RHEV-Network32.msi'
-                        whql_submission:
+                        whql.submission:
                             desc_path_desc1 = $\WDK\Logo Type\Device Logo\Windows Server 2003
                             dd_data_logoarch = X86
                             dd_data_logoos = Windows Server 2003
                             dd_data_whqlos = Windows Server 2003
-                            dd_data_whqlqual = Basic
+                            device:
+                                dd_data_whqlqual = Basic
+                            device.net:
+                                image_name_supportvm = win2003-32-supportvm
 
                     - 64:
                         image_name += -64
                         install:
-                            cdrom_cd1 = windows/Windows2003-x64.iso
-                            md5sum = 5703f87c9fd77d28c05ffadd3354dbbd
-                            md5sum_1m = 439393c384116aa09e08a0ad047dcea8
+                            cdrom_cd1 = isos/windows/Windows2003-x64.iso
+                            md5sum_cd1 = 5703f87c9fd77d28c05ffadd3354dbbd
+                            md5sum_1m_cd1 = 439393c384116aa09e08a0ad047dcea8
                             user = user
                             steps = Win2003-64.steps
                         setup:
                             steps = Win2003-64-rss.steps
-                        unattended_install.cdrom:
-                            cdrom_cd1 = windows/Windows2003-x64.iso
-                            md5sum = 5703f87c9fd77d28c05ffadd3354dbbd
-                            md5sum_1m = 439393c384116aa09e08a0ad047dcea8
+                        unattended_install.cdrom|whql.support_vm_install:
+                            cdrom_cd1 = isos/windows/Windows2003-x64.iso
+                            md5sum_cd1 = 5703f87c9fd77d28c05ffadd3354dbbd
+                            md5sum_1m_cd1 = 439393c384116aa09e08a0ad047dcea8
                             unattended_file = unattended/win2003-64.sif
                             floppy = images/win2003-64/answer.vfd
                             # Uncomment virtio_network_installer_path line if
@@ -1564,43 +1972,46 @@
                             virtio_oemsetup_id = WNET64
                             virtio_network_path = 'F:\NetKVM\2k3\amd64'
                             #virtio_network_installer_path = 'F:\RHEV-Network64.msi'
-
-                        whql_submission:
+                        whql.submission:
                             desc_path_desc1 = $\WDK\Logo Type\Device Logo\Windows Server 2003
                             dd_data_logoarch = AMD64
                             dd_data_logoos = Windows Server 2003
                             dd_data_whqlos = Windows Server 2003 x64
-                            dd_data_whqlqual = Basic
+                            device:
+                                dd_data_whqlqual = Basic
+                            device.net:
+                                image_name_supportvm = win2003-64-supportvm
 
             - WinVista:
                 image_name = winvista
                 image_size = 20G
-                whql_submission:
+                whql.submission:
                     desc_path_desc1 = $\WDK\Logo Type\Device Logo\Vista Client\Device Premium
                     desc_path_desc2 = $\WDK\Logo Type\Device Logo\Vista Client\Device Standard
                     desc_path_desc3 = $\WDK\Logo Type\Device Logo\Vista Client
 
                 variants:
                     - 32:
-                        whql_submission:
+                        whql.submission:
                             dd_data_logoarch = X86
                             dd_data_logoos = Windows Vista
                             dd_data_whqlos = Windows Vista Client
-                            dd_data_whqlqual = Premium
+                            device:
+                                dd_data_whqlqual = Premium
                         variants:
                             - sp1:
                                 image_name += -sp1-32
                                 install:
-                                    cdrom_cd1 = windows/WindowsVista-32.iso
-                                    md5sum = 1008f323d5170c8e614e52ccb85c0491
-                                    md5sum_1m = c724e9695da483bc0fd59e426eaefc72
+                                    cdrom_cd1 = isos/windows/WindowsVista-32.iso
+                                    md5sum_cd1 = 1008f323d5170c8e614e52ccb85c0491
+                                    md5sum_1m_cd1 = c724e9695da483bc0fd59e426eaefc72
                                     steps = Win-Vista-32.steps
                                 setup:
                                     steps = WinVista-32-rss.steps
-                                unattended_install.cdrom:
-                                    cdrom_cd1 = windows/WindowsVista-32.iso
-                                    md5sum = 1008f323d5170c8e614e52ccb85c0491
-                                    md5sum_1m = c724e9695da483bc0fd59e426eaefc72
+                                unattended_install.cdrom|whql.support_vm_install:
+                                    cdrom_cd1 = isos/windows/WindowsVista-32.iso
+                                    md5sum_cd1 = 1008f323d5170c8e614e52ccb85c0491
+                                    md5sum_1m_cd1 = c724e9695da483bc0fd59e426eaefc72
                                     unattended_file = unattended/winvista-32-autounattend.xml
                                     floppy = images/winvista-sp1-32/answer.vfd
                                     # Uncomment virtio_network_installer_path line if
@@ -1609,15 +2020,17 @@
                                     virtio_storage_path = 'F:\viostor\w7\x86'
                                     virtio_network_path = 'F:\NetKVM\w7\x86'
                                     #virtio_network_installer_path = 'F:\RHEV-Network32.msi'
+                                whql.submission.device.net:
+                                    image_name_supportvm = winvista-sp1-32-supportvm
 
                             - sp2:
                                 image_name += -sp2-32
-                                unattended_install.cdrom:
-                                    cdrom_cd1 = windows/en_windows_vista_with_sp2_x86_dvd_342266.iso
-                                    md5sum = 19ca90a425667812977bab6f4ce24175
-                                    md5sum_1m = 89c15020e0e6125be19acf7a2e5dc614
-                                    sha1sum = 25ad9a776503e6a583bec07879dbcc5dfd20cd6e
-                                    sha1sum_1m = a2afa4cffdc1c362dbf9e62942337f4f875a22cf
+                                unattended_install.cdrom|whql.support_vm_install:
+                                    cdrom_cd1 = isos/windows/en_windows_vista_with_sp2_x86_dvd_342266.iso
+                                    md5sum_cd1 = 19ca90a425667812977bab6f4ce24175
+                                    md5sum_1m_cd1 = 89c15020e0e6125be19acf7a2e5dc614
+                                    sha1sum_cd1 = 25ad9a776503e6a583bec07879dbcc5dfd20cd6e
+                                    sha1sum_1m_cd1 = a2afa4cffdc1c362dbf9e62942337f4f875a22cf
                                     unattended_file = unattended/winvista-32-autounattend.xml
                                     floppy = images/winvista-sp2-32/answer.vfd
                                     # Uncomment virtio_network_installer_path line if
@@ -1626,27 +2039,30 @@
                                     virtio_storage_path = 'F:\viostor\w7\x86'
                                     virtio_network_path = 'F:\NetKVM\w7\x86'
                                     #virtio_network_installer_path = 'F:\RHEV-Network32.msi'
+                                whql.submission.device.net:
+                                    image_name_supportvm = winvista-sp2-32-supportvm
 
                     - 64:
-                        whql_submission:
+                        whql.submission:
                             dd_data_logoarch = AMD64
                             dd_data_logoos = Windows Vista
                             dd_data_whqlos = Windows Vista Client x64
-                            dd_data_whqlqual = Premium
+                            device:
+                                dd_data_whqlqual = Premium
                         variants:
                             - sp1:
                                 image_name += -sp1-64
                                 install:
-                                    cdrom_cd1 = windows/WindowsVista-64.iso
-                                    md5sum = 11e2010d857fffc47813295e6be6d58d
-                                    md5sum_1m = 0947bcd5390546139e25f25217d6f165
+                                    cdrom_cd1 = isos/windows/WindowsVista-64.iso
+                                    md5sum_cd1 = 11e2010d857fffc47813295e6be6d58d
+                                    md5sum_1m_cd1 = 0947bcd5390546139e25f25217d6f165
                                     steps = Win-Vista-64.steps
                                 setup:
                                     steps = WinVista-64-rss.steps
-                                unattended_install.cdrom:
-                                    cdrom_cd1 = windows/WindowsVista-64.iso
-                                    md5sum = 11e2010d857fffc47813295e6be6d58d
-                                    md5sum_1m = 0947bcd5390546139e25f25217d6f165
+                                unattended_install.cdrom|whql.support_vm_install:
+                                    cdrom_cd1 = isos/windows/WindowsVista-64.iso
+                                    md5sum_cd1 = 11e2010d857fffc47813295e6be6d58d
+                                    md5sum_1m_cd1 = 0947bcd5390546139e25f25217d6f165
                                     unattended_file = unattended/winvista-64-autounattend.xml
                                     floppy = images/winvista-sp1-64/answer.vfd
                                     # Uncomment virtio_network_installer_path line if
@@ -1655,14 +2071,17 @@
                                     virtio_storage_path = 'F:\viostor\w7\amd64'
                                     virtio_network_path = 'F:\NetKVM\w7\amd64'
                                     #virtio_network_installer_path = 'F:\RHEV-Network64.msi'
+                                whql.submission.device.net:
+                                    image_name_supportvm = winvista-sp1-64-supportvm
+
                             - sp2:
                                 image_name += -sp2-64
-                                unattended_install.cdrom:
-                                    cdrom_cd1 = windows/en_windows_vista_sp2_x64_dvd_342267.iso
-                                    md5sum = a1c024d7abaf34bac3368e88efbc2574
-                                    md5sum_1m = 3d84911a80f3df71d1026f7adedc2181
-                                    sha1sum = aaee3c04533899f9f8c4ae0c4250ef5fafbe29a3
-                                    sha1sum_1m = 1fd21bd3ce2a4de8856c7b8fe6fdf80260f6d1c7
+                                unattended_install.cdrom|whql.support_vm_install:
+                                    cdrom_cd1 = isos/windows/en_windows_vista_sp2_x64_dvd_342267.iso
+                                    md5sum_cd1 = a1c024d7abaf34bac3368e88efbc2574
+                                    md5sum_1m_cd1 = 3d84911a80f3df71d1026f7adedc2181
+                                    sha1sum_cd1 = aaee3c04533899f9f8c4ae0c4250ef5fafbe29a3
+                                    sha1sum_1m_cd1 = 1fd21bd3ce2a4de8856c7b8fe6fdf80260f6d1c7
                                     unattended_file = unattended/winvista-64-autounattend.xml
                                     floppy = images/winvista-sp2-64/answer.vfd
                                     # Uncomment virtio_network_installer_path line if
@@ -1671,6 +2090,8 @@
                                     virtio_storage_path = 'F:\viostor\w7\amd64'
                                     virtio_network_path = 'F:\NetKVM\w7\amd64'
                                     #virtio_network_installer_path = 'F:\RHEV-Network64.msi'
+                                whql.submission.device.net:
+                                    image_name_supportvm = winvista-sp2-64-supportvm
 
             - Win2008:
                 no whql
@@ -1683,16 +2104,16 @@
                             - sp1:
                                 image_name += -sp1-32
                                 install:
-                                    cdrom_cd1 = windows/Windows2008-x86.iso
+                                    cdrom_cd1 = isos/windows/Windows2008-x86.iso
                                     #en_windows_server_2008_datacenter_enterprise_standard_x86_dvd_X14-26710.iso
                                     md5sum=0bfca49f0164de0a8eba236ced47007d
                                     md5sum_1m=07d7f5006393f74dc76e6e2e943e2440
-                                    sha1sum = 6ca018ff96f1e9b2b310a36546b6fded99a421e6
+                                    sha1sum_cd1 = 6ca018ff96f1e9b2b310a36546b6fded99a421e6
                                     steps = Win2008-32.steps
                                 setup:
                                     steps = Win2008-32-rss.steps
-                                unattended_install.cdrom:
-                                    cdrom_cd1 = windows/Windows2008-x86.iso
+                                unattended_install.cdrom|whql.support_vm_install:
+                                    cdrom_cd1 = isos/windows/Windows2008-x86.iso
                                     md5sum=0bfca49f0164de0a8eba236ced47007d
                                     md5sum_1m=07d7f5006393f74dc76e6e2e943e2440
                                     unattended_file = unattended/win2008-32-autounattend.xml
@@ -1706,12 +2127,12 @@
 
                             - sp2:
                                 image_name += -sp2-32
-                                unattended_install.cdrom:
-                                    cdrom_cd1 = windows/en_windows_server_2008_datacenter_enterprise_standard_sp2_x86_dvd_342333.iso
-                                    md5sum = b9201aeb6eef04a3c573d036a8780bdf
-                                    md5sum_1m = b7a9d42e55ea1e85105a3a6ad4da8e04
-                                    sha1sum = 49d0d6917c1256fe81048d414fa473bbc76a8724
-                                    sha1sum_1m = 9662ff7ed715faa00407e4befc484ea52a92a9fb
+                                unattended_install.cdrom|whql.support_vm_install:
+                                    cdrom_cd1 = isos/windows/en_windows_server_2008_datacenter_enterprise_standard_sp2_x86_dvd_342333.iso
+                                    md5sum_cd1 = b9201aeb6eef04a3c573d036a8780bdf
+                                    md5sum_1m_cd1 = b7a9d42e55ea1e85105a3a6ad4da8e04
+                                    sha1sum_cd1 = 49d0d6917c1256fe81048d414fa473bbc76a8724
+                                    sha1sum_1m_cd1 = 9662ff7ed715faa00407e4befc484ea52a92a9fb
                                     unattended_file = unattended/win2008-32-autounattend.xml
                                     floppy = images/win2008-sp2-32/answer.vfd
                                     # Uncomment virtio_network_installer_path line if
@@ -1727,16 +2148,16 @@
                                 image_name += -sp1-64
                                 install:
                                     steps = Win2008-64.steps
-                                    cdrom_cd1 = windows/Windows2008-x64.iso
+                                    cdrom_cd1 = isos/windows/Windows2008-x64.iso
                                     #en_windows_server_2008_datacenter_enterprise_standard_x64_dvd_X14-26714.iso
                                     md5sum=27c58cdb3d620f28c36333a5552f271c
                                     md5sum_1m=efdcc11d485a1ef9afa739cb8e0ca766
-                                    sha1sum = bd000374709f67e9358814db6ec8f0ddaaa16f70
+                                    sha1sum_cd1 = bd000374709f67e9358814db6ec8f0ddaaa16f70
                                     passwd = 1q2w3eP
                                 setup:
                                     steps = Win2008-64-rss.steps
-                                unattended_install.cdrom:
-                                    cdrom_cd1 = windows/Windows2008-x64.iso
+                                unattended_install.cdrom|whql.support_vm_install:
+                                    cdrom_cd1 = isos/windows/Windows2008-x64.iso
                                     md5sum=27c58cdb3d620f28c36333a5552f271c
                                     md5sum_1m=efdcc11d485a1ef9afa739cb8e0ca766
                                     unattended_file = unattended/win2008-64-autounattend.xml
@@ -1750,12 +2171,12 @@
 
                             - sp2:
                                 image_name += -sp2-64
-                                unattended_install.cdrom:
-                                    cdrom_cd1 = windows/en_windows_server_2008_datacenter_enterprise_standard_sp2_x64_dvd_342336.iso
-                                    md5sum = e94943ef484035b3288d8db69599a6b5
-                                    md5sum_1m = ee55506823d0efffb5532ddd88a8e47b
-                                    sha1sum = 34c7d726c57b0f8b19ba3b40d1b4044c15fc2029
-                                    sha1sum_1m = 8fe08b03e3531906855a60a78020ac9577dff5ba
+                                unattended_install.cdrom|whql.support_vm_install:
+                                    cdrom_cd1 = isos/windows/en_windows_server_2008_datacenter_enterprise_standard_sp2_x64_dvd_342336.iso
+                                    md5sum_cd1 = e94943ef484035b3288d8db69599a6b5
+                                    md5sum_1m_cd1 = ee55506823d0efffb5532ddd88a8e47b
+                                    sha1sum_cd1 = 34c7d726c57b0f8b19ba3b40d1b4044c15fc2029
+                                    sha1sum_1m_cd1 = 8fe08b03e3531906855a60a78020ac9577dff5ba
                                     unattended_file = unattended/win2008-64-autounattend.xml
                                     floppy = images/win2008-sp2-64/answer.vfd
                                     # Uncomment virtio_network_installer_path line if
@@ -1767,12 +2188,12 @@
 
                             - r2:
                                 image_name += -r2-64
-                                unattended_install.cdrom:
-                                    cdrom_cd1 = windows/en_windows_server_2008_r2_standard_enterprise_datacenter_and_web_x64_dvd_x15-59754.iso
-                                    md5sum = 0207ef392c60efdda92071b0559ca0f9
-                                    md5sum_1m = a5a22ce25008bd7109f6d830d627e3ed
-                                    sha1sum = ad855ea913aaec3f1d0e1833c1aef7a0de326b0a
-                                    sha1sum_1m = 9194a3aabae25b36e5f73cad001314b2c8d07d14
+                                unattended_install.cdrom|whql.support_vm_install:
+                                    cdrom_cd1 = isos/windows/en_windows_server_2008_r2_standard_enterprise_datacenter_and_web_x64_dvd_x15-59754.iso
+                                    md5sum_cd1 = 0207ef392c60efdda92071b0559ca0f9
+                                    md5sum_1m_cd1 = a5a22ce25008bd7109f6d830d627e3ed
+                                    sha1sum_cd1 = ad855ea913aaec3f1d0e1833c1aef7a0de326b0a
+                                    sha1sum_1m_cd1 = 9194a3aabae25b36e5f73cad001314b2c8d07d14
                                     unattended_file = unattended/win2008-r2-autounattend.xml
                                     floppy = images/win2008-r2-64/answer.vfd
                                     # Uncomment virtio_network_installer_path line if
@@ -1785,7 +2206,7 @@
             - Win7:
                 image_name = win7
                 image_size = 20G
-                whql_submission:
+                whql.submission:
                     desc_path_desc1 = $\WDK\Logo Type\Device Logo\Windows 7 Client\Logo
                     desc_path_desc2 = $\WDK\Logo Type\Device Logo\Windows 7 Client
                     device_data += " adq"
@@ -1795,12 +2216,12 @@
                 variants:
                     - 32:
                         image_name += -32
-                        unattended_install.cdrom:
-                            cdrom_cd1 = windows/en_windows_7_ultimate_x86_dvd_x15-65921.iso
-                            md5sum = d0b8b407e8a3d4b75ee9c10147266b89
-                            md5sum_1m = 2b0c2c22b1ae95065db08686bf83af93
-                            sha1sum = 5395dc4b38f7bdb1e005ff414deedfdb16dbf610
-                            sha1sum_1m = 9f9c3780aebeb28a9bf22188eed6bc15475dc9c5
+                        unattended_install.cdrom|whql.support_vm_install:
+                            cdrom_cd1 = isos/windows/en_windows_7_ultimate_x86_dvd_x15-65921.iso
+                            md5sum_cd1 = d0b8b407e8a3d4b75ee9c10147266b89
+                            md5sum_1m_cd1 = 2b0c2c22b1ae95065db08686bf83af93
+                            sha1sum_cd1 = 5395dc4b38f7bdb1e005ff414deedfdb16dbf610
+                            sha1sum_1m_cd1 = 9f9c3780aebeb28a9bf22188eed6bc15475dc9c5
                             unattended_file = unattended/win7-32-autounattend.xml
                             floppy = images/win7-32/answer.vfd
                             # Uncomment virtio_network_installer_path line if
@@ -1809,28 +2230,31 @@
                             virtio_storage_path = 'F:\viostor\w7\x86'
                             virtio_network_path = 'F:\NetKVM\w7\x86'
                             #virtio_network_installer_path = 'F:\RHEV-Network32.msi'
-                        whql_submission:
+                        whql.submission:
                             dd_data_logoarch = X86
                             dd_data_logoos = Windows 7
                             dd_data_whqlos = Windows 7 Client
-                            dd_data_whqlqual = Logo
+                            device:
+                                dd_data_whqlqual = Logo
+                            device.net:
+                                image_name_supportvm = win7-32-supportvm
 
                     - 64:
                         image_name += -64
                         install:
-                            cdrom_cd1 = windows/en_windows_7_ultimate_x64_dvd_x15-65922.iso
+                            cdrom_cd1 = isos/windows/en_windows_7_ultimate_x64_dvd_x15-65922.iso
                             md5sum=f43d22e4fb07bf617d573acd8785c028
                             md5sum_1m=b44d8cf99dbed2a5cb02765db8dfd48f
                             passwd = 1q2w3eP
                             steps = Win7-64.steps
                         setup:
                             steps = Win7-64-rss.steps
-                        unattended_install.cdrom:
-                            cdrom_cd1 = windows/en_windows_7_ultimate_x64_dvd_x15-65922.iso
-                            md5sum = f43d22e4fb07bf617d573acd8785c028
-                            md5sum_1m = b44d8cf99dbed2a5cb02765db8dfd48f
-                            sha1sum = 326327cc2ff9f05379f5058c41be6bc5e004baa7
-                            sha1sum_1m = 4a3903bd5157de54f0702e5263e0a683c5775515
+                        unattended_install.cdrom|whql.support_vm_install:
+                            cdrom_cd1 = isos/windows/en_windows_7_ultimate_x64_dvd_x15-65922.iso
+                            md5sum_cd1 = f43d22e4fb07bf617d573acd8785c028
+                            md5sum_1m_cd1 = b44d8cf99dbed2a5cb02765db8dfd48f
+                            sha1sum_cd1 = 326327cc2ff9f05379f5058c41be6bc5e004baa7
+                            sha1sum_1m_cd1 = 4a3903bd5157de54f0702e5263e0a683c5775515
                             unattended_file = unattended/win7-64-autounattend.xml
                             floppy = images/win7-64/answer.vfd
                             # Uncomment virtio_network_installer_path line if
@@ -1839,11 +2263,14 @@
                             virtio_storage_path = 'F:\viostor\w7\amd64'
                             virtio_network_path = 'F:\NetKVM\w7\amd64'
                             #virtio_network_installer_path = 'F:\RHEV-Network64.msi'
-                        whql_submission:
+                        whql.submission:
                             dd_data_logoarch = AMD64
                             dd_data_logoos = Windows 7
                             dd_data_whqlos = Windows 7 Client x64
-                            dd_data_whqlqual = Logo
+                            device:
+                                dd_data_whqlqual = Logo
+                            device.net:
+                                image_name_supportvm = win7-64-supportvm
 
 
     # Unix/BSD section
@@ -1858,16 +2285,16 @@
                 image_name = NetBSD-1.6.2
                 image_size = 4G
                 steps = NetBSD-1.6.2.steps
-                cdrom_cd1 = bsd/netbsd-1.6.2-i386.iso
+                cdrom_cd1 = isos/bsd/netbsd-1.6.2-i386.iso
                 md5sum=72eb680300f77d529bfbc880ba8208f3
                 md5sum_1m=f1a9e1e825c90adfb1be35c6177bd9ac
 
             - OpenBSD-4.1:
                 image_name = OpenBSD-4.1
                 steps = OpenBSD-4.1-32.steps
-                cdrom_cd1 = unix/openbsd41-i386-07-05-06.iso
-                md5sum = 984790db10ebdd6fc7a9cf97abc7c967
-                md5sum_1m = 8fc234b4b0ecfe56843a32ac1d26ed55
+                cdrom_cd1 = isos/unix/openbsd41-i386-07-05-06.iso
+                md5sum_cd1 = 984790db10ebdd6fc7a9cf97abc7c967
+                md5sum_1m_cd1 = 8fc234b4b0ecfe56843a32ac1d26ed55
 
     # Live CD section
     - @livecd:
@@ -1879,27 +2306,31 @@
         variants:
             - Belenix:
                 steps = Belenix-0.7.1.steps
-                cdrom_cd1 = unix/belenix_0.7.1.iso
-                md5sum = 29cea6160cf5250de138e2820e53e342
-                md5sum_1m = 427bbef1b85d6d051799b825d686ae94
+                cdrom_cd1 = isos/unix/belenix_0.7.1.iso
+                md5sum_cd1 = 29cea6160cf5250de138e2820e53e342
+                md5sum_1m_cd1 = 427bbef1b85d6d051799b825d686ae94
 
             - Slax:
                 steps = Slax-6.0.7.steps
-                cdrom_cd1 = linux/slax-6.0.7.iso
-                md5sum = cde0ecba3c8289d786e12c44666ded6e
-                md5sum_1m = ddf02bc7444f22d1160a6e5a8fc8723f
+                cdrom_cd1 = isos/linux/slax-6.0.7.iso
+                md5sum_cd1 = cde0ecba3c8289d786e12c44666ded6e
+                md5sum_1m_cd1 = ddf02bc7444f22d1160a6e5a8fc8723f
 
             - FreeSBIE-2.0.1:
                 steps = FreeSBIE-2.0.1.steps
-                cdrom_cd1 = unix/FreeSBIE-2.0.1-RELEASE.iso
-                md5sum = b2f680d27c21bbfaf4fb90dce090a118
-                md5sum_1m = 4d81ee7fe0101b0a14225963bfff60c1
+                cdrom_cd1 = isos/unix/FreeSBIE-2.0.1-RELEASE.iso
+                md5sum_cd1 = b2f680d27c21bbfaf4fb90dce090a118
+                md5sum_1m_cd1 = 4d81ee7fe0101b0a14225963bfff60c1
 
             - memtest:
                 mem = 128
                 steps = memtest86+.steps
-                cdrom_cd1 = misc/memtest86+-2.01.iso
-                md5sum = 9fae22f2666369968a76ef59e9a81ced
+                cdrom_cd1 = isos/misc/memtest86+-2.01.iso
+                md5sum_cd1 = 9fae22f2666369968a76ef59e9a81ced
+
+
+whql.support_vm_install|whql.client_install.support_vm:
+    image_name += -supportvm
 
 
 variants:
@@ -1923,18 +2354,16 @@
 
 
 virtio_net|virtio_blk|e1000|balloon_check:
-    only Fedora.11 Fedora.12 Fedora.13 RHEL.5 OpenSUSE.11 SLES.11 Ubuntu-8.10-server
-    # only WinXP Win2003 Win2008 WinVista Win7 Fedora.11 Fedora.12 Fedora.13 RHEL.5 OpenSUSE.11 SLES.11 Ubuntu-8.10-server
+    only Fedora.11 Fedora.12 Fedora.13 Fedora.14 RHEL.5 RHEL.6 OpenSUSE.11 SLES.11 Ubuntu-8.10-server
+    # only WinXP Win2003 Win2008 WinVista Win7 Fedora.11 Fedora.12 Fedora.13 Fedora.14 RHEL.5 RHEL.6 OpenSUSE.11 SLES.11 Ubuntu-8.10-server
 
+kdump:
+    only RHEL.5 RHEL.6
 
 variants:
     - @qcow2:
         image_format = qcow2
-        post_command += " python scripts/check_image.py;"
-        post_command_timeout = 600
-        post_command_noncritical = yes
-        ioquit:
-            post_command_noncritical = no
+        check_image = yes
     - vmdk:
         no ioquit
         only Fedora Ubuntu Windows
@@ -1952,8 +2381,7 @@
 variants:
     - @smallpages:
     - hugepages:
-        pre_command += " scripts/hugepage.py /mnt/kvm_hugepage;"
-        post_command += " umount /mnt/kvm_hugepage && echo 0 > /proc/sys/vm/nr_hugepages;"
+        setup_hugepages = yes
         extra_params += " -mem-path /mnt/kvm_hugepage"
 
 
diff --git a/client/tests/kvm/unattended/Fedora-13.ks b/client/tests/kvm/unattended/Fedora-13.ks
index 0949e40..861546b 100644
--- a/client/tests/kvm/unattended/Fedora-13.ks
+++ b/client/tests/kvm/unattended/Fedora-13.ks
@@ -1,5 +1,5 @@
 install
-cdrom
+KVM_TEST_MEDIUM
 text
 reboot
 lang en_US
diff --git a/client/tests/kvm/unattended/Fedora-14.ks b/client/tests/kvm/unattended/Fedora-14.ks
new file mode 100644
index 0000000..9b99432
--- /dev/null
+++ b/client/tests/kvm/unattended/Fedora-14.ks
@@ -0,0 +1,37 @@
+install
+KVM_TEST_MEDIUM
+text
+reboot
+lang en_US
+keyboard us
+network --bootproto dhcp
+rootpw 123456
+firewall --enabled --ssh
+selinux --enforcing
+timezone --utc America/New_York
+firstboot --disable
+bootloader --location=mbr --append="rd_NO_PLYMOUTH console=tty0 console=ttyS0,115200"
+zerombr
+
+clearpart --all --initlabel
+autopart
+
+%packages
+@base
+@development-libs
+@development-tools
+%end
+
+%post --interpreter /usr/bin/python
+import socket, os
+os.system('dhclient')
+os.system('chkconfig sshd on')
+os.system('iptables -F')
+os.system('echo 0 > /selinux/enforce')
+server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+server.bind(('', 12323))
+server.listen(1)
+(client, addr) = server.accept()
+client.send("done")
+client.close()
+%end
diff --git a/client/tests/kvm/unattended/RHEL-5-series.ks b/client/tests/kvm/unattended/RHEL-5-series.ks
index 92ff727..3ee84f1 100644
--- a/client/tests/kvm/unattended/RHEL-5-series.ks
+++ b/client/tests/kvm/unattended/RHEL-5-series.ks
@@ -21,6 +21,7 @@
 @base
 @development-libs
 @development-tools
+kexec-tools
 
 %post --interpreter /usr/bin/python
 import socket, os
diff --git a/client/tests/kvm/unattended/RHEL-6-series.ks b/client/tests/kvm/unattended/RHEL-6-series.ks
new file mode 100644
index 0000000..16cd493
--- /dev/null
+++ b/client/tests/kvm/unattended/RHEL-6-series.ks
@@ -0,0 +1,40 @@
+install
+KVM_TEST_MEDIUM
+text
+reboot
+lang en_US.UTF-8
+keyboard us
+key --skip
+network --bootproto dhcp
+rootpw 123456
+firewall --enabled --ssh
+selinux --enforcing
+timezone --utc America/New_York
+firstboot --disable
+bootloader --location=mbr --append="console=tty0 console=ttyS0,115200"
+zerombr
+clearpart --all --initlabel
+autopart
+reboot
+
+%packages
+@base
+@core
+@development
+@additional-devel
+@debugging-tools
+@network-tools
+NetworkManager
+
+%post --interpreter /usr/bin/python
+import socket, os
+os.system('dhclient')
+os.system('chkconfig sshd on')
+os.system('iptables -F')
+os.system('echo 0 > /selinux/enforce')
+server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+server.bind(('', 12323))
+server.listen(1)
+(client, addr) = server.accept()
+client.send("done")
+client.close()
diff --git a/client/tests/kvm/unattended/win2000-32.sif b/client/tests/kvm/unattended/win2000-32.sif
index 7562846..8720851 100644
--- a/client/tests/kvm/unattended/win2000-32.sif
+++ b/client/tests/kvm/unattended/win2000-32.sif
@@ -70,4 +70,4 @@
 [GuiRunOnce]
    Command0="cmd /c E:\setuprss.bat"
    Command1="cmd /c netsh interface ip set address local dhcp"
-   Command2="cmd /c ping 10.0.2.2 -n 20 && A:\finish.exe"
+   Command2="cmd /c A:\finish.exe"
diff --git a/client/tests/kvm/unattended/win2003-32.sif b/client/tests/kvm/unattended/win2003-32.sif
index fab2cf5..207cd2b 100644
--- a/client/tests/kvm/unattended/win2003-32.sif
+++ b/client/tests/kvm/unattended/win2003-32.sif
@@ -63,4 +63,4 @@
     Command3="cmd /c net start telnet"
     Command4="cmd /c E:\setuprss.bat"
     Command5="cmd /c netsh interface ip set address local dhcp"
-    Command6="cmd /c ping 10.0.2.2 -n 20 && A:\finish.exe"
+    Command6="cmd /c A:\finish.exe"
diff --git a/client/tests/kvm/unattended/win2003-64.sif b/client/tests/kvm/unattended/win2003-64.sif
index fab2cf5..207cd2b 100644
--- a/client/tests/kvm/unattended/win2003-64.sif
+++ b/client/tests/kvm/unattended/win2003-64.sif
@@ -63,4 +63,4 @@
     Command3="cmd /c net start telnet"
     Command4="cmd /c E:\setuprss.bat"
     Command5="cmd /c netsh interface ip set address local dhcp"
-    Command6="cmd /c ping 10.0.2.2 -n 20 && A:\finish.exe"
+    Command6="cmd /c A:\finish.exe"
diff --git a/client/tests/kvm/unattended/win2008-32-autounattend.xml b/client/tests/kvm/unattended/win2008-32-autounattend.xml
index 352cb73..e33a36b 100644
--- a/client/tests/kvm/unattended/win2008-32-autounattend.xml
+++ b/client/tests/kvm/unattended/win2008-32-autounattend.xml
@@ -147,20 +147,6 @@
 				<SynchronousCommand wcm:action="add">
 					<CommandLine>%WINDIR%\System32\cmd /c net start telnet</CommandLine>
 					<Order>5</Order>
-<<<<<<< HEAD
-				</SynchronousCommand>
-				<SynchronousCommand wcm:action="add">
-					<CommandLine>%WINDIR%\System32\cmd /c E:\setuprss.bat</CommandLine>
-					<Order>6</Order>
-				</SynchronousCommand>
-				<SynchronousCommand wcm:action="add">
-					<CommandLine>%WINDIR%\System32\cmd /c netsh interface ip set address "Local Area Connection" dhcp</CommandLine>
-					<Order>7</Order>
-				</SynchronousCommand>
-				<SynchronousCommand wcm:action="add">
-					<CommandLine>%WINDIR%\System32\cmd /c ping 10.0.2.2 -n 20 &#38;&#38; A:\finish.exe</CommandLine>
-					<Order>8</Order>
-=======
 				</SynchronousCommand>
 				<SynchronousCommand wcm:action="add">
 					<CommandLine>%WINDIR%\System32\cmd /c bcdedit /set {current} bootstatuspolicy ignoreallfailures</CommandLine>
@@ -175,9 +161,8 @@
 					<Order>8</Order>
 				</SynchronousCommand>
 				<SynchronousCommand wcm:action="add">
-					<CommandLine>%WINDIR%\System32\cmd /c ping 10.0.2.2 -n 20 &#38;&#38; A:\finish.exe</CommandLine>
+					<CommandLine>%WINDIR%\System32\cmd /c A:\finish.exe</CommandLine>
 					<Order>9</Order>
->>>>>>> cros/upstream
 				</SynchronousCommand>
 			</FirstLogonCommands>
 			<OOBE>
diff --git a/client/tests/kvm/unattended/win2008-64-autounattend.xml b/client/tests/kvm/unattended/win2008-64-autounattend.xml
index fce6582..5de61a9 100644
--- a/client/tests/kvm/unattended/win2008-64-autounattend.xml
+++ b/client/tests/kvm/unattended/win2008-64-autounattend.xml
@@ -170,7 +170,7 @@
 					<Order>8</Order>
 				</SynchronousCommand>
 				<SynchronousCommand wcm:action="add">
-					<CommandLine>%WINDIR%\System32\cmd /c ping 10.0.2.2 -n 20 &#38;&#38; A:\finish.exe</CommandLine>
+					<CommandLine>%WINDIR%\System32\cmd /c A:\finish.exe</CommandLine>
 					<Order>9</Order>
 				</SynchronousCommand>
 			</FirstLogonCommands>
diff --git a/client/tests/kvm/unattended/win2008-r2-autounattend.xml b/client/tests/kvm/unattended/win2008-r2-autounattend.xml
index 7e9ab23..5de61a9 100644
--- a/client/tests/kvm/unattended/win2008-r2-autounattend.xml
+++ b/client/tests/kvm/unattended/win2008-r2-autounattend.xml
@@ -114,20 +114,6 @@
 			<UILanguage>en-US</UILanguage>
 			<UserLocale>en-US</UserLocale>
 		</component>
-		<component name="Microsoft-Windows-PnpCustomizationsWinPE"
-			processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35"
-			language="neutral" versionScope="nonSxS"
-			xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State"
-			xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-			<DriverPaths>
-				<PathAndCredentials wcm:keyValue="1" wcm:action="add">
-					<Path>KVM_TEST_STORAGE_DRIVER_PATH</Path>
-				</PathAndCredentials>
-				<PathAndCredentials wcm:keyValue="2" wcm:action="add">
-					<Path>KVM_TEST_NETWORK_DRIVER_PATH</Path>
-				</PathAndCredentials>
-			</DriverPaths>
-		</component>
 	</settings>
 	<settings pass="oobeSystem">
 		<component name="Microsoft-Windows-Shell-Setup"
@@ -184,7 +170,7 @@
 					<Order>8</Order>
 				</SynchronousCommand>
 				<SynchronousCommand wcm:action="add">
-					<CommandLine>%WINDIR%\System32\cmd /c ping 10.0.2.2 -n 20 &#38;&#38; A:\finish.exe</CommandLine>
+					<CommandLine>%WINDIR%\System32\cmd /c A:\finish.exe</CommandLine>
 					<Order>9</Order>
 				</SynchronousCommand>
 			</FirstLogonCommands>
diff --git a/client/tests/kvm/unattended/win7-32-autounattend.xml b/client/tests/kvm/unattended/win7-32-autounattend.xml
index 6904db1..f313f4a 100644
--- a/client/tests/kvm/unattended/win7-32-autounattend.xml
+++ b/client/tests/kvm/unattended/win7-32-autounattend.xml
@@ -156,19 +156,6 @@
 					<Order>5</Order>
 				</SynchronousCommand>
 				<SynchronousCommand wcm:action="add">
-<<<<<<< HEAD
-					<CommandLine>%WINDIR%\System32\cmd /c E:\setuprss.bat</CommandLine>
-					<Order>6</Order>
-				</SynchronousCommand>
-				<SynchronousCommand wcm:action="add">
-					<CommandLine>%WINDIR%\System32\cmd /c netsh interface ip set address "Local Area Connection" dhcp</CommandLine>
-					<Order>7</Order>
-				</SynchronousCommand>
-				<SynchronousCommand wcm:action="add">
-					<CommandLine>%WINDIR%\System32\cmd /c ping 10.0.2.2 -n 20 &#38;&#38; A:\finish.exe</CommandLine>
-					<Order>8</Order>
-				</SynchronousCommand>
-=======
 					<CommandLine>%WINDIR%\System32\cmd /c bcdedit /set {current} bootstatuspolicy ignoreallfailures</CommandLine>
 					<Order>6</Order>
 				</SynchronousCommand>
@@ -181,13 +168,12 @@
 					<Order>8</Order>
 				</SynchronousCommand>
 				<SynchronousCommand wcm:action="add">
-					<CommandLine>%WINDIR%\System32\cmd /c ping 10.0.2.2 -n 20 &#38;&#38; A:\finish.exe</CommandLine>
+					<CommandLine>%WINDIR%\System32\cmd /c A:\finish.exe</CommandLine>
 					<Order>9</Order>
 				</SynchronousCommand>
->>>>>>> cros/upstream
 			</FirstLogonCommands>
 		</component>
 	</settings>
 	<cpi:offlineImage cpi:source="wim:c:/install.wim#Windows Longhorn SERVERSTANDARD"
 		xmlns:cpi="urn:schemas-microsoft-com:cpi" />
-</unattend>
+</unattend>
\ No newline at end of file
diff --git a/client/tests/kvm/unattended/win7-64-autounattend.xml b/client/tests/kvm/unattended/win7-64-autounattend.xml
index e30e2c7..b42aa8f 100644
--- a/client/tests/kvm/unattended/win7-64-autounattend.xml
+++ b/client/tests/kvm/unattended/win7-64-autounattend.xml
@@ -168,7 +168,7 @@
 					<Order>8</Order>
 				</SynchronousCommand>
 				<SynchronousCommand wcm:action="add">
-					<CommandLine>%WINDIR%\System32\cmd /c ping 10.0.2.2 -n 20 &#38;&#38; A:\finish.exe</CommandLine>
+					<CommandLine>%WINDIR%\System32\cmd /c A:\finish.exe</CommandLine>
 					<Order>9</Order>
 				</SynchronousCommand>
 			</FirstLogonCommands>
@@ -176,4 +176,4 @@
 	</settings>
 	<cpi:offlineImage cpi:source="wim:c:/install.wim#Windows Longhorn SERVERSTANDARD"
 		xmlns:cpi="urn:schemas-microsoft-com:cpi" />
-</unattend>
+</unattend>
\ No newline at end of file
diff --git a/client/tests/kvm/unattended/winvista-32-autounattend.xml b/client/tests/kvm/unattended/winvista-32-autounattend.xml
index d4e8c5c..4dfe06c 100644
--- a/client/tests/kvm/unattended/winvista-32-autounattend.xml
+++ b/client/tests/kvm/unattended/winvista-32-autounattend.xml
@@ -164,7 +164,7 @@
 					<Order>7</Order>
 				</SynchronousCommand>
 				<SynchronousCommand wcm:action="add">
-					<CommandLine>%WINDIR%\System32\cmd /c ping 10.0.2.2 -n 20 &#38;&#38; A:\finish.exe</CommandLine>
+					<CommandLine>%WINDIR%\System32\cmd /c A:\finish.exe</CommandLine>
 					<Order>8</Order>
 				</SynchronousCommand>
 			</FirstLogonCommands>
diff --git a/client/tests/kvm/unattended/winvista-64-autounattend.xml b/client/tests/kvm/unattended/winvista-64-autounattend.xml
index 16d4850..5867bdb 100644
--- a/client/tests/kvm/unattended/winvista-64-autounattend.xml
+++ b/client/tests/kvm/unattended/winvista-64-autounattend.xml
@@ -165,7 +165,7 @@
 					<Order>7</Order>
 				</SynchronousCommand>
 				<SynchronousCommand wcm:action="add">
-					<CommandLine>%WINDIR%\System32\cmd /c ping 10.0.2.2 -n 20 &#38;&#38; A:\finish.exe</CommandLine>
+					<CommandLine>%WINDIR%\System32\cmd /c A:\finish.exe</CommandLine>
 					<Order>8</Order>
 				</SynchronousCommand>
 			</FirstLogonCommands>
diff --git a/client/tests/kvm/unattended/winxp32.sif b/client/tests/kvm/unattended/winxp32.sif
index b9a2ab6..4696e29 100644
--- a/client/tests/kvm/unattended/winxp32.sif
+++ b/client/tests/kvm/unattended/winxp32.sif
@@ -72,4 +72,6 @@
    Command0="cmd /c KVM_TEST_VIRTIO_NETWORK_INSTALLER"
    Command1="cmd /c E:\setuprss.bat"
    Command2="cmd /c netsh interface ip set address local dhcp"
-   Command3="cmd /c ping 10.0.2.2 -n 20 && A:\finish.exe"
+   Command3="cmd /c sc config tlntsvr start= auto"
+   Command4="cmd /c net start telnet"
+   Command5="cmd /c A:\finish.exe"
diff --git a/client/tests/kvm/unattended/winxp64.sif b/client/tests/kvm/unattended/winxp64.sif
index b9a2ab6..4696e29 100644
--- a/client/tests/kvm/unattended/winxp64.sif
+++ b/client/tests/kvm/unattended/winxp64.sif
@@ -72,4 +72,6 @@
    Command0="cmd /c KVM_TEST_VIRTIO_NETWORK_INSTALLER"
    Command1="cmd /c E:\setuprss.bat"
    Command2="cmd /c netsh interface ip set address local dhcp"
-   Command3="cmd /c ping 10.0.2.2 -n 20 && A:\finish.exe"
+   Command3="cmd /c sc config tlntsvr start= auto"
+   Command4="cmd /c net start telnet"
+   Command5="cmd /c A:\finish.exe"
diff --git a/client/tests/tbench/tbench.py b/client/tests/tbench/tbench.py
index 510bc2f..79466a3 100644
--- a/client/tests/tbench/tbench.py
+++ b/client/tests/tbench/tbench.py
@@ -32,7 +32,9 @@
             client = self.srcdir + '/client.txt'
             args = '-c ' + client + ' ' + '%s' % args
             cmd = os.path.join(self.srcdir, "tbench") + " " + args
-            self.results = utils.system_output(cmd, retain_output=True)
+            # Standard output is verbose and merely makes our debug logs huge
+            # so we don't retain it.  It gets parsed for the results.
+            self.results = utils.run(cmd, stderr_tee=utils.TEE_TO_LOGS).stdout
             os.kill(pid, signal.SIGTERM)    # clean up the server
         else:                           # child
             server = self.srcdir + '/tbench_srv'
diff --git a/client/tests/tracing_microbenchmark/tracers.py b/client/tests/tracing_microbenchmark/tracers.py
index bdd5194..08ca7e3 100644
--- a/client/tests/tracing_microbenchmark/tracers.py
+++ b/client/tests/tracing_microbenchmark/tracers.py
@@ -56,5 +56,5 @@
                 cpu_key = '%s_%s' % (cpu, key)
                 total_key = 'total_' + key
                 results[cpu_key] = val
-                results[total_key] = (results.get(total_key, 0) + 
+                results[total_key] = (results.get(total_key, 0) +
                                       results[cpu_key])
diff --git a/client/tests/unixbench/Makefile.patch b/client/tests/unixbench/Makefile.patch
index 9d62287..ba672f9 100644
--- a/client/tests/unixbench/Makefile.patch
+++ b/client/tests/unixbench/Makefile.patch
@@ -1,6 +1,5 @@
-diff -c -r src/Makefile newsrc/Makefile
-*** src/Makefile	Wed Jul 28 15:05:02 1999
---- srcnew/Makefile	Tue Feb 16 19:43:54 2010
+*** unixbench-4.1.0/Makefile	Wed Jul 28 15:05:02 1999
+--- unixbench-4.1.0.patch/Makefile	Fri Jan 21 10:56:32 2011
 ***************
 *** 32,50 ****
   SHELL = /bin/sh
@@ -43,3 +42,26 @@
   
   ## Very generic
   #OPTON = -O
+***************
+*** 203,212 ****
+  	$(CC) -o $(PROGDIR)/dhry2 ${CFLAGS} ${OPTON} $(SRCDIR)/dhry_1.o $(SRCDIR)/dhry_2.o 
+  	cd $(SRCDIR); rm -f dhry_1.o dhry_2.o 
+  $(PROGDIR)/dhry2reg: $(SRCDIR)/dhry_1.c $(SRCDIR)/dhry_2.c $(SRCDIR)/dhry.h 
+! 	cd $(SRCDIR); $(CC) -c ${CFLAGS} -DREG=register -DHZ=${HZ} ${OPTON} dhry_1.c
+! 	cd $(SRCDIR); $(CC) -c ${CFLAGS} -DREG=register -DHZ=${HZ} ${OPTON} dhry_2.c
+! 	$(CC) -o $(PROGDIR)/dhry2reg ${CFLAGS} ${OPTON} $(SRCDIR)/dhry_1.o $(SRCDIR)/dhry_2.o
+! 	cd $(SRCDIR); rm -f dhry_1.o dhry_2.o
+  
+  
+  dhry2reg: $(SRCDIR)/dhry_1.c $(SRCDIR)/dhry_2.c $(SRCDIR)/dhry.h 
+--- 204,213 ----
+  	$(CC) -o $(PROGDIR)/dhry2 ${CFLAGS} ${OPTON} $(SRCDIR)/dhry_1.o $(SRCDIR)/dhry_2.o 
+  	cd $(SRCDIR); rm -f dhry_1.o dhry_2.o 
+  $(PROGDIR)/dhry2reg: $(SRCDIR)/dhry_1.c $(SRCDIR)/dhry_2.c $(SRCDIR)/dhry.h 
+! 	cd $(SRCDIR); $(CC) -c ${CFLAGS} -DREG=register -DHZ=${HZ} ${OPTON} dhry_1.c -o dhry_1_reg.o
+! 	cd $(SRCDIR); $(CC) -c ${CFLAGS} -DREG=register -DHZ=${HZ} ${OPTON} dhry_2.c -o dhry_2_reg.o
+! 	$(CC) -o $(PROGDIR)/dhry2reg ${CFLAGS} ${OPTON} $(SRCDIR)/dhry_1_reg.o $(SRCDIR)/dhry_2_reg.o
+! 	cd $(SRCDIR); rm -f dhry_1_reg.o dhry_2_reg.o
+  
+  
+  dhry2reg: $(SRCDIR)/dhry_1.c $(SRCDIR)/dhry_2.c $(SRCDIR)/dhry.h 
diff --git a/client/tests/unixbench/unixbench.py b/client/tests/unixbench/unixbench.py
index b3fe920..9983cec 100644
--- a/client/tests/unixbench/unixbench.py
+++ b/client/tests/unixbench/unixbench.py
@@ -19,7 +19,7 @@
 
         utils.system('patch -p1 < ../unixbench.patch')
         utils.system('patch -p1 < ../Makefile.patch')
-        utils.system('make')
+        utils.make()
         utils.system('rm pgms/select')
 
 
@@ -34,7 +34,15 @@
                     % ((stepsecs,)*5)
 
         os.chdir(self.srcdir)
-        utils.system(vars + ' ./Run ' + args)
+        try:
+            utils.system(vars + ' ./Run ' + args)
+        finally:
+            times_path = os.path.join(self.resultsdir, 'times')
+            # The 'times' file can be needlessly huge as it contains warnings
+            # and error messages printed out by small benchmarks that are
+            # run in a loop.  It can easily compress 100x in such cases.
+            if os.path.exists(times_path):
+                utils.system("gzip -9 '%s'" % (times_path,), ignore_status=True)
 
         report_path = os.path.join(self.resultsdir, 'report')
         self.report_data = open(report_path).readlines()[9:]
diff --git a/client/tests/unixbench5/Makefile.patch b/client/tests/unixbench5/Makefile.patch
new file mode 100644
index 0000000..27ac225
--- /dev/null
+++ b/client/tests/unixbench5/Makefile.patch
@@ -0,0 +1,11 @@
+--- Makefile.bak       2011-01-14 10:45:12.000000000 -0800
++++ Makefile   2011-01-14 10:46:54.000000000 -0800
+@@ -52,7 +52,7 @@
+ # COMPILER CONFIGURATION: Set "CC" to the name of the compiler to use
+ # to build the binary benchmarks.  You should also set "$cCompiler" in the
+ # Run script to the name of the compiler you want to test.
+-CC=gcc
++CC?=gcc
+
+ # OPTIMISATION SETTINGS:
+
diff --git a/client/tests/unixbench5/control b/client/tests/unixbench5/control
new file mode 100644
index 0000000..862a521
--- /dev/null
+++ b/client/tests/unixbench5/control
@@ -0,0 +1,26 @@
+NAME = 'Unix Bench 5'
+AUTHOR = 'adrianbg@google.com'
+TIME = 'MEDIUM'
+PURPOSE = 'Measure system level performance.'
+CRITERIA = 'This test is a benchmark.'
+TEST_CLASS = 'Kernel'
+TEST_CATEGORY = 'Benchmark'
+TEST_TYPE = 'client'
+DOC = """
+This test measures system-wide performance by running the following tests:
+  - Dhrystone - focuses on string handling.
+  - Whetstone - measure floating point operations.
+  - Execl Throughput - measure the number of execl calls per second.
+  - File Copy
+  - Pipe throughput
+  - Pipe-based context switching
+  - Process creation - number of times a process can fork and reap
+  - Shell Scripts - number of times a process can start and reap a script
+  - System Call Overhead - estimates the cost of entering and leaving the
+    kernel.
+
+For more information visit:
+http://code.google.com/p/byte-unixbench/
+"""
+
+job.run_test('unixbench5')
diff --git a/client/tests/unixbench5/unixbench-5.1.3.tgz b/client/tests/unixbench5/unixbench-5.1.3.tgz
new file mode 100644
index 0000000..c654b33
--- /dev/null
+++ b/client/tests/unixbench5/unixbench-5.1.3.tgz
Binary files differ
diff --git a/client/tests/unixbench5/unixbench5.py b/client/tests/unixbench5/unixbench5.py
new file mode 100644
index 0000000..46d176e
--- /dev/null
+++ b/client/tests/unixbench5/unixbench5.py
@@ -0,0 +1,251 @@
+import os, re
+from autotest_lib.client.bin import test, utils
+from autotest_lib.client.common_lib import error
+
+
+class unixbench5(test.test):
+    """
+    This test measures system-wide performance by running the following tests:
+      - Dhrystone - focuses on string handling.
+      - Whetstone - measure floating point operations.
+      - Execl Throughput - measure the number of execl calls per second.
+      - File Copy
+      - Pipe throughput
+      - Pipe-based context switching
+      - Process creation - number of times a process can fork and reap
+      - Shell Scripts - number of times a process can start and reap a script
+      - System Call Overhead - estimates the cost of entering and leaving the
+        kernel.
+
+    @see: http://code.google.com/p/byte-unixbench/
+    @author: Dale Curtis <dalecurtis@google.com>
+    """
+    version = 1
+
+
+    def initialize(self):
+        self.job.require_gcc()
+        self.err = []
+
+
+    def setup(self, tarball='unixbench-5.1.3.tgz'):
+        """
+        Compiles unixbench.
+
+        @param tarball: Path or URL to a unixbench tarball
+        @see: http://byte-unixbench.googlecode.com/files/unixbench-5.1.3.tgz
+        """
+        tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
+        utils.extract_tarball_to_dir(tarball, self.srcdir)
+        os.chdir(self.srcdir)
+
+        utils.system('patch -p0 < ../Makefile.patch')
+        utils.make()
+
+
+    def run_once(self, args=''):
+        vars = 'UB_TMPDIR="%s" UB_RESULTDIR="%s"' % (self.tmpdir,
+                                                     self.resultsdir)
+        os.chdir(self.srcdir)
+        self.report_data = utils.system_output(vars + ' ./Run ' + args)
+        self.results_path = os.path.join(self.resultsdir,
+                                         'raw_output_%s' % self.iteration)
+        utils.open_write_close(self.results_path, self.report_data)
+
+
+    def cleanup(self):
+        """
+        Check error index list and throw TestError if necessary.
+        """
+        if self.err:
+            e_msg = ("No measured results for output lines: %s\nOutput:%s" %
+                     (" ".join(self.err), self.report_data))
+            raise error.TestError(e_msg)
+
+
+    def process_section(self, section, suffix):
+        keyval = {}
+        subsections = section.split('\n\n')
+
+        if len(subsections) < 3:
+            raise error.TestError('Invalid output format. Unable to parse')
+
+        # Process the subsection containing performance results first.
+        for index, line in enumerate(subsections[1].strip().split('\n')):
+            # Look for problems first.
+            if re.search('no measured results', line, flags=re.IGNORECASE):
+                self.err.append(str(index + 1))
+
+            # Every performance result line ends with 6 values; the sixth from
+            # the end is the actual result. Make sure there are at least that
+            # many words in the line before processing.
+            words = line.lower().split()
+            if len(words) >= 6:
+                key = re.sub('\W', '', '_'.join(words[:-6]))
+                keyval[key + suffix] = words[-6]
+
+        # The final score should be the last item in the third subsection.
+        keyval['score' + suffix] = subsections[2].strip().split()[-1]
+
+        self.write_perf_keyval(keyval)
+
+
+    def postprocess_iteration(self):
+        # Break up sections around dividing lines.
+        sections = self.report_data.split('-'*72)
+
+        # First section is junk to us, second has results for single CPU run.
+        if len(sections) > 1:
+            self.process_section(section=sections[1], suffix='')
+
+            # Only machines with > 1 CPU will have a 3rd section.
+            if len(sections) > 2:
+                self.process_section(section=sections[2], suffix='_multi')
+        else:
+            raise error.TestError('Invalid output format. Unable to parse')
+
+
+""" Here is a sample output:
+
+   #    #  #    #  #  #    #          #####   ######  #    #   ####   #    #
+   #    #  ##   #  #   #  #           #    #  #       ##   #  #    #  #    #
+   #    #  # #  #  #    ##            #####   #####   # #  #  #       ######
+   #    #  #  # #  #    ##            #    #  #       #  # #  #       #    #
+   #    #  #   ##  #   #  #           #    #  #       #   ##  #    #  #    #
+    ####   #    #  #  #    #          #####   ######  #    #   ####   #    #
+
+   Version 5.1.2                      Based on the Byte Magazine Unix Benchmark
+
+   Multi-CPU version                  Version 5 revisions by Ian Smith,
+                                      Sunnyvale, CA, USA
+   December 22, 2007                  johantheghost at yahoo period com
+
+
+1 x Dhrystone 2 using register variables  1 2 3 4 5 6 7 8 9 10
+
+1 x Double-Precision Whetstone  1 2 3 4 5 6 7 8 9 10
+
+1 x Execl Throughput  1 2 3
+
+1 x File Copy 1024 bufsize 2000 maxblocks  1 2 3
+
+1 x File Copy 256 bufsize 500 maxblocks  1 2 3
+
+1 x File Copy 4096 bufsize 8000 maxblocks  1 2 3
+
+1 x Pipe Throughput  1 2 3 4 5 6 7 8 9 10
+
+1 x Pipe-based Context Switching  1 2 3 4 5 6 7 8 9 10
+
+1 x Process Creation  1 2 3
+
+1 x System Call Overhead  1 2 3 4 5 6 7 8 9 10
+
+1 x Shell Scripts (1 concurrent)  1 2 3
+
+1 x Shell Scripts (8 concurrent)  1 2 3
+
+2 x Dhrystone 2 using register variables  1 2 3 4 5 6 7 8 9 10
+
+2 x Double-Precision Whetstone  1 2 3 4 5 6 7 8 9 10
+
+2 x Execl Throughput  1 2 3
+
+2 x File Copy 1024 bufsize 2000 maxblocks  1 2 3
+
+2 x File Copy 256 bufsize 500 maxblocks  1 2 3
+
+2 x File Copy 4096 bufsize 8000 maxblocks  1 2 3
+
+2 x Pipe Throughput  1 2 3 4 5 6 7 8 9 10
+
+2 x Pipe-based Context Switching  1 2 3 4 5 6 7 8 9 10
+
+2 x Process Creation  1 2 3
+
+2 x System Call Overhead  1 2 3 4 5 6 7 8 9 10
+
+2 x Shell Scripts (1 concurrent)  1 2 3
+
+2 x Shell Scripts (8 concurrent)  1 2 3
+
+========================================================================
+   BYTE UNIX Benchmarks (Version 5.1.2)
+
+   System: localhost: GNU/Linux
+   OS: GNU/Linux -- 2.6.32.26+drm33.12 -- #1 SMP Wed Jan 12 16:16:05 PST 2011
+   Machine: i686 (GenuineIntel)
+   Language: en_US.utf8 (charmap=, collate=)
+   CPU 0: Intel(R) Atom(TM) CPU N455 @ 1.66GHz (3325.2 bogomips)
+          Hyper-Threading, x86-64, MMX, Physical Address Ext, SYSENTER/SYSEXIT
+   CPU 1: Intel(R) Atom(TM) CPU N455 @ 1.66GHz (3325.0 bogomips)
+          Hyper-Threading, x86-64, MMX, Physical Address Ext, SYSENTER/SYSEXIT
+   14:11:59 up 1 day,  1:10,  0 users,  load average: 0.47, 0.48, 0.51; runlevel
+
+------------------------------------------------------------------------
+Benchmark Run: Fri Jan 14 2011 14:11:59 - 14:41:26
+2 CPUs in system; running 1 parallel copy of tests
+
+Dhrystone 2 using register variables        2264000.6 lps   (10.0 s, 7 samples)
+Double-Precision Whetstone                      507.0 MWIPS (10.1 s, 7 samples)
+Execl Throughput                                796.7 lps   (30.0 s, 2 samples)
+File Copy 1024 bufsize 2000 maxblocks        110924.1 KBps  (30.1 s, 2 samples)
+File Copy 256 bufsize 500 maxblocks           32600.5 KBps  (30.1 s, 2 samples)
+File Copy 4096 bufsize 8000 maxblocks        284236.5 KBps  (30.0 s, 2 samples)
+Pipe Throughput                              301672.5 lps   (10.0 s, 7 samples)
+Pipe-based Context Switching                  29475.3 lps   (10.0 s, 7 samples)
+Process Creation                               3124.6 lps   (30.0 s, 2 samples)
+Shell Scripts (1 concurrent)                   1753.0 lpm   (60.0 s, 2 samples)
+Shell Scripts (8 concurrent)                    305.9 lpm   (60.1 s, 2 samples)
+System Call Overhead                         592781.7 lps   (10.0 s, 7 samples)
+
+System Benchmarks Index Values               BASELINE       RESULT    INDEX
+Dhrystone 2 using register variables         116700.0    2264000.6    194.0
+Double-Precision Whetstone                       55.0        507.0     92.2
+Execl Throughput                                 43.0        796.7    185.3
+File Copy 1024 bufsize 2000 maxblocks          3960.0     110924.1    280.1
+File Copy 256 bufsize 500 maxblocks            1655.0      32600.5    197.0
+File Copy 4096 bufsize 8000 maxblocks          5800.0     284236.5    490.1
+Pipe Throughput                               12440.0     301672.5    242.5
+Pipe-based Context Switching                   4000.0      29475.3     73.7
+Process Creation                                126.0       3124.6    248.0
+Shell Scripts (1 concurrent)                     42.4       1753.0    413.4
+Shell Scripts (8 concurrent)                      6.0        305.9    509.8
+System Call Overhead                          15000.0     592781.7    395.2
+                                                                   ========
+System Benchmarks Index Score                                         238.0
+
+------------------------------------------------------------------------
+Benchmark Run: Fri Jan 14 2011 14:41:26 - 15:09:23
+2 CPUs in system; running 2 parallel copies of tests
+
+Dhrystone 2 using register variables        3411919.6 lps   (10.0 s, 7 samples)
+Double-Precision Whetstone                      964.3 MWIPS (10.1 s, 7 samples)
+Execl Throughput                               2053.5 lps   (30.0 s, 2 samples)
+File Copy 1024 bufsize 2000 maxblocks        158308.0 KBps  (30.0 s, 2 samples)
+File Copy 256 bufsize 500 maxblocks           46249.5 KBps  (30.0 s, 2 samples)
+File Copy 4096 bufsize 8000 maxblocks        389881.9 KBps  (30.0 s, 2 samples)
+Pipe Throughput                              410193.1 lps   (10.0 s, 7 samples)
+Pipe-based Context Switching                 113780.0 lps   (10.0 s, 7 samples)
+Process Creation                               7609.0 lps   (30.0 s, 2 samples)
+Shell Scripts (1 concurrent)                   2355.0 lpm   (60.0 s, 2 samples)
+Shell Scripts (8 concurrent)                    308.1 lpm   (60.2 s, 2 samples)
+System Call Overhead                        1057063.2 lps   (10.0 s, 7 samples)
+
+System Benchmarks Index Values               BASELINE       RESULT    INDEX
+Dhrystone 2 using register variables         116700.0    3411919.6    292.4
+Double-Precision Whetstone                       55.0        964.3    175.3
+Execl Throughput                                 43.0       2053.5    477.6
+File Copy 1024 bufsize 2000 maxblocks          3960.0     158308.0    399.8
+File Copy 256 bufsize 500 maxblocks            1655.0      46249.5    279.5
+File Copy 4096 bufsize 8000 maxblocks          5800.0     389881.9    672.2
+Pipe Throughput                               12440.0     410193.1    329.7
+Pipe-based Context Switching                   4000.0     113780.0    284.5
+Process Creation                                126.0       7609.0    603.9
+Shell Scripts (1 concurrent)                     42.4       2355.0    555.4
+Shell Scripts (8 concurrent)                      6.0        308.1    513.5
+System Call Overhead                          15000.0    1057063.2    704.7
+                                                                   ========
+System Benchmarks Index Score                                         407.4
+
+"""
diff --git a/client/tools/autotest b/client/tools/autotest
index 061afcb..13fe1cc 100755
--- a/client/tools/autotest
+++ b/client/tools/autotest
@@ -2,13 +2,20 @@
 import sys,os
 
 autodir = None
-if os.path.exists('/etc/autotest.conf'):
-    autodir = os.path.dirname(os.path.realpath('/etc/autotest.conf'))
+autotest_conf = os.path.realpath('/etc/autotest.conf')
+
+if os.path.isfile(autotest_conf):
+    autodir = os.path.dirname(autotest_conf)
+
 if not autodir:
     for path in ['/usr/local/autotest', '/home/autotest']:
         if os.path.exists(os.path.join(path, 'bin/autotest')):
             autodir = path
 
+if not autodir:
+    print "Autotest home dir NOT FOUND"
+    sys.exit()
+
 autotest = os.path.join(autodir, 'bin/autotest')
 control = os.path.join(autodir, 'control')
 state = os.path.join(autodir, 'control.state')
diff --git a/conmux/drivers/dli-lpc b/conmux/drivers/dli-lpc
new file mode 100755
index 0000000..edf09e2
--- /dev/null
+++ b/conmux/drivers/dli-lpc
@@ -0,0 +1,52 @@
+#!/bin/sh
+#
+# Port control script for Digital Loggers Inc. Web Power Switch II and III
+#
+# Written by: Grant Likely <grant.likely@secretlab.ca>
+# Copyright 2010 Secret Lab Technologies Ltd.
+#
+# Usage: dli-lpc <admin:passwd@host> <port> {on|off|cycle}
+#
+# <port> is in the range 1..8.
+# 'cycle' will turn a port off and on with a 1 second delay.
+#
+# The Web Power Switch uses a simple http request protocol for controlling
+# the port state.  The action simply gets encoded into the url in the form:
+#
+#   http://<user>:<passwd>@<host[:port]>/outlet?<port-number>={ON|OFF|CCW}
+#
+# ON and OFF are self explanatory.
+# CCW means cycle power, but only has effect when the port is already on.
+#
+# The protocol is simple enough that wget is sufficient to control ports.
+
+baseurl="http://${1}"
+porturl="${baseurl}/outlet?${2}"
+
+wget_cmd="wget --auth-no-challenge -O /dev/null"
+
+port_set() {
+	${wget_cmd} "${porturl}=${1}" > /dev/null 2>&1
+}
+
+case "$3" in
+  on)
+	port_set ON
+	;;
+  off)
+	port_set OFF
+	;;
+  cycle)
+	# The CCW command *could* be used here, but the command has no
+	# effect if the port is in the OFF state.
+	port_set OFF
+	sleep 1s
+	port_set ON
+	;;
+  *)
+	echo "Usage: $0 <admin:passwd@host> <port> {on|off|cycle}"
+	exit 1;
+	;;
+esac
+
+exit 0
diff --git a/conmux/drivers/fence_apc_snmp.py b/conmux/drivers/fence_apc_snmp.py
new file mode 100755
index 0000000..3595071
--- /dev/null
+++ b/conmux/drivers/fence_apc_snmp.py
@@ -0,0 +1,370 @@
+#!/usr/bin/python
+
+#############################################################################
+#############################################################################
+##
+##  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
+##  Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
+##
+##  This copyrighted material is made available to anyone wishing to use,
+##  modify, copy, or redistribute it subject to the terms and conditions
+##  of the GNU General Public License v.2.
+##
+#############################################################################
+## This APC Fence script uses snmp to control the APC power
+## switch. This script requires that net-snmp-utils be installed
+## on all nodes in the cluster, and that the powernet369.mib file be
+## located in /usr/share/snmp/mibs/
+#############################################################################
+#############################################################################
+
+
+
+import getopt, sys
+import os
+import time
+import select
+import signal
+from glob import glob
+
+#BEGIN_VERSION_GENERATION
+FENCE_RELEASE_NAME=""
+REDHAT_COPYRIGHT=""
+BUILD_DATE=""
+#END_VERSION_GENERATION
+
+POWER_ON="outletOn"
+POWER_OFF="outletOff"
+POWER_REBOOT="outletReboot"
+
+def usage():
+    print "Usage:";
+    print "";
+    print "Options:";
+    print "  -a <ip>          IP address or hostname of MasterSwitch";
+    print "  -h               usage";
+    print "  -l <name>        Login name";
+    print "  -n <num>         Outlet number to change";
+    print "  -o <string>      Action: Reboot (default), Off or On";
+    print "  -p <string>      Login password";
+    print "  -q               quiet mode";
+    print "  -V               version";
+    print "  -v               Log to file /tmp/apclog";
+
+    print sys.argv
+    sys.exit(0);
+
+
+
+def main():
+    apc_base = "enterprises.apc.products.hardware."
+    apc_outletctl = "masterswitch.sPDUOutletControl.sPDUOutletControlTable.sPDUOutletControlEntry.sPDUOutletCtl."
+    apc_outletstatus = "masterswitch.sPDUOutletStatus.sPDUOutletStatusMSPTable.sPDUOutletStatusMSPEntry.sPDUOutletStatusMSP."
+
+    address = ""
+    output = ""
+    port = ""
+    action = "outletReboot"
+    status_check = False
+    verbose = False
+
+    if not glob('/usr/share/snmp/mibs/powernet*.mib'):
+        sys.stderr.write('This APC Fence script uses snmp to control the APC power switch. This script requires that net-snmp-utils be installed on all nodes in the cluster, and that the powernet369.mib file be located in /usr/share/snmp/mibs/\n')
+        sys.exit(1)
+
+    if len(sys.argv) > 1:
+        try:
+            opts, args = getopt.getopt(sys.argv[1:], "a:hl:p:n:o:vV", ["help", "output="])
+        except getopt.GetoptError:
+            #print help info and quit
+            usage()
+            sys.exit(2)
+
+        for o, a in opts:
+            if o == "-v":
+                verbose = True
+            if o == "-V":
+                print "%s\n" % FENCE_RELEASE_NAME
+                print "%s\n" % REDHAT_COPYRIGHT
+                print "%s\n" % BUILD_DATE
+                sys.exit(0)
+            if o in ("-h", "--help"):
+                usage()
+                sys.exit(0)
+            if o == "-n":
+                port = a
+            if o  == "-o":
+                lcase = a.lower() #Lower case string
+                if lcase == "off":
+                    action = "outletOff"
+                elif lcase == "on":
+                    action = "outletOn"
+                elif lcase == "reboot":
+                    action = "outletReboot"
+                elif lcase == "status":
+                    #action = "sPDUOutletStatusMSPOutletState"
+                    action = ""
+                    status_check = True
+                else:
+                    usage()
+                    sys.exit()
+            if o == "-a":
+                address = a
+
+        if address == "":
+            usage()
+            sys.exit(1)
+
+        if port == "":
+            usage()
+            sys.exit(1)
+
+    else: #Get opts from stdin
+        params = {}
+        #place params in dict
+        for line in sys.stdin:
+            val = line.split("=")
+            if len(val) == 2:
+                params[val[0].strip()] = val[1].strip()
+
+        try:
+            address = params["ipaddr"]
+        except KeyError, e:
+            sys.stderr.write("FENCE: Missing ipaddr param for fence_apc...exiting")
+            sys.exit(1)
+        try:
+            login = params["login"]
+        except KeyError, e:
+            sys.stderr.write("FENCE: Missing login param for fence_apc...exiting")
+            sys.exit(1)
+
+        try:
+            passwd = params["passwd"]
+        except KeyError, e:
+            sys.stderr.write("FENCE: Missing passwd param for fence_apc...exiting")
+            sys.exit(1)
+
+        try:
+            port = params["port"]
+        except KeyError, e:
+            sys.stderr.write("FENCE: Missing port param for fence_apc...exiting")
+            sys.exit(1)
+
+
+        try:
+            a = params["option"]
+            if a == "Off" or a == "OFF" or a == "off":
+                action = POWER_OFF
+            elif a == "On" or a == "ON" or a == "on":
+                action = POWER_ON
+            elif a == "Reboot" or a == "REBOOT" or a == "reboot":
+                action = POWER_REBOOT
+        except KeyError, e:
+            action = POWER_REBOOT
+
+        ####End of stdin section
+
+    apc_command = apc_base + apc_outletctl + port
+
+    args_status = list()
+    args_off = list()
+    args_on = list()
+
+    args_status.append("/usr/bin/snmpget")
+    args_status.append("-Oqu") #sets printing options
+    args_status.append("-v")
+    args_status.append("1")
+    args_status.append("-c")
+    args_status.append("private")
+    args_status.append("-m")
+    args_status.append("ALL")
+    args_status.append(address)
+    args_status.append(apc_command)
+
+    args_off.append("/usr/bin/snmpset")
+    args_off.append("-Oqu") #sets printing options
+    args_off.append("-v")
+    args_off.append("1")
+    args_off.append("-c")
+    args_off.append("private")
+    args_off.append("-m")
+    args_off.append("ALL")
+    args_off.append(address)
+    args_off.append(apc_command)
+    args_off.append("i")
+    args_off.append("outletOff")
+
+    args_on.append("/usr/bin/snmpset")
+    args_on.append("-Oqu") #sets printing options
+    args_on.append("-v")
+    args_on.append("1")
+    args_on.append("-c")
+    args_on.append("private")
+    args_on.append("-m")
+    args_on.append("ALL")
+    args_on.append(address)
+    args_on.append(apc_command)
+    args_on.append("i")
+    args_on.append("outletOn")
+
+    cmdstr_status = ' '.join(args_status)
+    cmdstr_off = ' '.join(args_off)
+    cmdstr_on = ' '.join(args_on)
+
+##This section issues the actual commands. Reboot is split into
+##Off, then On to make certain both actions work as planned.
+##
+##The status command just dumps the outlet status to stdout.
+##The status checks that are made when turning an outlet on or off, though,
+##use the execWithCaptureStatus so that the stdout from snmpget can be
+##examined and the desired operation confirmed.
+
+    if status_check:
+        if verbose:
+            fd = open("/tmp/apclog", "w")
+            fd.write("Attempting the following command: %s\n" % cmdstr_status)
+        strr = os.system(cmdstr_status)
+        print strr
+        if verbose:
+            fd.write("Result: %s\n" % strr)
+            fd.close()
+
+    else:
+        if action == POWER_OFF:
+            if verbose:
+                fd = open("/tmp/apclog", "w")
+                fd.write("Attempting the following command: %s\n" % cmdstr_off)
+            strr = os.system(cmdstr_off)
+            time.sleep(1)
+            strr,code = execWithCaptureStatus("/usr/bin/snmpget",args_status)
+            if verbose:
+                fd.write("Result: %s\n" % strr)
+                fd.close()
+            if strr.find(POWER_OFF) >= 0:
+                print "Success. Outlet off"
+                sys.exit(0)
+            else:
+                if verbose:
+                    fd.write("Unable to power off apc outlet")
+                    fd.close()
+                sys.exit(1)
+
+        elif action == POWER_ON:
+            if verbose:
+                fd = open("/tmp/apclog", "w")
+                fd.write("Attempting the following command: %s\n" % cmdstr_on)
+            strr = os.system(cmdstr_on)
+            time.sleep(1)
+            strr,code = execWithCaptureStatus("/usr/bin/snmpget",args_status)
+            #strr = os.system(cmdstr_status)
+            if verbose:
+                fd.write("Result: %s\n" % strr)
+            if strr.find(POWER_ON) >= 0:
+                if verbose:
+                    fd.close()
+                print "Success. Outlet On."
+                sys.exit(0)
+            else:
+                print "Unable to power on apc outlet"
+                if verbose:
+                    fd.write("Unable to power on apc outlet")
+                    fd.close()
+                sys.exit(1)
+
+        elif action == POWER_REBOOT:
+            if verbose:
+                fd = open("/tmp/apclog", "w")
+                fd.write("Attempting the following command: %s\n" % cmdstr_off)
+            strr = os.system(cmdstr_off)
+            time.sleep(1)
+            strr,code = execWithCaptureStatus("/usr/bin/snmpget",args_status)
+            #strr = os.system(cmdstr_status)
+            if verbose:
+                fd.write("Result: %s\n" % strr)
+            if strr.find(POWER_OFF) < 0:
+                print "Unable to power off apc outlet"
+                if verbose:
+                    fd.write("Unable to power off apc outlet")
+                    fd.close()
+                sys.exit(1)
+
+            if verbose:
+                fd.write("Attempting the following command: %s\n" % cmdstr_on)
+            strr = os.system(cmdstr_on)
+            time.sleep(1)
+            strr,code = execWithCaptureStatus("/usr/bin/snmpget",args_status)
+            #strr = os.system(cmdstr_status)
+            if verbose:
+                fd.write("Result: %s\n" % strr)
+            if strr.find(POWER_ON) >= 0:
+                if verbose:
+                    fd.close()
+                print "Success: Outlet Rebooted."
+                sys.exit(0)
+            else:
+                print "Unable to power on apc outlet"
+                if verbose:
+                    fd.write("Unable to power on apc outlet")
+                    fd.close()
+                sys.exit(1)
+
+def execWithCaptureStatus(command, argv, searchPath = 0, root = '/', stdin = 0,
+                          catchfd = 1, closefd = -1):
+
+    if not os.access (root + command, os.X_OK):
+        raise RuntimeError, command + " cannot be run"
+
+    (read, write) = os.pipe()
+
+    childpid = os.fork()
+    if (not childpid):
+        if (root and root != '/'): os.chroot (root)
+        if isinstance(catchfd, tuple):
+            for fd in catchfd:
+                os.dup2(write, fd)
+        else:
+            os.dup2(write, catchfd)
+        os.close(write)
+        os.close(read)
+
+        if closefd != -1:
+            os.close(closefd)
+
+        if stdin:
+            os.dup2(stdin, 0)
+            os.close(stdin)
+
+        if (searchPath):
+            os.execvp(command, argv)
+        else:
+            os.execv(command, argv)
+
+        sys.exit(1)
+
+    os.close(write)
+
+    rc = ""
+    s = "1"
+    while (s):
+        select.select([read], [], [])
+        s = os.read(read, 1000)
+        rc = rc + s
+
+    os.close(read)
+
+    pid = -1
+    status = -1
+    try:
+        (pid, status) = os.waitpid(childpid, 0)
+    except OSError, (errno, msg):
+        print __name__, "waitpid:", msg
+
+    if os.WIFEXITED(status) and (os.WEXITSTATUS(status) == 0):
+        status = os.WEXITSTATUS(status)
+    else:
+        status = -1
+
+    return (rc, status)
+
+if __name__ == "__main__":
+    main()
diff --git a/conmux/drivers/module.mk b/conmux/drivers/module.mk
index 0cc24f9..7c36663 100644
--- a/conmux/drivers/module.mk
+++ b/conmux/drivers/module.mk
@@ -3,9 +3,9 @@
 #
 # The Console Multiplexor is released under the GNU Public License V2
 
-DRIVERS:=blade hmc reboot-netfinity reboot-newisys reboot-numaq \
+DRIVERS:=blade dli-lpc hmc reboot-netfinity reboot-newisys reboot-numaq \
 	reboot-rsa reboot-rsa2 zseries-console x3270_glue.expect \
-	reboot-acs48 reboot-apc reboot-laurel
+	reboot-acs48 reboot-apc reboot-laurel fence_apc_snmp.py
 
 install::
 	@[ -d $(BASE)/lib/drivers ] || mkdir $(BASE)/lib/drivers
diff --git a/conmux/examples/apc_snmp.cf b/conmux/examples/apc_snmp.cf
new file mode 100644
index 0000000..4164ff7
--- /dev/null
+++ b/conmux/examples/apc_snmp.cf
@@ -0,0 +1,4 @@
+listener localhost/ts63
+socket console 'console' 'localhost:13467'
+command 'config' 'Show conmux configuration' 'cat /usr/local/conmux/etc/apc_snmp.cf'
+command 'hardreset' 'initiated a hard reset' 'fence_apc_snmp.py -a pdu.xx.com -n 11 -l root'
diff --git a/conmux/start b/conmux/start
index 92402af..dabcd58 100755
--- a/conmux/start
+++ b/conmux/start
@@ -1,4 +1,4 @@
-#! /bin/sh
+#!/bin/bash
 #
 # start -- start up configured conmux servers on this host.
 #
diff --git a/frontend/afe/resources_test.py b/frontend/afe/resources_test.py
old mode 100644
new mode 100755
diff --git a/frontend/client/gwt_dir b/frontend/client/gwt_dir
old mode 100644
new mode 100755
diff --git a/frontend/client/src/autotest/EmbeddedTkoClient.gwt.xml b/frontend/client/src/autotest/EmbeddedTkoClient.gwt.xml
index 20eb6dd..3b94c39 100644
--- a/frontend/client/src/autotest/EmbeddedTkoClient.gwt.xml
+++ b/frontend/client/src/autotest/EmbeddedTkoClient.gwt.xml
@@ -12,5 +12,5 @@
   <stylesheet src='afeclient.css'/>
   <stylesheet src='tkoclient.css'/>
   
-  <!-- <set-property name="user.agent" value="gecko"/> -->
+  <!-- <set-property name="user.agent" value="gecko1_8"/> -->
 </module>
diff --git a/frontend/migrations/062_drone_sets_unique.py b/frontend/migrations/062_drone_sets_unique.py
index 3c031c6..738a0f0 100644
--- a/frontend/migrations/062_drone_sets_unique.py
+++ b/frontend/migrations/062_drone_sets_unique.py
@@ -41,9 +41,9 @@
              'GROUP BY drone_id HAVING COUNT(*) > 1')
     rows = manager.execute(query)
     if rows:
-      raise Exception('Some drones are associated with more than one drone '
-                      'set. Please remove all duplicates before running this '
-                      'migration.')
+        raise Exception('Some drones are associated with more than one drone '
+                        'set. Please remove all duplicates before running this '
+                        'migration.')
     manager.execute_script(UP_SQL)
 
     if db_utils.check_index_exists(manager, 'afe_drone_sets_drones',
diff --git a/frontend/planner/control_file_unittest.py b/frontend/planner/control_file_unittest.py
old mode 100644
new mode 100755
diff --git a/frontend/planner/execution_engine_unittest.py b/frontend/planner/execution_engine_unittest.py
old mode 100644
new mode 100755
diff --git a/frontend/planner/rpc_interface_unittest.py b/frontend/planner/rpc_interface_unittest.py
old mode 100644
new mode 100755
diff --git a/frontend/planner/rpc_utils_unittest.py b/frontend/planner/rpc_utils_unittest.py
old mode 100644
new mode 100755
diff --git a/frontend/tko/common.py b/frontend/tko/common.py
index d37136e..1edf302 100644
--- a/frontend/tko/common.py
+++ b/frontend/tko/common.py
@@ -6,19 +6,3 @@
 import setup_modules
 sys.path.pop(0)
 setup_modules.setup(base_path=autotest_dir, root_module_name="autotest_lib")
-import os, sys
-dirname = os.path.dirname(sys.modules[__name__].__file__)
-autotest_dir = os.path.abspath(os.path.join(dirname, '..', '..'))
-client_dir = os.path.join(autotest_dir, "client")
-sys.path.insert(0, client_dir)
-import setup_modules
-sys.path.pop(0)
-setup_modules.setup(base_path=autotest_dir, root_module_name="autotest_lib")
-import os, sys
-dirname = os.path.dirname(sys.modules[__name__].__file__)
-autotest_dir = os.path.abspath(os.path.join(dirname, '..', '..'))
-client_dir = os.path.join(autotest_dir, "client")
-sys.path.insert(0, client_dir)
-import setup_modules
-sys.path.pop(0)
-setup_modules.setup(base_path=autotest_dir, root_module_name="autotest_lib")
diff --git a/frontend/tko/resources_test.py b/frontend/tko/resources_test.py
old mode 100644
new mode 100755
diff --git a/frontend/tko/rpc_interface_unittest.py b/frontend/tko/rpc_interface_unittest.py
old mode 100644
new mode 100755
diff --git a/global_config.ini b/global_config.ini
index 6f721f9..f794cb1 100644
--- a/global_config.ini
+++ b/global_config.ini
@@ -77,7 +77,7 @@
 gc_stats_interval_mins: 360
 # set nonzero to enable periodic reverification of all dead hosts
 reverify_period_minutes: 0
 reverify_max_hosts_at_once: 0
 drone_sets_enabled: False
 # default_drone_set_name: This is required if drone sets are enabled.
 default_drone_set_name:
diff --git a/scheduler/drone_manager.py b/scheduler/drone_manager.py
index 75724f3..e094f14 100644
--- a/scheduler/drone_manager.py
+++ b/scheduler/drone_manager.py
@@ -159,8 +159,7 @@
         self._results_dir = base_results_dir
 
         for hostname in drone_hostnames:
-            drone = self._add_drone(hostname)
-            drone.call('initialize', self.absolute_path(''))
+            self._add_drone(hostname)
 
         if not self._drones:
             # all drones failed to initialize
@@ -205,8 +204,9 @@
     def _add_drone(self, hostname):
         logging.info('Adding drone %s' % hostname)
         drone = drones.get_drone(hostname)
-        self._drones[drone.hostname] = drone
-        return drone
+        if drone:
+            self._drones[drone.hostname] = drone
+            drone.call('initialize', self.absolute_path(''))
 
 
     def _remove_drone(self, hostname):
diff --git a/scheduler/drones.py b/scheduler/drones.py
index 85a5ee2..b742b55 100644
--- a/scheduler/drones.py
+++ b/scheduler/drones.py
@@ -7,6 +7,11 @@
 AUTOTEST_INSTALL_DIR = global_config.global_config.get_config_value('SCHEDULER',
                                                  'drone_installation_directory')
 
+class DroneUnreachable(Exception):
+    """The drone is non-sshable."""
+    pass
+
+
 class _AbstractDrone(object):
     """
     Attributes:
@@ -111,6 +116,9 @@
         super(_RemoteDrone, self).__init__()
         self.hostname = hostname
         self._host = drone_utility.create_host(hostname)
+        if not self._host.is_up():
+            logging.error('Drone %s is unpingable, kicking out', hostname)
+            raise DroneUnreachable
         self._autotest_install_dir = AUTOTEST_INSTALL_DIR
 
 
@@ -156,4 +164,7 @@
     """
     if hostname == 'localhost':
         return _LocalDrone()
-    return _RemoteDrone(hostname)
+    try:
+        return _RemoteDrone(hostname)
+    except DroneUnreachable:
+        return None
diff --git a/scheduler/metahost_scheduler.py b/scheduler/metahost_scheduler.py
index 9588e95..98f49be 100644
--- a/scheduler/metahost_scheduler.py
+++ b/scheduler/metahost_scheduler.py
@@ -54,16 +54,16 @@
 
 
     def schedule_metahost(self, queue_entry, scheduling_utility):
-         """Schedule the given queue entry, if possible.
+        """Schedule the given queue entry, if possible.
 
-         This method should make necessary database changes culminating in
-         assigning a host to the given queue entry in the database.  It may
-         take no action if no host can be assigned currently.
+        This method should make necessary database changes culminating in
+        assigning a host to the given queue entry in the database.  It may
+        take no action if no host can be assigned currently.
 
-         @param queue_entry: a HostQueueEntry DBObject
-         @param scheduling_utility: a HostSchedulingUtility object
-         """
-         raise NotImplementedError
+        @param queue_entry: a HostQueueEntry DBObject
+        @param scheduling_utility: a HostSchedulingUtility object
+        """
+        raise NotImplementedError
 
 
     def recovery_on_startup(self):
diff --git a/scheduler/monitor_db.py b/scheduler/monitor_db.py
index 7ef0db9..d0cf46e 100755
--- a/scheduler/monitor_db.py
+++ b/scheduler/monitor_db.py
@@ -262,7 +262,7 @@
     """Raised by HostScheduler when an inconsistent state occurs."""
 
 
-class HostScheduler(metahost_scheduler.HostSchedulingUtility):
+class BaseHostScheduler(metahost_scheduler.HostSchedulingUtility):
     """Handles the logic for choosing when to run jobs and on which hosts.
 
     This class makes several queries to the database on each tick, building up
@@ -665,6 +665,15 @@
         return []
 
 
+site_host_scheduler = utils.import_site_class(__file__,
+                                  "autotest_lib.scheduler.site_host_scheduler",
+                                  "site_host_scheduler", BaseHostScheduler)
+
+
+class HostScheduler(site_host_scheduler):
+    pass
+
+
 class Dispatcher(object):
     def __init__(self):
         self._agents = []
diff --git a/server/autoserv b/server/autoserv
index 1cec992..2c4acb1 100755
--- a/server/autoserv
+++ b/server/autoserv
@@ -89,13 +89,11 @@
 
     # can't be both a client and a server side test
     if client and server:
-        print "Can not specify a test as both server and client!"
-        sys.exit(1)
+        parser.parser.error("Can not specify a test as both server and client!")
 
     if len(parser.args) < 1 and not (verify or repair or cleanup
                                      or collect_crashinfo):
-        print parser.parser.print_help()
-        sys.exit(1)
+        parser.parser.error("Missing argument: control file")
 
     # We have a control file unless it's just a verify/repair/cleanup job
     if len(parser.args) > 0:
@@ -116,15 +114,13 @@
     if machines:
         for machine in machines:
             if not machine or re.search('\s', machine):
-                print "Invalid machine %s" % str(machine)
-                sys.exit(1)
+                parser.parser.error("Invalid machine: %s" % str(machine))
         machines = list(set(machines))
         machines.sort()
 
     if group_name and len(machines) < 2:
-        print ("-G %r may only be supplied with more than one machine."
+        parser.parser.error("-G %r may only be supplied with more than one machine."
                % group_name)
-        sys.exit(1)
 
     kwargs = {'group_name': group_name, 'tag': execution_tag}
     if control_filename:
diff --git a/server/autoserv_parser.py b/server/autoserv_parser.py
index cbb7f6c..a955039 100644
--- a/server/autoserv_parser.py
+++ b/server/autoserv_parser.py
@@ -13,7 +13,7 @@
     """
     def __init__(self):
         self.args = sys.argv[1:]
-        self.parser = optparse.OptionParser()
+        self.parser = optparse.OptionParser(usage="%prog [options] [control-file]")
         self.setup_options()
 
         # parse an empty list of arguments in order to set self.options
diff --git a/server/autotest.py b/server/autotest.py
index eb25095..74a76c7 100644
--- a/server/autotest.py
+++ b/server/autotest.py
@@ -76,6 +76,7 @@
             try:
                 autotest_binary = os.path.join(path, 'bin', 'autotest')
                 host.run('test -x %s' % utils.sh_escape(autotest_binary))
+                host.run('test -w %s' % utils.sh_escape(path))
                 logging.debug('Found existing autodir at %s', path)
                 return path
             except error.AutoservRunError:
@@ -109,6 +110,7 @@
         for path in client_autodir_paths:
             try:
                 host.run('mkdir -p %s' % utils.sh_escape(path))
+                host.run('test -w %s' % utils.sh_escape(path))
                 return path
             except error.AutoservRunError:
                 logging.debug('Failed to create %s', path)
diff --git a/server/autotest_unittest.py b/server/autotest_unittest.py
index 6025af6..04023e9 100755
--- a/server/autotest_unittest.py
+++ b/server/autotest_unittest.py
@@ -256,6 +256,7 @@
         self.host.get_autodir.expect_call().and_return(None)
         self._expect_failed_run('test -x /some/path/bin/autotest')
         self.host.run.expect_call('test -x /another/path/bin/autotest')
+        self.host.run.expect_call('test -w /another/path')
 
         autodir = autotest.Autotest.get_installed_autodir(self.host)
         self.assertEquals(autodir, '/another/path')
@@ -268,6 +269,7 @@
         self._expect_failed_run('test -x /another/path/bin/autotest')
         self._expect_failed_run('mkdir -p /some/path')
         self.host.run.expect_call('mkdir -p /another/path')
+        self.host.run.expect_call('test -w /another/path')
 
         install_dir = autotest.Autotest.get_install_dir(self.host)
         self.assertEquals(install_dir, '/another/path')
diff --git a/server/base_utils.py b/server/base_utils.py
index 6d683b2..1c58609 100644
--- a/server/base_utils.py
+++ b/server/base_utils.py
@@ -244,45 +244,27 @@
     return (ntuples, failures)
 
 
-def parse_machine(machine, user = 'root', port = 22, password = ''):
+def parse_machine(machine, user='root', password='', port=22):
     """
     Parse the machine string user:pass@host:port and return it separately,
     if the machine string is not complete, use the default parameters
     when appropriate.
     """
 
-    user = user
-    port = port
-    password = password
+    if '@' in machine:
+        user, machine = machine.split('@', 1)
 
-    if re.search('@', machine):
-        machine = machine.split('@')
+    if ':' in user:
+        user, password = user.split(':', 1)
 
-        if re.search(':', machine[0]):
-            machine[0] = machine[0].split(':')
-            user = machine[0][0]
-            password = machine[0][1]
+    if ':' in machine:
+        machine, port = machine.split(':', 1)
+        port = int(port)
 
-        else:
-            user = machine[0]
+    if not machine or not user:
+        raise ValueError
 
-        if re.search(':', machine[1]):
-            machine[1] = machine[1].split(':')
-            hostname = machine[1][0]
-            port = int(machine[1][1])
-
-        else:
-            hostname = machine[1]
-
-    elif re.search(':', machine):
-        machine = machine.split(':')
-        hostname = machine[0]
-        port = int(machine[1])
-
-    else:
-        hostname = machine
-
-    return hostname, user, password, port
+    return machine, user, password, port
 
 
 def get_public_key():
diff --git a/server/base_utils_unittest.py b/server/base_utils_unittest.py
index 111dfbe..feb9ace 100755
--- a/server/base_utils_unittest.py
+++ b/server/base_utils_unittest.py
@@ -26,5 +26,37 @@
         self.assertEquals(self.failures, failures)
 
 
+    # parse_machine() test cases
+    def test_parse_machine_good(self):
+        '''test that parse_machine() is outputting the correct data'''
+        gooddata = (('host',                ('host', 'root', '', 22)),
+                    ('host:21',             ('host', 'root', '', 21)),
+                    ('user@host',           ('host', 'user', '', 22)),
+                    ('user:pass@host',      ('host', 'user', 'pass', 22)),
+                    ('user:pass@host:1234', ('host', 'user', 'pass', 1234)),
+                   )
+        for machine, result in gooddata:
+            self.assertEquals(utils.parse_machine(machine), result)
+
+
+    def test_parse_machine_override(self):
+        '''Test that parse_machine() defaults can be overridden'''
+        self.assertEquals(utils.parse_machine('host', 'bob', 'foo', 1234),
+                          ('host', 'bob', 'foo', 1234))
+
+
+    def test_parse_machine_bad(self):
+        '''test that bad data passed to parse_machine() will raise an exception'''
+        baddata = (('host:port', ValueError),   # pass a non-integer string for port
+                   ('host:22:33', ValueError),  # pass two ports
+                   (':22', ValueError),         # neglect to pass a hostname #1
+                   ('user@', ValueError),       # neglect to pass a hostname #2
+                   ('user@:22', ValueError),    # neglect to pass a hostname #3
+                   (':pass@host', ValueError),  # neglect to pass a username
+                  )
+        for machine, exception in baddata:
+            self.assertRaises(exception, utils.parse_machine, machine)
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/server/control_segments/cleanup b/server/control_segments/cleanup
index 1a201bd..6ac6788 100644
--- a/server/control_segments/cleanup
+++ b/server/control_segments/cleanup
@@ -1,8 +1,5 @@
 def cleanup(machine):
-    hostname, user, passwd, port = parse_machine(machine, ssh_user,
-                                                 ssh_port, ssh_pass)
-    host = hosts.create_host(hostname, user=user, port=port, initialize=False,
-                             password=passwd, auto_monitor=False)
+    host = hosts.create_host(machine, initialize=False, auto_monitor=False)
     host.cleanup()
 
 
diff --git a/server/control_segments/client_wrapper b/server/control_segments/client_wrapper
index f8acae3..0d29c7e 100644
--- a/server/control_segments/client_wrapper
+++ b/server/control_segments/client_wrapper
@@ -2,9 +2,7 @@
 
 
 def run_client(machine):
-    hostname, user, passwd, port = parse_machine(machine, ssh_user, ssh_port,
-                                                 ssh_pass)
-    host = hosts.create_host(hostname, user=user, port=port, password=passwd)
+    host = hosts.create_host(machine)
     host.log_kernel()
     at.run(control, host=host)
 
diff --git a/server/control_segments/crashdumps b/server/control_segments/crashdumps
index 7bad63d..b5ebe78 100644
--- a/server/control_segments/crashdumps
+++ b/server/control_segments/crashdumps
@@ -2,10 +2,7 @@
 
 
 def crashdumps(machine):
-    hostname, user, passwd, port = parse_machine(machine, ssh_user,
-                                                 ssh_port, ssh_pass)
-    host = hosts.create_host(hostname, user=user, port=port, initialize=False,
-                             password=passwd, auto_monitor=False)
+    host = hosts.create_host(machine, initialize=False, auto_monitor=False)
     crashcollect.get_crashdumps(host, test_start_time)
 
 
diff --git a/server/control_segments/crashinfo b/server/control_segments/crashinfo
index c273620..c802832 100644
--- a/server/control_segments/crashinfo
+++ b/server/control_segments/crashinfo
@@ -2,10 +2,7 @@
 
 
 def crashinfo(machine):
-    hostname, user, passwd, port = parse_machine(machine, ssh_user,
-                                                 ssh_port, ssh_pass)
-    host = hosts.create_host(hostname, user=user, port=port, initialize=False,
-                             password=passwd, auto_monitor=False)
+    host = hosts.create_host(machine, initialize=False, auto_monitor=False)
     crashcollect.get_crashinfo(host, test_start_time)
 
 
diff --git a/server/control_segments/install b/server/control_segments/install
index aba79ae..20d6944 100644
--- a/server/control_segments/install
+++ b/server/control_segments/install
@@ -1,8 +1,5 @@
 def install(machine):
-    hostname, user, passwd, port = parse_machine(machine, ssh_user,
-                                                 ssh_port, ssh_pass)
-    host = hosts.create_host(hostname, user=user, port=port, initialize=False,
-                             password=passwd, auto_monitor=False)
+    host = hosts.create_host(machine, initialize=False, auto_monitor=False)
     host.machine_install()
 
 
diff --git a/server/hosts/abstract_ssh.py b/server/hosts/abstract_ssh.py
index 3d8d9e9..3723c46 100644
--- a/server/hosts/abstract_ssh.py
+++ b/server/hosts/abstract_ssh.py
@@ -115,6 +115,17 @@
                           " ".join(sources), dest)
 
 
+    def _make_ssh_cmd(self, cmd):
+        """
+        Create a base ssh command string for the host which can be used
+        to run commands directly on the machine
+        """
+        base_cmd = make_ssh_command(user=self.user, port=self.port,
+                                    opts=self.master_ssh_option,
+                                    hosts_file=self.known_hosts_fd)
+
+        return '%s %s "%s"' % (base_cmd, self.hostname, utils.sh_escape(cmd))
+
     def _make_scp_cmd(self, sources, dest):
         """
         Given a list of source paths and a destination path, produces the
diff --git a/server/hosts/factory.py b/server/hosts/factory.py
index f1a054f..7a2a724 100644
--- a/server/hosts/factory.py
+++ b/server/hosts/factory.py
@@ -68,9 +68,8 @@
     site_factory.postprocess_classes(classes, hostname,
                                      auto_monitor=auto_monitor, **args)
 
-    args['user'] = ssh_user
-    args['port'] = ssh_port
-    args['password'] = ssh_pass
+    hostname, args['user'], args['password'], args['port'] = \
+            server_utils.parse_machine(hostname, ssh_user, ssh_pass, ssh_port)
 
     # create a custom host class for this machine and return an instance of it
     host_class = type("%s_host" % hostname, tuple(classes), {})
diff --git a/server/hosts/logfile_monitor.py b/server/hosts/logfile_monitor.py
index 1608a6b..9595cc8 100644
--- a/server/hosts/logfile_monitor.py
+++ b/server/hosts/logfile_monitor.py
@@ -26,15 +26,6 @@
     """Error occurred launching followfiles remotely."""
 
 
-def run_cmd_on_host(hostname, cmd, stdin, stdout, stderr):
-    base_cmd = abstract_ssh.make_ssh_command()
-    full_cmd = "%s %s \"%s\"" % (base_cmd, hostname,
-                                 server_utils.sh_escape(cmd))
-
-    return subprocess.Popen(full_cmd, stdin=stdin, stdout=stdout,
-                            stderr=stderr, shell=True)
-
-
 def list_remote_pythons(host):
     """List out installed pythons on host."""
     result = host.run('ls /usr/bin/python[0-9]*')
@@ -72,25 +63,24 @@
             raise FollowFilesLaunchError('No supported Python on host.')
 
     remote_monitordir = copy_monitordir(host)
-    remote_script_path = os.path.join(
-        remote_monitordir, 'followfiles.py')
+    remote_script_path = os.path.join(remote_monitordir, 'followfiles.py')
 
     followfiles_cmd = '%s %s --lastlines_dirpath=%s %s' % (
         supported_python, remote_script_path,
         lastlines_dirpath, ' '.join(follow_paths))
 
-    devnull_r = open(os.devnull, 'r')
-    devnull_w = open(os.devnull, 'w')
-    remote_followfiles_proc = run_cmd_on_host(
-        host.hostname, followfiles_cmd, stdout=subprocess.PIPE,
-        stdin=devnull_r, stderr=devnull_w)
+    remote_ff_proc = subprocess.Popen(host._make_ssh_cmd(followfiles_cmd),
+                                      stdin=open(os.devnull, 'r'),
+                                      stdout=subprocess.PIPE, shell=True)
+
+
     # Give it enough time to crash if it's going to (it shouldn't).
     time.sleep(5)
-    doa = remote_followfiles_proc.poll()
+    doa = remote_ff_proc.poll()
     if doa:
         raise FollowFilesLaunchError('ssh command crashed.')
 
-    return remote_followfiles_proc
+    return remote_ff_proc
 
 
 def resolve_patterns_path(patterns_path):
diff --git a/server/hosts/remote.py b/server/hosts/remote.py
index e46bc1b..d1b4b46 100644
--- a/server/hosts/remote.py
+++ b/server/hosts/remote.py
@@ -27,7 +27,7 @@
     LAST_BOOT_TAG = object()
     DEFAULT_HALT_TIMEOUT = 2 * 60
 
-    VAR_LOG_MESSAGES_COPY_PATH = "/var/log/messages.autotest_start"
+    VAR_LOG_MESSAGES_COPY_PATH = "/var/tmp/messages.autotest_start"
 
     def _initialize(self, hostname, autodir=None, *args, **dargs):
         super(RemoteHost, self)._initialize(*args, **dargs)
@@ -230,8 +230,8 @@
             keyvals = utils.read_keyval(keyval_path)
             all_labels = keyvals.get('labels', '')
             if all_labels:
-              all_labels = all_labels.split(',')
-              return [urllib.unquote(label) for label in all_labels]
+                all_labels = all_labels.split(',')
+                return [urllib.unquote(label) for label in all_labels]
         return []
 
 
diff --git a/server/hosts/serial.py b/server/hosts/serial.py
index d514dba..d363cb7 100644
--- a/server/hosts/serial.py
+++ b/server/hosts/serial.py
@@ -165,6 +165,7 @@
                     # Run on num_attempts=1 or last retry
                     try:
                         self.wait_for_restart(timeout,
+                                              old_boot_id=old_boot_id,
                                               **wait_for_restart_kwargs)
                     except error.AutoservShutdownError:
                         logging.warning(warning_msg, num_attempts, num_attempts)
diff --git a/server/prebuild.py b/server/prebuild.py
index 4d78605..272446e 100644
--- a/server/prebuild.py
+++ b/server/prebuild.py
@@ -44,6 +44,7 @@
         hostname = None
         user = None
         log = True
+        tap_report = False
     return client_setup_job.init_test(options, testdir)
 
 
@@ -61,4 +62,3 @@
     # instantiate a client_test instance.
     client_test = init_test(client_test_dir)
     client_setup_job.setup_test(client_test)
-
diff --git a/server/server_job.py b/server/server_job.py
index efc88d6..8a16c6a 100644
--- a/server/server_job.py
+++ b/server/server_job.py
@@ -6,7 +6,7 @@
 Copyright Martin J. Bligh, Andy Whitcroft 2007
 """
 
-import getpass, os, sys, re, stat, tempfile, time, select, subprocess
+import getpass, os, sys, re, stat, tempfile, time, select, subprocess, platform
 import traceback, shutil, warnings, fcntl, pickle, logging, itertools, errno
 from autotest_lib.client.bin import sysinfo
 from autotest_lib.client.common_lib import base_job
@@ -204,6 +204,7 @@
 
         job_data = {'label' : label, 'user' : user,
                     'hostname' : ','.join(machines),
+                    'drone' : platform.node(),
                     'status_version' : str(self._STATUS_VERSION),
                     'job_started' : str(int(time.time()))}
         if group_name:
diff --git a/server/standalone_profiler.py b/server/standalone_profiler.py
index 62a11e1..6324551 100644
--- a/server/standalone_profiler.py
+++ b/server/standalone_profiler.py
@@ -9,6 +9,7 @@
 
 __author__ = 'cranger@google.com (Colby Ranger)'
 
+import platform
 import common
 from autotest_lib.client.common_lib import barrier
 
@@ -16,7 +17,7 @@
 _RUNTEST_PATTERN = ("job.run_test('profiler_sync', timeout_sync=%r,\n"
                     "             timeout_start=%r, timeout_stop=%r,\n"
                     "             hostid='%s', masterid='%s', all_ids=%r)")
-_PROF_MASTER = "PROF_MASTER"
+_PROF_MASTER = platform.node()
 _PORT = 11920
 
 
diff --git a/server/tests/barriertest_2client/control.srv b/server/tests/barriertest_2client/control.srv
new file mode 100644
index 0000000..d6a70bb
--- /dev/null
+++ b/server/tests/barriertest_2client/control.srv
@@ -0,0 +1,78 @@
+AUTHOR = "gps@google.com (Gregory P. Smith)"
+TIME = "SHORT"
+NAME = "barrier_2client"
+TEST_CATEGORY = "Functional"
+TEST_CLASS = 'Network'
+TEST_TYPE = "Server"
+EXPERIMENTAL = True  # This is a functional test of autotest itself.
+SYNC_COUNT = 2
+DOC = """
+A functional test of autotest's Barrier mechanisms for synchronizing
+events between two clients without the help of the server.
+"""
+
+from autotest_lib.server import utils
+
+def run(pair):
+    logging.info('Running on %s and %s', pair[0], pair[1])
+    host_objs = [hosts.create_host(machine) for machine in pair]
+    host_at_objs = [autotest.Autotest(host) for host in host_objs]
+
+    client_control_template = """
+import logging, platform, socket, traceback
+try:
+    client_hostnames = %r
+    master_hostname = client_hostnames[0]
+    client_hostname = client_hostnames[1]
+
+    logging.info('Testing hostname only barrier')
+    barrier = job.barrier(platform.node(), 'barriertest_2client', 120)
+    logging.info('rendezvous-ing')
+    barrier.rendezvous(master_hostname, client_hostname)
+    logging.info('done.')
+
+    logging.info('Testing local identifier barrier')
+    barrier = job.barrier(platform.node() + '#id0', 'barriertest_2client', 120)
+    logging.info('rendezvous-ing')
+    barrier.rendezvous(master_hostname + '#id0',
+                       client_hostname + '#id0')
+    logging.info('done.')
+
+    logging.info('Testing IP@ barrier')
+    barrier = job.barrier(socket.gethostbyname(platform.node()),
+                          'barriertest_2client', 120)
+    logging.info('rendezvous-ing')
+    barrier.rendezvous(socket.gethostbyname(master_hostname),
+                       socket.gethostbyname(client_hostname))
+    logging.info('done.')
+
+    logging.info('Testing IP@ barrier with ids')
+    barrier = job.barrier(socket.gethostbyname(platform.node()) + '#42',
+                          'barriertest_2client', 120)
+    logging.info('rendezvous-ing')
+    barrier.rendezvous(socket.gethostbyname(master_hostname) + '#42',
+                       socket.gethostbyname(client_hostname) + '#42')
+    logging.info('done.')
+except:
+    traceback.print_exc()
+    raise
+"""
+    client_controls = [client_control_template % (pair,) for host in host_objs]
+
+    subcommand_list = []
+    for host, host_at, control in zip(host_objs, host_at_objs, client_controls):
+        subcommand_list.append(subcommand(host_at.run,
+                                          (control, host.hostname)))
+
+    parallel(subcommand_list)
+
+
+# grab the pairs (and failures)
+(pairs, failures) = utils.form_ntuples_from_machines(machines, 2)
+
+# log the failures
+for failure in failures:
+    job.record("FAIL", failure[0], "barrier_2client", failure[1])
+
+# now run through each pair and run
+job.parallel_simple(run, pairs, log=False)
diff --git a/server/tests/netperf2/netperf2.py b/server/tests/netperf2/netperf2.py
index 604e4c8..108dab8 100644
--- a/server/tests/netperf2/netperf2.py
+++ b/server/tests/netperf2/netperf2.py
@@ -33,7 +33,7 @@
                             "test_time=%d, stream_list=%s, tag='%s', ",
                             "iterations=%d)"])
 
-        server_control_file = template % (server.ip, client.ip, 'server', test, 
+        server_control_file = template % (server.ip, client.ip, 'server', test,
                                           time, stream_list, test, cycles)
         client_control_file = template % (server.ip, client.ip, 'client', test,
                                           time, stream_list, test, cycles)
diff --git a/tko/job_serializer.py b/tko/job_serializer.py
old mode 100644
new mode 100755
diff --git a/tko/job_serializer_unittest.py b/tko/job_serializer_unittest.py
old mode 100644
new mode 100755
diff --git a/tko/parsers/version_1.py b/tko/parsers/version_1.py
index 111f7ef..7448c4f 100644
--- a/tko/parsers/version_1.py
+++ b/tko/parsers/version_1.py
@@ -70,11 +70,14 @@
                 val_type = "perf"
 
         # parse the actual value into a dict
-        if val_type == "attr":
-            attr_dict[key] = value
-        elif val_type == "perf" and re.search("^\d+(\.\d+)?$", value):
-            perf_dict[key] = float(value)
-        else:
+        try:
+            if val_type == "attr":
+                attr_dict[key] = value
+            elif val_type == "perf":
+                perf_dict[key] = float(value)
+            else:
+                raise ValueError
+        except ValueError:
             msg = ("WARNING: line '%s' found in test "
                    "iteration keyval could not be parsed")
             msg %= line
diff --git a/tko/parsers/version_1_unittest.py b/tko/parsers/version_1_unittest.py
index 72444c5..4114c59 100755
--- a/tko/parsers/version_1_unittest.py
+++ b/tko/parsers/version_1_unittest.py
@@ -220,8 +220,8 @@
 
 
     def test_perf_entry(self):
-        result = self.parse_line("perf-val{perf}=173")
-        self.assertEqual(({}, {"perf-val": 173}), result)
+        result = self.parse_line("perf-val{perf}=-173")
+        self.assertEqual(({}, {"perf-val": -173}), result)
 
 
     def test_attr_entry(self):
@@ -230,8 +230,8 @@
 
 
     def test_untagged_is_perf(self):
-        result = self.parse_line("untagged=678.5")
-        self.assertEqual(({}, {"untagged": 678.5}), result)
+        result = self.parse_line("untagged=-678.5e5")
+        self.assertEqual(({}, {"untagged": -678.5e5}), result)
 
 
     def test_invalid_tag_ignored(self):
@@ -240,12 +240,12 @@
 
 
     def test_non_numeric_perf_ignored(self):
-        result = self.parse_line("perf-val{perf}=NaN")
+        result = self.parse_line("perf-val{perf}=FooBar")
         self.assertEqual(({}, {}), result)
 
 
     def test_non_numeric_untagged_ignored(self):
-        result = self.parse_line("untagged=NaN")
+        result = self.parse_line("untagged=FooBar")
         self.assertEqual(({}, {}), result)
 
 
diff --git a/utils/check_patch.py b/utils/check_patch.py
index 576b97d..78af6b9 100755
--- a/utils/check_patch.py
+++ b/utils/check_patch.py
@@ -20,7 +20,7 @@
 @author: Lucas Meneghel Rodrigues <lmr@redhat.com>
 """
 
-import os, stat, logging, sys, optparse
+import os, stat, logging, sys, optparse, time
 import common
 from autotest_lib.client.common_lib import utils, error, logging_config
 from autotest_lib.client.common_lib import logging_manager
@@ -32,6 +32,20 @@
                                                                verbose=verbose)
 
 
+def ask(question, auto=False):
+    """
+    Raw input with a prompt that emulates logging.
+
+    @param question: Question to be asked
+    @param auto: Whether to return "y" instead of asking the question
+    """
+    if auto:
+        logging.info("%s (y/n) y" % question)
+        return "y"
+    return raw_input("%s INFO | %s (y/n) " %
+                     (time.strftime("%H:%M:%S", time.localtime()), question))
+
+
 class VCS(object):
     """
     Abstraction layer to the version control system.
@@ -104,6 +118,7 @@
     """
     def __init__(self):
         logging.debug("Subversion VCS backend initialized.")
+        self.ignored_extension_list = ['.orig', '.bak']
 
 
     def get_unknown_files(self):
@@ -112,7 +127,9 @@
         for line in status.split("\n"):
             status_flag = line[0]
             if line and status_flag == "?":
-                unknown_files.append(line[1:].strip())
+                for extension in self.ignored_extension_list:
+                    if not line.endswith(extension):
+                        unknown_files.append(line[1:].strip())
         return unknown_files
 
 
@@ -181,13 +198,16 @@
     Picks up a given file and performs various checks, looking after problems
     and eventually suggesting solutions.
     """
-    def __init__(self, path):
+    def __init__(self, path, confirm=False):
         """
         Class constructor, sets the path attribute.
 
         @param path: Path to the file that will be checked.
+        @param confirm: Whether to answer yes to all questions asked without
+                prompting the user.
         """
         self.path = path
+        self.confirm = confirm
         self.basename = os.path.basename(self.path)
         if self.basename.endswith('.py'):
             self.is_python = True
@@ -204,7 +224,7 @@
         self.first_line = checked_file.readline()
         checked_file.close()
         self.corrective_actions = []
-        self.indentation_exceptions = ['cli/job_unittest.py']
+        self.indentation_exceptions = ['job_unittest.py']
 
 
     def _check_indent(self):
@@ -226,8 +246,6 @@
         reindent_results = reindent_raw.split(" ")[-1].strip(".")
         if reindent_results == "changed":
             if self.basename not in self.indentation_exceptions:
-                logging.error("Possible indentation and spacing issues on "
-                              "file %s" % self.path)
                 self.corrective_actions.append("reindent.py -v %s" % self.path)
 
 
@@ -242,8 +260,7 @@
         c_cmd = 'run_pylint.py %s' % self.path
         rc = utils.system(c_cmd, ignore_status=True)
         if rc != 0:
-            logging.error("Possible syntax problems on file %s", self.path)
-            logging.error("You might want to rerun '%s'", c_cmd)
+            logging.error("Syntax issues found during '%s'", c_cmd)
 
 
     def _check_unittest(self):
@@ -260,9 +277,8 @@
                 unittest_cmd = 'python %s' % unittest_path
                 rc = utils.system(unittest_cmd, ignore_status=True)
                 if rc != 0:
-                    logging.error("Problems during unit test execution "
-                                  "for file %s", self.path)
-                    logging.error("You might want to rerun '%s'", unittest_cmd)
+                    logging.error("Unittest issues found during '%s'",
+                                  unittest_cmd)
 
 
     def _check_permissions(self):
@@ -273,14 +289,10 @@
         """
         if self.first_line.startswith("#!"):
             if not self.is_executable:
-                logging.info("File %s seems to require execution "
-                             "permissions. ", self.path)
-                self.corrective_actions.append("chmod +x %s" % self.path)
+                self.corrective_actions.append("svn propset svn:executable ON %s" % self.path)
         else:
             if self.is_executable:
-                logging.info("File %s does not seem to require execution "
-                             "permissions. ", self.path)
-                self.corrective_actions.append("chmod -x %s" % self.path)
+                self.corrective_actions.append("svn propdel svn:executable %s" % self.path)
 
 
     def report(self):
@@ -294,10 +306,9 @@
             self._check_code()
             self._check_unittest()
         if self.corrective_actions:
-            logging.info("The following corrective actions are suggested:")
             for action in self.corrective_actions:
-                logging.info(action)
-                answer = raw_input("Would you like to apply it? (y/n) ")
+                answer = ask("Would you like to execute %s?" % action,
+                             auto=self.confirm)
                 if answer == "y":
                     rc = utils.system(action, ignore_status=True)
                     if rc != 0:
@@ -305,7 +316,8 @@
 
 
 class PatchChecker(object):
-    def __init__(self, patch=None, patchwork_id=None):
+    def __init__(self, patch=None, patchwork_id=None, confirm=False):
+        self.confirm = confirm
         self.base_dir = os.getcwd()
         if patch:
             self.patch = os.path.abspath(patch)
@@ -322,7 +334,7 @@
         if changed_files_before:
             logging.error("Repository has changed files prior to patch "
                           "application. ")
-            answer = raw_input("Would you like to revert them? (y/n) ")
+            answer = ask("Would you like to revert them?", auto=self.confirm)
             if answer == "n":
                 logging.error("Not safe to proceed without reverting files.")
                 sys.exit(1)
@@ -370,20 +382,20 @@
             for untracked_file in add_to_vcs:
                 logging.info(untracked_file)
             logging.info("Might need to be added to VCS")
-            logging.info("Would you like to add them to VCS ? (y/n/abort) ")
-            answer = raw_input()
+            answer = ask("Would you like to add them to VCS ?")
             if answer == "y":
                 for untracked_file in add_to_vcs:
                     self.vcs.add_untracked_file(untracked_file)
                     modified_files_after.append(untracked_file)
             elif answer == "n":
                 pass
-            elif answer == "abort":
-                sys.exit(1)
 
         for modified_file in modified_files_after:
-            file_checker = FileChecker(modified_file)
-            file_checker.report()
+            # Additional safety check, new commits might introduce
+            # new directories
+            if os.path.isfile(modified_file):
+                file_checker = FileChecker(modified_file)
+                file_checker.report()
 
 
     def check(self):
@@ -399,20 +411,37 @@
                       help='id of a given patchwork patch')
     parser.add_option('--verbose', dest="debug", action='store_true',
                       help='include debug messages in console output')
+    parser.add_option('-f', '--full-check', dest="full_check",
+                      action='store_true',
+                      help='check the full tree for corrective actions')
+    parser.add_option('-y', '--yes', dest="confirm",
+                      action='store_true',
+                      help='Answer yes to all questions')
 
     options, args = parser.parse_args()
     local_patch = options.local_patch
     id = options.id
     debug = options.debug
+    full_check = options.full_check
+    confirm = options.confirm
 
     logging_manager.configure_logging(CheckPatchLoggingConfig(), verbose=debug)
 
-    if local_patch:
-        patch_checker = PatchChecker(patch=local_patch)
-    elif id:
-        patch_checker = PatchChecker(patchwork_id=id)
+    ignore_file_list = ['common.py']
+    if full_check:
+        for root, dirs, files in os.walk('.'):
+            if not '.svn' in root:
+                for file in files:
+                    if file not in ignore_file_list:
+                        path = os.path.join(root, file)
+                        file_checker = FileChecker(path, confirm=confirm)
+                        file_checker.report()
     else:
-        logging.error('No patch or patchwork id specified. Aborting.')
-        sys.exit(1)
-
-    patch_checker.check()
+        if local_patch:
+            patch_checker = PatchChecker(patch=local_patch, confirm=confirm)
+        elif id:
+            patch_checker = PatchChecker(patchwork_id=id, confirm=confirm)
+        else:
+            logging.error('No patch or patchwork id specified. Aborting.')
+            sys.exit(1)
+        patch_checker.check()
diff --git a/utils/external_packages.py b/utils/external_packages.py
old mode 100644
new mode 100755
index d505eef..bcfd15d
--- a/utils/external_packages.py
+++ b/utils/external_packages.py
@@ -613,7 +613,8 @@
 class ParamikoPackage(ExternalPackage):
     version = '1.7.5'
     local_filename = 'paramiko-%s.tar.gz' % version
-    urls = ('http://www.lag.net/paramiko/download/' + local_filename,)
+    urls = ('http://www.lag.net/paramiko/download/' + local_filename,
+            'ftp://mirrors.kernel.org/gentoo/distfiles/' + local_filename,)
     hex_sum = '592be7a08290070b71da63a8e6f28a803399e5c5'
 
 
diff --git a/utils/run_pylint.py b/utils/run_pylint.py
index 0bf5f95..3c3e225 100755
--- a/utils/run_pylint.py
+++ b/utils/run_pylint.py
@@ -6,10 +6,13 @@
 # do a basic check to see if pylint is even installed
 try:
     import pylint
+    from pylint.__pkginfo__ import version as pylint_version
 except ImportError:
     print "Unable to import pylint, it may need to be installed"
     sys.exit(1)
 
+major, minor, release = pylint_version.split('.')
+pylint_version = float("%s.%s" % (major, minor))
 pylintrc_path = os.path.expanduser('~/.pylintrc')
 if not os.path.exists(pylintrc_path):
     open(pylintrc_path, 'w').close()
@@ -54,10 +57,13 @@
 # * common_lib.enum.Enum objects
 # * DB model objects (scheduler models are the worst, but Django models also
 #   generate some errors)
-pylint_base_opts = ['--disable-msg-cat=warning,refactor,convention',
-                    '--disable-msg=E1101,E1103',
-                    '--reports=no',
-                    '--include-ids=y']
+if pylint_version >= 0.21:
+    pylint_base_opts = ['--disable=W,R,C,E1101,E1103']
+else:
+    pylint_base_opts = ['--disable-msg-cat=warning,refactor,convention',
+                        '--disable-msg=E1101,E1103']
+pylint_base_opts += ['--reports=no',
+                     '--include-ids=y']
 
 file_list = sys.argv[1:]
 if '--' in file_list:
diff --git a/utils/test_importer.py b/utils/test_importer.py
index 00b912c..a7be755 100755
--- a/utils/test_importer.py
+++ b/utils/test_importer.py
@@ -242,6 +242,12 @@
         _log_or_execute(repr(new_test), new_test.save)
         add_label_dependencies(new_test)
 
+        # save TestParameter
+        for para_name in data.test_parameters:
+            test_parameter = models.TestParameter.objects.get_or_create(
+                test=new_test, name=para_name)[0]
+            test_parameter.save()
+
 
 def _set_attributes_clean(test, data):
     """
diff --git a/utils/unittest_suite.py b/utils/unittest_suite.py
index 629eba6..78a9602 100755
--- a/utils/unittest_suite.py
+++ b/utils/unittest_suite.py
@@ -16,6 +16,8 @@
 parser.add_option("--skip-tests", dest="skip_tests",  default=[],
                   help="A space separated list of tests to skip")
 
+parser.set_defaults(module_list=None)
+
 
 REQUIRES_DJANGO = set((
         'monitor_db_unittest.py',
@@ -64,6 +66,11 @@
     'logging_manager_test.py',
     ))
 
+# This particular KVM autotest test is not a unittest
+SKIP = set((
+    'guest_test.py',
+    ))
+
 LONG_TESTS = (REQUIRES_DJANGO |
               REQUIRES_MYSQLDB |
               REQUIRES_GWT |
@@ -103,16 +110,10 @@
             raise TestFailure(msg)
 
 
-def find_and_run_tests(start, options):
-    """
-    Find and run Python unittest suites below the given directory.  Only look
-    in subdirectories of start that are actual importable Python modules.
-
-    @param start: The absolute directory to look for tests under.
-    @param options: optparse options.
-    """
+def scan_for_modules(start, options):
     modules = []
-    skip_tests = set()
+
+    skip_tests = SKIP
     if options.skip_tests:
         skip_tests.update(options.skip_tests.split())
 
@@ -142,6 +143,22 @@
                 modules.append(['autotest_lib'] + names)
                 if options.debug:
                     print 'testing', path_no_py
+    return modules
+
+def find_and_run_tests(start, options):
+    """
+    Find and run Python unittest suites below the given directory.  Only look
+    in subdirectories of start that are actual importable Python modules.
+
+    @param start: The absolute directory to look for tests under.
+    @param options: optparse options.
+    """
+    if options.module_list:
+        modules = []
+        for m in options.module_list:
+            modules.append(m.split('.'))
+    else:
+        modules = scan_for_modules(start, options)
 
     if options.debug:
         print 'Number of test modules found:', len(modules)
@@ -170,9 +187,7 @@
 def main():
     options, args = parser.parse_args()
     if args:
-        parser.error('Unexpected argument(s): %s' % args)
-        parser.print_help()
-        sys.exit(1)
+        options.module_list = args
 
     # Strip the arguments off the command line, so that the unit tests do not
     # see them.