Merge remote branch 'cros/upstream' into tempbranch
BUG=
TEST=
Review URL: http://codereview.chromium.org/4823005
Change-Id: I5d56f1c10d0fce7f9d7dc3ad727ea52dcb9b2d6c
diff --git a/.gitignore b/.gitignore
index 51dad37..355a9e3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -18,3 +18,18 @@
# This file is meant only for local testing
private_host_attributes_config.py
+
+# default svn:ignore property:
+.*.rej
+*.rej
+.*~
+*~
+.#*
+.DS_Store
+# additional patterns:
+client/control
+client/results/
+client/tests/kvm/images
+client/tests/kvm/env
+client/tmp
+client/tests/kvm/*.cfg
diff --git a/cli/cli_mock.py b/cli/cli_mock.py
index c24a375..e636ee9 100644
--- a/cli/cli_mock.py
+++ b/cli/cli_mock.py
@@ -18,7 +18,6 @@
def create_file(content):
file_temp = autotemp.tempfile(unique_id='cli_mock', text=True)
os.write(file_temp.fd, content)
- os.close(file_temp.fd)
return file_temp
diff --git a/client/bin/cpuset.py b/client/bin/cpuset.py
new file mode 100644
index 0000000..68fe50a
--- /dev/null
+++ b/client/bin/cpuset.py
@@ -0,0 +1,544 @@
+# Copyright 2007-2010 Google Inc. Released under the GPL v2
+__author__ = "duanes (Duane Sand), pdahl (Peter Dahl)"
+
+# A basic cpuset/cgroup container manager for limiting memory use during tests
+# for use on kernels not running some site-specific container manager
+
+import os, sys, re, glob, fcntl, logging
+from autotest_lib.client.bin import utils
+from autotest_lib.client.common_lib import error
+
+SUPER_ROOT = '' # root of all containers or cgroups
+NO_LIMIT = (1 << 63) - 1 # containername/memory.limit_in_bytes if no limit
+
+# propio service classes:
+PROPIO_PRIO = 1
+PROPIO_NORMAL = 2
+PROPIO_IDLE = 3
+
+super_root_path = '' # usually '/dev/cgroup'; '/dev/cpuset' on 2.6.18
+cpuset_prefix = None # usually 'cpuset.'; '' on 2.6.18
+fake_numa_containers = False # container mem via numa=fake mem nodes, else pages
+mem_isolation_on = False
+node_mbytes = 0 # mbytes in one typical mem node
+root_container_bytes = 0 # squishy limit on effective size of root container
+
+
+def discover_container_style():
+ global super_root_path, cpuset_prefix
+ global mem_isolation_on, fake_numa_containers
+ global node_mbytes, root_container_bytes
+ if super_root_path != '':
+ return # already looked up
+ if os.path.exists('/dev/cgroup/tasks'):
+ # running on 2.6.26 or later kernel with containers on:
+ super_root_path = '/dev/cgroup'
+ cpuset_prefix = 'cpuset.'
+ if get_boot_numa():
+ mem_isolation_on = fake_numa_containers = True
+ else: # memcg containers IFF compiled-in & mounted & non-fakenuma boot
+ fake_numa_containers = False
+ mem_isolation_on = os.path.exists(
+ '/dev/cgroup/memory.limit_in_bytes')
+ # TODO: handle possibility of where memcg is mounted as its own
+ # cgroup hierarchy, separate from cpuset??
+ elif os.path.exists('/dev/cpuset/tasks'):
+ # running on 2.6.18 kernel with containers on:
+ super_root_path = '/dev/cpuset'
+ cpuset_prefix = ''
+ mem_isolation_on = fake_numa_containers = get_boot_numa() != ''
+ else:
+ # neither cpuset nor cgroup filesystem active:
+ super_root_path = None
+ cpuset_prefix = 'no_cpusets_or_cgroups_exist'
+ mem_isolation_on = fake_numa_containers = False
+
+ logging.debug('mem_isolation: %s', mem_isolation_on)
+ logging.debug('fake_numa_containers: %s', fake_numa_containers)
+ if fake_numa_containers:
+ node_mbytes = int(mbytes_per_mem_node())
+ elif mem_isolation_on: # memcg-style containers
+ # For now, limit total of all containers to using just 98% of system's
+ # visible total ram, to avoid oom events at system level, and avoid
+ # page reclaim overhead from going above kswapd highwater mark.
+ system_visible_pages = utils.memtotal() >> 2
+ usable_pages = int(system_visible_pages * 0.98)
+ root_container_bytes = usable_pages << 12
+ logging.debug('root_container_bytes: %s',
+ utils.human_format(root_container_bytes))
+
+
+def need_mem_containers():
+ discover_container_style()
+ if not mem_isolation_on:
+ raise error.AutotestError('Mem-isolation containers not enabled '
+ 'by latest reboot')
+
+def need_fake_numa():
+ discover_container_style()
+ if not fake_numa_containers:
+        raise error.AutotestError('numa=fake not enabled by latest reboot')
+
+
+def full_path(container_name):
+ discover_container_style()
+ return os.path.join(super_root_path, container_name)
+
+
+def unpath(container_path):
+ return container_path[len(super_root_path)+1:]
+
+
+def cpuset_attr(container_name, attr):
+ discover_container_style()
+ return os.path.join(super_root_path, container_name, cpuset_prefix+attr)
+
+
+def io_attr(container_name, attr):
+ discover_container_style()
+ # current version assumes shared cgroup hierarchy
+ return os.path.join(super_root_path, container_name, 'io.'+attr)
+
+
+def tasks_path(container_name):
+ return os.path.join(full_path(container_name), 'tasks')
+
+
+def mems_path(container_name):
+ return cpuset_attr(container_name, 'mems')
+
+
+def memory_path(container_name):
+ return os.path.join(super_root_path, container_name, 'memory')
+
+
+def cpus_path(container_name):
+ return cpuset_attr(container_name, 'cpus')
+
+
+def container_exists(name):
+ return name is not None and os.path.exists(tasks_path(name))
+
+
+def move_tasks_into_container(name, tasks):
+ task_file = tasks_path(name)
+ for task in tasks:
+ try:
+ logging.debug('moving task %s into container "%s"', task, name)
+ utils.write_one_line(task_file, task)
+ except Exception:
+ if utils.pid_is_alive(task):
+ raise # task exists but couldn't move it
+ # task is gone or zombie so ignore this exception
+
+
+def move_self_into_container(name):
+ me = str(os.getpid())
+ move_tasks_into_container(name, [me])
+ logging.debug('running self (pid %s) in container "%s"', me, name)
+
+
+def _avail_mbytes_via_nodes(parent):
+ # total mbytes of mem nodes available for new containers in parent
+ free_nodes = available_exclusive_mem_nodes(parent)
+ mbytes = nodes_avail_mbytes(free_nodes)
+ # don't have exact model for how container mgr measures mem space
+ # better here to underestimate than overestimate
+ mbytes = max(mbytes - node_mbytes//2, 0)
+ return mbytes
+
+
+def _avail_bytes_via_pages(parent):
+ # Get memory bytes available to parent container which could
+ # be allocated exclusively to new child containers.
+ # This excludes mem previously allocated to existing children.
+ available = container_bytes(parent)
+ mem_files_pattern = os.path.join(full_path(parent),
+ '*', 'memory.limit_in_bytes')
+ for mem_file in glob.glob(mem_files_pattern):
+ child_container = unpath(os.path.dirname(mem_file))
+ available -= container_bytes(child_container)
+ return available
+
+
+def avail_mbytes(parent=SUPER_ROOT):
+ # total mbytes available in parent, for exclusive use in new containers
+ if fake_numa_containers:
+ return _avail_mbytes_via_nodes(parent)
+ else:
+ return _avail_bytes_via_pages(parent) >> 20
+
+
+def delete_leftover_test_containers():
+ # recover mems and cores tied up by containers of prior failed tests:
+ for child in inner_containers_of(SUPER_ROOT):
+ _release_container_nest(child)
+
+
+def my_lock(lockname):
+ # lockname is 'inner'
+ lockdir = os.environ['AUTODIR']
+ lockname = os.path.join(lockdir, '.cpuset.lock.'+lockname)
+ lockfile = open(lockname, 'w')
+ fcntl.flock(lockfile, fcntl.LOCK_EX)
+ return lockfile
+
+
+def my_unlock(lockfile):
+ fcntl.flock(lockfile, fcntl.LOCK_UN)
+ lockfile.close()
+
+
+# Convert '1-3,7,9-12' to set(1,2,3,7,9,10,11,12)
+def rangelist_to_set(rangelist):
+ result = set()
+ if not rangelist:
+ return result
+ for x in rangelist.split(','):
+ if re.match(r'^(\d+)$', x):
+ result.add(int(x))
+ continue
+ m = re.match(r'^(\d+)-(\d+)$', x)
+ if m:
+ start = int(m.group(1))
+ end = int(m.group(2))
+ result.update(set(range(start, end+1)))
+ continue
+ msg = 'Cannot understand data input: %s %s' % (x, rangelist)
+ raise ValueError(msg)
+ return result
+
+
+def my_container_name():
+ # Get current process's inherited or self-built container name
+ # within /dev/cpuset or /dev/cgroup. Is '' for root container.
+ name = utils.read_one_line('/proc/%i/cpuset' % os.getpid())
+ return name[1:] # strip leading /
+
+
+def get_mem_nodes(container_name):
+ # all mem nodes now available to a container, both exclusive & shared
+ file_name = mems_path(container_name)
+ if os.path.exists(file_name):
+ return rangelist_to_set(utils.read_one_line(file_name))
+ else:
+ return set()
+
+
+def _busy_mem_nodes(parent_container):
+ # Get set of numa memory nodes now used (exclusively or shared)
+ # by existing children of parent container
+ busy = set()
+ mem_files_pattern = os.path.join(full_path(parent_container),
+ '*', cpuset_prefix+'mems')
+ for mem_file in glob.glob(mem_files_pattern):
+ child_container = os.path.dirname(mem_file)
+ busy |= get_mem_nodes(child_container)
+ return busy
+
+
+def available_exclusive_mem_nodes(parent_container):
+ # Get subset of numa memory nodes of parent container which could
+ # be allocated exclusively to new child containers.
+ # This excludes nodes now allocated to existing children.
+ need_fake_numa()
+ available = get_mem_nodes(parent_container)
+ available -= _busy_mem_nodes(parent_container)
+ return available
+
+
+def my_mem_nodes():
+ # Get set of numa memory nodes owned by current process's container.
+ discover_container_style()
+ if not mem_isolation_on:
+ return set() # as expected by vmstress
+ return get_mem_nodes(my_container_name())
+
+
+def my_available_exclusive_mem_nodes():
+ # Get subset of numa memory nodes owned by current process's
+ # container, which could be allocated exclusively to new child
+ # containers. This excludes any nodes now allocated
+ # to existing children.
+ return available_exclusive_mem_nodes(my_container_name())
+
+
+def node_avail_kbytes(node):
+ return node_mbytes << 10 # crude; fixed numa node size
+
+
+def nodes_avail_mbytes(nodes):
+ # nodes' combined user+avail size, in Mbytes
+ return sum(node_avail_kbytes(n) for n in nodes) // 1024
+
+
+def container_bytes(name):
+ if fake_numa_containers:
+ return nodes_avail_mbytes(get_mem_nodes(name)) << 20
+ else:
+ while True:
+ file = memory_path(name) + '.limit_in_bytes'
+ limit = int(utils.read_one_line(file))
+ if limit < NO_LIMIT:
+ return limit
+ if name == SUPER_ROOT:
+ return root_container_bytes
+ name = os.path.dirname(name)
+
+
+def container_mbytes(name):
+ return container_bytes(name) >> 20
+
+
+def mbytes_per_mem_node():
+ # Get mbyte size of standard numa mem node, as float
+ # (some nodes are bigger than this)
+ # Replaces utils.node_size().
+ numa = get_boot_numa()
+ if numa.endswith('M'):
+ return float(numa[:-1]) # mbyte size of fake nodes
+ elif numa:
+ nodecnt = int(numa) # fake numa mem nodes for container isolation
+ else:
+ nodecnt = len(utils.numa_nodes()) # phys mem-controller nodes
+ # Use guessed total physical mem size, not kernel's
+ # lesser 'available memory' after various system tables.
+ return utils.rounded_memtotal() / (nodecnt * 1024.0)
+
+
+def get_cpus(container_name):
+ file_name = cpus_path(container_name)
+ if os.path.exists(file_name):
+ return rangelist_to_set(utils.read_one_line(file_name))
+ else:
+ return set()
+
+
+def get_tasks(container_name):
+ file_name = tasks_path(container_name)
+ try:
+ tasks = [x.rstrip() for x in open(file_name).readlines()]
+ except IOError:
+ if os.path.exists(file_name):
+ raise
+ tasks = [] # container doesn't exist anymore
+ return tasks
+
+
+def inner_containers_of(parent):
+ pattern = os.path.join(full_path(parent), '*/tasks')
+ return [unpath(os.path.dirname(task_file))
+ for task_file in glob.glob(pattern)]
+
+
+def _release_container_nest(nest):
+ # Destroy a container, and any nested sub-containers
+ nest_path = full_path(nest)
+ if os.path.exists(nest_path):
+
+ # bottom-up walk of tree, releasing all nested sub-containers
+ for child in inner_containers_of(nest):
+ _release_container_nest(child)
+
+ logging.debug("releasing container %s", nest)
+
+ # Transfer any survivor tasks (e.g. self) to parent container
+ parent = os.path.dirname(nest)
+ move_tasks_into_container(parent, get_tasks(nest))
+
+ # remove the now-empty outermost container of this nest
+ if os.path.exists(nest_path):
+ os.rmdir(nest_path) # nested, or dead manager
+
+
+def release_container(container_name=None):
+ # Destroy a container
+ my_container = my_container_name()
+ if container_name is None:
+ container_name = my_container
+ _release_container_nest(container_name)
+ displaced = my_container_name()
+ if displaced != my_container:
+ logging.debug('now running self (pid %d) in container "%s"',
+ os.getpid(), displaced)
+
+
+def remove_empty_prio_classes(prios):
+ # remove prio classes whose set of allowed priorities is empty
+    # e.g. 'no:3;rt:;be:3;id:' --> 'no:3;be:3'
+ return ';'.join(p for p in prios.split(';') if p.split(':')[1])
+
+
+def all_drive_names():
+ # list of all disk drives sda,sdb,...
+ paths = glob.glob('/sys/block/sd*')
+ if not paths:
+ paths = glob.glob('/sys/block/hd*')
+ return [os.path.basename(path) for path in paths]
+
+
+def set_io_controls(container_name, disks=[], ioprio_classes=[PROPIO_NORMAL],
+ io_shares=[95], io_limits=[0]):
+ # set the propio controls for one container, for selected disks
+ # writing directly to /dev/cgroup/container_name/io.io_service_level
+ # without using containerd or container.py
+ # See wiki ProportionalIOScheduler for definitions
+ # ioprio_classes: list of service classes, one per disk
+ # using numeric propio service classes as used by kernel API, namely
+ # 1: RT, Real Time, aka PROPIO_PRIO
+ # 2: BE, Best Effort, aka PROPIO_NORMAL
+ # 3: PROPIO_IDLE
+ # io_shares: list of disk-time-fractions, one per disk,
+ # as percentage integer 0..100
+ # io_limits: list of limit on/off, one per disk
+ # 0: no limit, shares use of other containers' unused disk time
+ # 1: limited, container's use of disk time is capped to given DTF
+ # ioprio_classes defaults to best-effort
+ # io_limit defaults to no limit, use slack time
+ if not disks: # defaults to all drives
+ disks = all_drive_names()
+ io_shares = [io_shares [0]] * len(disks)
+ ioprio_classes = [ioprio_classes[0]] * len(disks)
+ io_limits = [io_limits [0]] * len(disks)
+ if not (len(disks) == len(ioprio_classes) and len(disks) == len(io_shares)
+ and len(disks) == len(io_limits)):
+ raise error.AutotestError('Unequal number of values for io controls')
+ service_level = io_attr(container_name, 'io_service_level')
+ if not os.path.exists(service_level):
+ return # kernel predates propio features
+ # or io cgroup is mounted separately from cpusets
+ disk_infos = []
+ for disk,ioclass,limit,share in zip(disks, ioprio_classes,
+ io_limits, io_shares):
+ parts = (disk, str(ioclass), str(limit), str(share))
+ disk_info = ' '.join(parts)
+ utils.write_one_line(service_level, disk_info)
+ disk_infos.append(disk_info)
+ logging.debug('set_io_controls of %s to %s',
+ container_name, ', '.join(disk_infos))
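+
+# A hedged example call (disk names and share values below are illustrative
+# only, not taken from this patch):
+#     set_io_controls('mycontainer', disks=['sda', 'sdb'],
+#                     ioprio_classes=[PROPIO_NORMAL, PROPIO_NORMAL],
+#                     io_shares=[60, 35], io_limits=[0, 0])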
+
+
+def abbrev_list(vals):
+ """Condense unsigned (0,4,5,6,7,10) to '0,4-7,10'."""
+ ranges = []
+ lower = 0
+ upper = -2
+ for val in sorted(vals)+[-1]:
+ if val != upper+1:
+ if lower == upper:
+ ranges.append(str(lower))
+ elif lower <= upper:
+ ranges.append('%d-%d' % (lower, upper))
+ lower = val
+ upper = val
+ return ','.join(ranges)
+
+
+def create_container_with_specific_mems_cpus(name, mems, cpus):
+ need_fake_numa()
+ os.mkdir(full_path(name))
+ utils.write_one_line(cpuset_attr(name, 'mem_hardwall'), '1')
+ utils.write_one_line(mems_path(name), ','.join(map(str, mems)))
+ utils.write_one_line(cpus_path(name), ','.join(map(str, cpus)))
+ logging.debug('container %s has %d cpus and %d nodes totalling %s bytes',
+ name, len(cpus), len(get_mem_nodes(name)),
+ utils.human_format(container_bytes(name)) )
+
+
+def create_container_via_memcg(name, parent, bytes, cpus):
+ # create container via direct memcg cgroup writes
+ os.mkdir(full_path(name))
+ nodes = utils.read_one_line(mems_path(parent))
+ utils.write_one_line(mems_path(name), nodes) # inherit parent's nodes
+ utils.write_one_line(memory_path(name)+'.limit_in_bytes', str(bytes))
+ utils.write_one_line(cpus_path(name), ','.join(map(str, cpus)))
+ logging.debug('Created container %s directly via memcg,'
+ ' has %d cpus and %s bytes',
+ name, len(cpus), utils.human_format(container_bytes(name)))
+
+
+def _create_fake_numa_container_directly(name, parent, mbytes, cpus):
+ need_fake_numa()
+ lockfile = my_lock('inner') # serialize race between parallel tests
+ try:
+ # Pick specific mem nodes for new cpuset's exclusive use
+ # For now, arbitrarily pick highest available node numbers
+ needed_kbytes = mbytes * 1024
+ nodes = sorted(list(available_exclusive_mem_nodes(parent)))
+ kbytes = 0
+ nodecnt = 0
+ while kbytes < needed_kbytes and nodecnt < len(nodes):
+ nodecnt += 1
+ kbytes += node_avail_kbytes(nodes[-nodecnt])
+ if kbytes < needed_kbytes:
+ parent_mbytes = container_mbytes(parent)
+ if mbytes > parent_mbytes:
+ raise error.AutotestError(
+ "New container's %d Mbytes exceeds "
+ "parent container's %d Mbyte size"
+ % (mbytes, parent_mbytes) )
+ else:
+ raise error.AutotestError(
+ "Existing sibling containers hold "
+ "%d Mbytes needed by new container"
+ % ((needed_kbytes - kbytes)//1024) )
+ mems = nodes[-nodecnt:]
+
+ create_container_with_specific_mems_cpus(name, mems, cpus)
+ finally:
+ my_unlock(lockfile)
+
+
+def create_container_directly(name, mbytes, cpus):
+ parent = os.path.dirname(name)
+ if fake_numa_containers:
+ _create_fake_numa_container_directly(name, parent, mbytes, cpus)
+ else:
+ create_container_via_memcg(name, parent, mbytes<<20, cpus)
+
+
+def create_container_with_mbytes_and_specific_cpus(name, mbytes,
+ cpus=None, root=SUPER_ROOT, io={}, move_in=True, timeout=0):
+ """\
+ Create a cpuset container and move job's current pid into it
+ Allocate the list "cpus" of cpus to that container
+
+ name = arbitrary string tag
+    mbytes = requested memory for job in megabytes
+    cpus = list of cpu indices to associate with the cpuset
+        defaults to all cpus available within the given root
+ root = the parent cpuset to nest this new set within
+ '': unnested top-level container
+ io = arguments for proportional IO containers
+ move_in = True: Move current process into the new container now.
+ timeout = must be 0: persist until explicitly deleted.
+ """
+ need_mem_containers()
+ if not container_exists(root):
+ raise error.AutotestError('Parent container "%s" does not exist'
+ % root)
+ if cpus is None:
+ # default to biggest container we can make under root
+ cpus = get_cpus(root)
+ else:
+ cpus = set(cpus) # interface uses list
+ if not cpus:
+ raise error.AutotestError('Creating container with no cpus')
+ name = os.path.join(root, name) # path relative to super_root
+ if os.path.exists(full_path(name)):
+ raise error.AutotestError('Container %s already exists' % name)
+ create_container_directly(name, mbytes, cpus)
+ set_io_controls(name, **io)
+ if move_in:
+ move_self_into_container(name)
+ return name
+
+
+def get_boot_numa():
+ # get boot-time numa=fake=xyz option for current boot
+ # eg numa=fake=nnn, numa=fake=nnnM, or nothing
+ label = 'numa=fake='
+ for arg in utils.read_one_line('/proc/cmdline').split():
+ if arg.startswith(label):
+ return arg[len(label):]
+ return ''
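+
+
+# A minimal usage sketch (assumes fake-NUMA or memcg isolation was enabled by
+# the latest reboot; the container name and size are illustrative only):
+#     name = create_container_with_mbytes_and_specific_cpus('test512', 512)
+#     ...  # run the memory-limited workload; this process now lives inside it
+#     release_container(name)  # surviving tasks move back to the parent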
diff --git a/client/bin/job.py b/client/bin/job.py
index 6dd19d1..3d552ce 100644
--- a/client/bin/job.py
+++ b/client/bin/job.py
@@ -663,7 +663,8 @@
# check to see if any partitions have changed
partition_list = partition_lib.get_partition_list(self,
exclude_swap=False)
- mount_info = set((p.device, p.get_mountpoint()) for p in partition_list)
+ mount_info = partition_lib.get_mount_info(partition_list)
+
old_mount_info = self._state.get('client', 'mount_info')
if mount_info != old_mount_info:
new_entries = mount_info - old_mount_info
@@ -782,7 +783,7 @@
# save the partition list and mount points, as well as the cpu count
partition_list = partition_lib.get_partition_list(self,
exclude_swap=False)
- mount_info = set((p.device, p.get_mountpoint()) for p in partition_list)
+ mount_info = partition_lib.get_mount_info(partition_list)
self._state.set('client', 'mount_info', mount_info)
self._state.set('client', 'cpu_count', utils.count_cpus())
diff --git a/client/bin/kernel.py b/client/bin/kernel.py
index da91389..f3f5a38 100644
--- a/client/bin/kernel.py
+++ b/client/bin/kernel.py
@@ -59,9 +59,17 @@
else:
arglist.append(arg)
- # add the kernel entry
- bootloader.add_kernel(image, tag, initrd=initrd, args=' '.join(arglist),
- root=root)
+    # Add the kernel entry. It will keep all arguments from the default entry.
+    # args='_dummy_' is used to work around a boottool limitation of not being
+ # able to add arguments to a kernel that does not already have any of its
+ # own by way of its own append= section below the image= line in lilo.conf.
+ bootloader.add_kernel(image, tag, initrd=initrd, root=root, args='_dummy_')
+ # Now, for each argument in arglist, try to add it to the kernel that was
+    # just added. At each step, if the arg already exists in the args string,
+    # it will be skipped.
+ for a in arglist:
+ bootloader.add_args(kernel=tag, args=a)
+ bootloader.remove_args(kernel=tag, args='_dummy_')
class BootableKernel(object):
@@ -83,7 +91,6 @@
@param subdir: job-step qualifier in status log
@param notes: additional comment in status log
"""
-
# If we can check the kernel identity do so.
if ident_check:
when = int(time.time())
@@ -94,11 +101,7 @@
self.job.next_step_prepend(["job.end_reboot", subdir,
expected_ident, notes])
- # Point bootloader to the selected tag.
- _add_kernel_to_bootloader(self.job.bootloader,
- self.job.config_get('boot.default_args'),
- self.installed_as, args, self.image,
- self.initrd)
+ self.add_to_bootloader(args)
# defer fsck for next reboot, to avoid reboots back to default kernel
utils.system('touch /fastboot') # this file is removed automatically
@@ -108,6 +111,14 @@
self.job.reboot(tag=self.installed_as)
+ def add_to_bootloader(self, args=''):
+ # Point bootloader to the selected tag.
+ _add_kernel_to_bootloader(self.job.bootloader,
+ self.job.config_get('boot.default_args'),
+ self.installed_as, args, self.image,
+ self.initrd)
+
+
class kernel(BootableKernel):
""" Class for compiling kernels.
@@ -307,6 +318,8 @@
# if base_tree is a dir, assume uncompressed kernel
if os.path.isdir(base_tree):
print 'Symlinking existing kernel source'
+ if os.path.islink(self.build_dir):
+ os.remove(self.build_dir)
os.symlink(base_tree, self.build_dir)
# otherwise, extract tarball
@@ -372,7 +385,7 @@
os.chdir(self.build_dir)
self.set_cross_cc()
- self.clean(logged=False)
+ self.clean()
build_string = "/usr/bin/time -o %s make %s -j %s vmlinux" \
% (timefile, make_opts, threads)
build_string += ' > %s 2>&1' % output
@@ -420,8 +433,28 @@
if not args:
args = ''
+ # It is important to match the version with a real directory inside
+ # /lib/modules
+ real_version_list = glob.glob('/lib/modules/%s*' % version)
+ rl = len(real_version_list)
+ if rl == 0:
+            logging.error("No directory %s found under /lib/modules. Initramfs "
+                          "creation will most likely fail and your new kernel "
+                          "will fail to boot", version)
+ else:
+ if rl > 1:
+ logging.warning("Found more than one possible match for "
+ "kernel version %s under /lib/modules", version)
+ version = os.path.basename(real_version_list[0])
+
if vendor in ['Red Hat', 'Fedora Core']:
- utils.system('mkinitrd %s %s %s' % (args, initrd, version))
+ try:
+ cmd = os_dep.command('dracut')
+ full_cmd = '%s -f %s %s' % (cmd, initrd, version)
+ except ValueError:
+ cmd = os_dep.command('mkinitrd')
+ full_cmd = '%s %s %s %s' % (cmd, args, initrd, version)
+ utils.system(full_cmd)
elif vendor in ['SUSE']:
utils.system('mkinitrd %s -k %s -i %s -M %s' %
(args, image, initrd, system_map))
@@ -719,7 +752,7 @@
# search for initrd
for file in files:
- if file.startswith('/boot/initrd'):
+ if file.startswith('/boot/init'):
self.initrd = file
# prefer /boot/initrd-version before /boot/initrd
if len(file) > len('/boot/initrd'):
diff --git a/client/bin/kernel_unittest.py b/client/bin/kernel_unittest.py
index fe40e6a..6761c05 100755
--- a/client/bin/kernel_unittest.py
+++ b/client/bin/kernel_unittest.py
@@ -18,8 +18,11 @@
# record
bootloader.remove_kernel.expect_call(tag)
bootloader.add_kernel.expect_call(image, tag, initrd=initrd,
- args=bootloader_args,
- root=bootloader_root)
+ args='_dummy_', root=bootloader_root)
+
+ for a in bootloader_args.split():
+ bootloader.add_args.expect_call(kernel=tag, args=a)
+ bootloader.remove_args.expect_call(kernel=tag, args='_dummy_')
# run and check
kernel._add_kernel_to_bootloader(bootloader, base_args, tag, args,
@@ -475,7 +478,7 @@
# record
os.chdir.expect_call(self.build_dir)
self.kernel.set_cross_cc.expect_call()
- self.kernel.clean.expect_call(logged=False)
+ self.kernel.clean.expect_call()
build_string = "/usr/bin/time -o /dev/null make -j 8 vmlinux"
build_string += ' > /dev/null 2>&1'
utils.system.expect_call(build_string)
@@ -509,6 +512,7 @@
self.job.config_get.expect_call(
'kernel.mkinitrd_extra_args').and_return(None)
args = ''
+ glob.glob.expect_call('/lib/modules/2.6.24*').and_return(['2.6.24'])
os.path.isfile.expect_call('/usr/sbin/mkinitrd').and_return(True)
cmd = '/usr/sbin/mkinitrd'
utils.system.expect_call('%s %s -o initrd 2.6.24' % (cmd, args))
diff --git a/client/bin/partition.py b/client/bin/partition.py
index e238373..355d1b3 100644
--- a/client/bin/partition.py
+++ b/client/bin/partition.py
@@ -176,6 +176,24 @@
return partitions
+def get_mount_info(partition_list):
+ """
+ Picks up mount point information about the machine mounts. By default, we
+ try to associate mount points with UUIDs, because in newer distros the
+ partitions are uniquely identified using them.
+ """
+ mount_info = set()
+ for p in partition_list:
+ try:
+ uuid = utils.system_output('blkid -s UUID -o value %s' % p.device)
+ except error.CmdError:
+ # fall back to using the partition
+ uuid = p.device
+ mount_info.add((uuid, p.get_mountpoint()))
+
+ return mount_info
+
+
def filter_partition_list(partitions, devnames):
"""
Pick and choose which partition to keep.
@@ -311,11 +329,11 @@
def run_test_on_partitions(job, test, partitions, mountpoint_func,
- tag, fs_opt, **dargs):
+ tag, fs_opt, do_fsck=True, **dargs):
"""
Run a test that requires multiple partitions. Filesystems will be
made on the partitions and mounted, then the test will run, then the
- filesystems will be unmounted and fsck'd.
+ filesystems will be unmounted and optionally fsck'd.
@param job: A job instance to run the test
@param test: A string containing the name of the test
@@ -327,6 +345,7 @@
files that make multiple calls to this routine with the same value
of 'test'.)
@param fs_opt: An FsOptions instance that describes what filesystem to make
+ @param do_fsck: include fsck in post-test partition cleanup.
@param dargs: Dictionary of arguments to be passed to job.run_test() and
eventually the test
"""
@@ -342,8 +361,10 @@
# run the test against all the partitions
job.run_test(test, tag=tag, partitions=partitions, dir=mountpoint, **dargs)
- # fsck and then remake all the filesystems in parallel
- parallel(partitions, 'cleanup_after_test')
+ parallel(partitions, 'unmount') # unmount all partitions in parallel
+ if do_fsck:
+ parallel(partitions, 'fsck') # fsck all partitions in parallel
+ # else fsck is done by caller
class partition(object):
@@ -430,15 +451,6 @@
self.mount(mountpoint)
- def cleanup_after_test(self):
- """
- Cleans up a partition after running a filesystem test. The
- filesystem is unmounted, and then checked for errors.
- """
- self.unmount()
- self.fsck()
-
-
def run_test_on_partition(self, test, mountpoint_func, **dargs):
"""
Executes a test fs-style (umount,mkfs,mount,test)
@@ -470,7 +482,9 @@
try:
self.job.run_test(test, tag=test_tag, dir=mountpoint, **dargs)
finally:
- self.cleanup_after_test()
+ self.unmount()
+ self.fsck()
+
mountpoint = mountpoint_func(self)
@@ -496,7 +510,7 @@
if filename:
for line in open_func(filename).readlines():
parts = line.split()
- if parts[0] == self.device:
+ if parts[0] == self.device or parts[1] == self.mountpoint:
return parts[1] # The mountpoint where it's mounted
return None
diff --git a/client/common_lib/autotemp.py b/client/common_lib/autotemp.py
index 536d81c..bbe737e 100644
--- a/client/common_lib/autotemp.py
+++ b/client/common_lib/autotemp.py
@@ -31,6 +31,7 @@
t = autotemp.tempfile(unique_id='fig')
t.name # name of file
t.fd # file descriptor
+ t.fo # file object
t.clean() # clean up after yourself
"""
def __init__(self, unique_id, suffix='', prefix='', dir=None,
@@ -40,6 +41,7 @@
self.fd, self.name = module_tempfile.mkstemp(suffix=suffix,
prefix=prefix,
dir=dir, text=text)
+ self.fo = os.fdopen(self.fd)
def clean(self):
@@ -47,15 +49,17 @@
Remove the temporary file that was created.
This is also called by the destructor.
"""
+ if self.fo:
+ self.fo.close()
if self.name and os.path.exists(self.name):
os.remove(self.name)
- self.fd = self.name = None
+ self.fd = self.fo = self.name = None
def __del__(self):
try:
- if self.name:
+ if self.name is not None:
logging.debug('Clean was not called for ' + self.name)
self.clean()
except:
diff --git a/client/common_lib/base_packages.py b/client/common_lib/base_packages.py
index 3c65f30..feb0d31 100644
--- a/client/common_lib/base_packages.py
+++ b/client/common_lib/base_packages.py
@@ -119,14 +119,13 @@
dest_file_path = self.run_command(mktemp).stdout.strip()
try:
- # build up a wget command using the server name
- server_name = urlparse.urlparse(self.url)[1]
- http_cmd = self.wget_cmd_pattern % (server_name, dest_file_path)
+ # build up a wget command
+ http_cmd = self.wget_cmd_pattern % (self.url, dest_file_path)
try:
self.run_command(http_cmd, _run_command_dargs={'timeout': 30})
except Exception, e:
msg = 'HTTP test failed, unable to contact %s: %s'
- raise error.PackageFetchError(msg % (server_name, e))
+ raise error.PackageFetchError(msg % (self.url, e))
finally:
self.run_command('rm -rf %s' % dest_file_path)
diff --git a/client/common_lib/boottool.py b/client/common_lib/boottool.py
index 1c29861..f123f1c 100644
--- a/client/common_lib/boottool.py
+++ b/client/common_lib/boottool.py
@@ -264,18 +264,14 @@
def boot_once(self, title=None):
"""
- Sets a specific entry for the next boot Then falls back to the
+ Sets a specific entry for the next boot, then falls back to the
default kernel.
@param kernel: title that identifies the entry to set for booting. If
- evaluates to false it will use the default entry title.
- (FIXME: that does not make much sense, if an entry is default
- by definition that means it boots next anyways so maybe it
- should be a NOP for title evaluating to False)
+ evaluates to false, this becomes a no-op.
"""
- if not title:
- title = self.get_default_title()
- self._run_boottool('--boot-once', '--title=%s' % title)
+ if title:
+ self._run_boottool('--boot-once', '--title=%s' % title)
def enable_xen_mode(self):
diff --git a/client/common_lib/control_data_unittest.py b/client/common_lib/control_data_unittest.py
index a85d755..ad44944 100755
--- a/client/common_lib/control_data_unittest.py
+++ b/client/common_lib/control_data_unittest.py
@@ -27,7 +27,6 @@
self.control_tmp = autotemp.tempfile(unique_id='control_unit',
text=True)
os.write(self.control_tmp.fd, CONTROL)
- os.close(self.control_tmp.fd)
def tearDown(self):
diff --git a/client/common_lib/global_config_unittest.py b/client/common_lib/global_config_unittest.py
index b039334..b3ad857c 100755
--- a/client/common_lib/global_config_unittest.py
+++ b/client/common_lib/global_config_unittest.py
@@ -35,13 +35,11 @@
global_temp = autotemp.tempfile("global", ".ini",
text=True)
os.write(global_temp.fd, global_config_ini_contents)
- os.close(global_temp.fd)
shadow_temp = autotemp.tempfile("shadow", ".ini",
text=True)
fd = shadow_temp.fd
os.write(shadow_temp.fd, shadow_config_ini_contents)
- os.close(shadow_temp.fd)
return (global_temp, shadow_temp)
diff --git a/client/common_lib/utils.py b/client/common_lib/utils.py
index 8a34ef1..47f3cb4 100644
--- a/client/common_lib/utils.py
+++ b/client/common_lib/utils.py
@@ -4,6 +4,7 @@
import os, pickle, random, re, resource, select, shutil, signal, StringIO
import socket, struct, subprocess, sys, time, textwrap, urlparse
import warnings, smtplib, logging, urllib2
+from threading import Thread, Event
try:
import hashlib
except ImportError:
@@ -190,6 +191,30 @@
f.close()
+def get_field(data, param, linestart="", sep=" "):
+ """
+ Parse data from string.
+ @param data: Data to parse.
+ example:
+ data:
+ cpu 324 345 34 5 345
+ cpu0 34 11 34 34 33
+ ^^^^
+ start of line
+ params 0 1 2 3 4
+ @param param: Position of parameter after linestart marker.
+    @param linestart: String with which the line containing the parameters
+            starts.
+    @param sep: Regular expression used to separate the parameters.
+ """
+ search = re.compile(r"(?<=^%s)\s*(.*)" % linestart, re.MULTILINE)
+ find = search.search(data)
+ if find != None:
+ return re.split("%s" % sep, find.group(1))[param]
+ else:
+        print "No line in data starts with %s." % linestart
+ return None
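+
+# A minimal sketch of get_field usage (the data values are illustrative):
+#     data = "cpu  324 345 34 5 345\ncpu0 34 11 34 34 33\n"
+#     get_field(data, 1, linestart="cpu0", sep=" +")  # returns '11'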
+
+
def write_one_line(filename, line):
open_write_close(filename, line.rstrip('\n') + '\n')
@@ -211,9 +236,14 @@
in each column, and determining the format string dynamically.
@param matrix: Matrix representation (list with n rows of m elements).
- @param header: Optional tuple with header elements to be displayed.
+ @param header: Optional tuple or list with header elements to be displayed.
"""
+ if type(header) is list:
+ header = tuple(header)
lengths = []
+ if header:
+ for column in header:
+ lengths.append(len(column))
for row in matrix:
for column in row:
i = row.index(column)
@@ -294,6 +324,156 @@
keyval.close()
+class FileFieldMonitor(object):
+ """
+    Monitors information from a file and reports its values.
+
+    It gathers the information at the start and stop of the measurement, or
+    continuously during the measurement.
+ """
+ class Monitor(Thread):
+ """
+        Internal monitor class to ensure continuous monitoring of the
+        monitored file.
+ """
+ def __init__(self, master):
+ """
+            @param master: Master class which controls the Monitor
+ """
+ Thread.__init__(self)
+ self.master = master
+
+ def run(self):
+ """
+ Start monitor in thread mode
+ """
+ while not self.master.end_event.isSet():
+ self.master._get_value(self.master.logging)
+ time.sleep(self.master.time_step)
+
+
+ def __init__(self, status_file, data_to_read, mode_diff, continuously=False,
+ contlogging=False, separator=" +", time_step=0.1):
+ """
+ Initialize variables.
+        Initialize variables.
+        @param status_file: File containing the status.
+        @param data_to_read: List of tuples with data positions.
+            format: [(start_of_line, position in params)]
+            example:
+            data:
+               cpu 324 345 34 5 345
+               cpu0 34 11 34 34 33
+               ^^^^
+               start of line
+               params 0 1 2 3 4
+        @param mode_diff: If True, subtract the old value from the new one;
+            if False, average the values.
+        @param continuously: Start the monitoring thread using the time_step
+            as the measurement period.
+        @param contlogging: Log data during a continuous run.
+        @param separator: Regular expression used as the separator.
+        @param time_step: Time period between measurements.
+ """
+ self.end_event = Event()
+ self.start_time = 0
+ self.end_time = 0
+ self.test_time = 0
+
+ self.status_file = status_file
+ self.separator = separator
+ self.data_to_read = data_to_read
+ self.num_of_params = len(self.data_to_read)
+ self.mode_diff = mode_diff
+ self.continuously = continuously
+ self.time_step = time_step
+
+ self.value = [0 for i in range(self.num_of_params)]
+ self.old_value = [0 for i in range(self.num_of_params)]
+ self.log = []
+ self.logging = contlogging
+
+ self.started = False
+ self.num_of_get_value = 0
+ self.monitor = None
+
+
+ def _get_value(self, logging=True):
+ """
+ Return current values.
+        @param logging: If True, log the value in memory. This can be a
+                        problem on long runs.
+ """
+ data = read_file(self.status_file)
+ value = []
+ for i in range(self.num_of_params):
+ value.append(int(get_field(data,
+ self.data_to_read[i][1],
+ self.data_to_read[i][0],
+ self.separator)))
+
+ if logging:
+ self.log.append(value)
+ if not self.mode_diff:
+ value = map(lambda x, y: x + y, value, self.old_value)
+
+ self.old_value = value
+ self.num_of_get_value += 1
+ return value
+
+
+ def start(self):
+ """
+ Start value monitor.
+ """
+ if self.started:
+ self.stop()
+ self.old_value = [0 for i in range(self.num_of_params)]
+ self.num_of_get_value = 0
+ self.log = []
+ self.end_event.clear()
+ self.start_time = time.time()
+ self._get_value()
+ self.started = True
+ if (self.continuously):
+ self.monitor = FileFieldMonitor.Monitor(self)
+ self.monitor.start()
+
+
+ def stop(self):
+ """
+ Stop value monitor.
+ """
+ if self.started:
+ self.started = False
+ self.end_time = time.time()
+ self.test_time = self.end_time - self.start_time
+ self.value = self._get_value()
+ if (self.continuously):
+ self.end_event.set()
+ self.monitor.join()
+ if (self.mode_diff):
+ self.value = map(lambda x, y: x - y, self.log[-1], self.log[0])
+ else:
+ self.value = map(lambda x: x / self.num_of_get_value,
+ self.value)
+
+
+ def get_status(self):
+ """
+        @return: Tuple of the average values of the monitored process, the
+            test duration, the array of monitored values, and the time step
+            of the continuous run.
+ """
+ if self.started:
+ self.stop()
+ if self.mode_diff:
+ for i in range(len(self.log) - 1):
+ self.log[i] = (map(lambda x, y: x - y,
+ self.log[i + 1], self.log[i]))
+ self.log.pop()
+ return (self.value, self.test_time, self.log, self.time_step)
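+
+
+# A hedged usage sketch for FileFieldMonitor (column positions follow the
+# /proc/stat layout shown in the docstrings above):
+#     mon = FileFieldMonitor("/proc/stat", [("cpu", 0), ("cpu", 2)],
+#                            mode_diff=True, continuously=True)
+#     mon.start()
+#     ...  # run the workload being measured
+#     mon.stop()
+#     value, test_time, log, time_step = mon.get_status()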
+
+
def is_url(path):
"""Return true if path looks like a URL"""
# for now, just handle http and ftp
@@ -801,6 +981,226 @@
return cpu_percent, to_return
+class SystemLoad(object):
+ """
+    Get system and/or process values and return the average load.
+ """
+ def __init__(self, pids, advanced=False, time_step=0.1, cpu_cont=False,
+ use_log=False):
+ """
+        @param pids: List of pids to be monitored. If pid == 0 the whole
+          system will be monitored.
+        @param advanced: If True, also monitor the system IRQ and soft IRQ
+          counts, and per-process minor and major page faults.
+ @param time_step: Time step for continuous monitoring.
+ @param cpu_cont: If True monitor CPU load continuously.
+ @param use_log: If true every monitoring is logged for dump.
+ """
+ self.pids = []
+ self.stats = {}
+ for pid in pids:
+ if pid == 0:
+ cpu = FileFieldMonitor("/proc/stat",
+ [("cpu", 0), # User Time
+ ("cpu", 2), # System Time
+ ("intr", 0), # IRQ Count
+ ("softirq", 0)], # Soft IRQ Count
+ True,
+ cpu_cont,
+ use_log,
+ " +",
+ time_step)
+ mem = FileFieldMonitor("/proc/meminfo",
+ [("MemTotal:", 0), # Mem Total
+ ("MemFree:", 0), # Mem Free
+ ("Buffers:", 0), # Buffers
+ ("Cached:", 0)], # Cached
+ False,
+ True,
+ use_log,
+ " +",
+ time_step)
+ self.stats[pid] = ["TOTAL", cpu, mem]
+ self.pids.append(pid)
+ else:
+ name = ""
+ if (type(pid) is int):
+ self.pids.append(pid)
+ name = get_process_name(pid)
+ else:
+ self.pids.append(pid[0])
+ name = pid[1]
+
+ cpu = FileFieldMonitor("/proc/%d/stat" %
+ self.pids[-1],
+ [("", 13), # User Time
+ ("", 14), # System Time
+                                        ("", 9), # Minor Page Faults
+                                        ("", 11)], # Major Page Faults
+ True,
+ cpu_cont,
+ use_log,
+ " +",
+ time_step)
+ mem = FileFieldMonitor("/proc/%d/status" %
+ self.pids[-1],
+ [("VmSize:", 0), # Virtual Memory Size
+ ("VmRSS:", 0), # Resident Set Size
+ ("VmPeak:", 0), # Peak VM Size
+ ("VmSwap:", 0)], # VM in Swap
+ False,
+ True,
+ use_log,
+ " +",
+ time_step)
+ self.stats[self.pids[-1]] = [name, cpu, mem]
+
+ self.advanced = advanced
+
+
+ def __str__(self):
+ """
+        Define how the status is printed.
+ """
+ out = ""
+ for pid in self.pids:
+ for stat in self.stats[pid][1:]:
+ out += str(stat.get_status()) + "\n"
+ return out
+
+
+ def start(self, pids=[]):
+ """
+ Start monitoring of the process system usage.
+ @param pids: List of PIDs you intend to control. Use pids=[] to control
+ all defined PIDs.
+ """
+ if pids == []:
+ pids = self.pids
+
+ for pid in pids:
+ for stat in self.stats[pid][1:]:
+ stat.start()
+
+
+ def stop(self, pids=[]):
+ """
+ Stop monitoring of the process system usage.
+ @param pids: List of PIDs you intend to control. Use pids=[] to control
+ all defined PIDs.
+ """
+ if pids == []:
+ pids = self.pids
+
+ for pid in pids:
+ for stat in self.stats[pid][1:]:
+ stat.stop()
+
+
+ def dump(self, pids=[]):
+ """
+ Get the status of monitoring.
+ @param pids: List of PIDs you intend to control. Use pids=[] to control
+ all defined PIDs.
+ @return:
+ tuple([cpu load], [memory load]):
+ ([(PID1, (PID1_cpu_meas)), (PID2, (PID2_cpu_meas)), ...],
+ [(PID1, (PID1_mem_meas)), (PID2, (PID2_mem_meas)), ...])
+
+ PID1_cpu_meas:
+ average_values[], test_time, cont_meas_values[[]], time_step
+ PID1_mem_meas:
+ average_values[], test_time, cont_meas_values[[]], time_step
+ where average_values[] are the measured values (mem_free,swap,...)
+ which are described in SystemLoad.__init__()-FileFieldMonitor.
+ cont_meas_values[[]] is a list of average_values in the sampling
+ times.
+ """
+ if pids == []:
+ pids = self.pids
+
+ cpus = []
+ memory = []
+ for pid in pids:
+ stat = (pid, self.stats[pid][1].get_status())
+ cpus.append(stat)
+ for pid in pids:
+ stat = (pid, self.stats[pid][2].get_status())
+ memory.append(stat)
+
+ return (cpus, memory)
+
+
+ def get_cpu_status_string(self, pids=[]):
+ """
+ Convert status to string array.
+ @param pids: List of PIDs you intend to control. Use pids=[] to control
+ all defined PIDs.
+        @return: CPU status formatted as a string table.
+ """
+ if pids == []:
+ pids = self.pids
+
+ headers = ["NAME",
+ ("%7s") % "PID",
+ ("%5s") % "USER",
+ ("%5s") % "SYS",
+ ("%5s") % "SUM"]
+ if self.advanced:
+ headers.extend(["MINFLT/IRQC",
+ "MAJFLT/SOFTIRQ"])
+ headers.append(("%11s") % "TIME")
+ textstatus = []
+ for pid in pids:
+ stat = self.stats[pid][1].get_status()
+ time = stat[1]
+ stat = stat[0]
+ textstatus.append(["%s" % self.stats[pid][0],
+ "%7s" % pid,
+ "%4.0f%%" % (stat[0] / time),
+ "%4.0f%%" % (stat[1] / time),
+ "%4.0f%%" % ((stat[0] + stat[1]) / time),
+ "%10.3fs" % time])
+ if self.advanced:
+ textstatus[-1].insert(-1, "%11d" % stat[2])
+ textstatus[-1].insert(-1, "%14d" % stat[3])
+
+ return matrix_to_string(textstatus, tuple(headers))
+
+
+ def get_mem_status_string(self, pids=[]):
+ """
+ Convert status to string array.
+ @param pids: List of PIDs you intend to control. Use pids=[] to control
+ all defined PIDs.
+        @return: Memory status formatted as a string table.
+ """
+ if pids == []:
+ pids = self.pids
+
+ headers = ["NAME",
+ ("%7s") % "PID",
+ ("%8s") % "TOTAL/VMSIZE",
+ ("%8s") % "FREE/VMRSS",
+ ("%8s") % "BUFFERS/VMPEAK",
+ ("%8s") % "CACHED/VMSWAP",
+ ("%11s") % "TIME"]
+ textstatus = []
+ for pid in pids:
+ stat = self.stats[pid][2].get_status()
+ time = stat[1]
+ stat = stat[0]
+ textstatus.append(["%s" % self.stats[pid][0],
+ "%7s" % pid,
+ "%10dMB" % (stat[0] / 1024),
+ "%8dMB" % (stat[1] / 1024),
+ "%12dMB" % (stat[2] / 1024),
+ "%11dMB" % (stat[3] / 1024),
+ "%10.3fs" % time])
+
+ return matrix_to_string(textstatus, tuple(headers))
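+
+
+# A hedged SystemLoad sketch (pid 0 means the whole system, as noted above):
+#     load = SystemLoad([0, os.getpid()])
+#     load.start()
+#     ...  # run the workload
+#     load.stop()
+#     print load.get_cpu_status_string()
+#     print load.get_mem_status_string()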
+
+
def get_arch(run_function=run):
"""
Get the hardware architecture of the machine.
@@ -1100,6 +1500,14 @@
return pid
+def get_process_name(pid):
+ """
+ Get process name from PID.
+ @param pid: PID of process.
+ """
+ return get_field(read_file("/proc/%d/stat" % pid), 1)[1:-1]
+
+
def program_is_alive(program_name):
"""
Checks if the process is alive and not in Zombie state.
diff --git a/client/samples/all_tests b/client/samples/all_tests
deleted file mode 100644
index 88923ad..0000000
--- a/client/samples/all_tests
+++ /dev/null
@@ -1,23 +0,0 @@
-job.run_test('aiostress')
-job.run_test('bonnie')
-job.run_test('dbench')
-job.run_test('fio')
-job.run_test('fsx')
-job.run_test('interbench')
-job.run_test('isic')
-job.run_test('kernbench', iterations=2, threads=5)
-job.run_test('lmbench')
-job.run_test('ltp')
-
-job.run_test('reaim')
-job.run_test('scrashme')
-job.run_test('sleeptest')
-testkernel = job.kernel('/usr/local/src/linux-2.6.14.tar.bz2') # '2.6.14'
-testkernel.patch('/usr/local/src/patch-2.6.14-git6.bz2')
-testkernel.config('http://ftp.kernel.org/pub/linux/kernel/people/mbligh/config/config.up')
-job.run_test('sparse', testkernel)
-
-job.run_test('stress')
-job.run_test('tbench')
-job.run_test('unixbench')
-job.run_test('xmtest', args='-e nobody@nowhere.org -d xmtest')
diff --git a/client/samples/autotest_console b/client/samples/autotest_console
deleted file mode 100644
index 3825323..0000000
--- a/client/samples/autotest_console
+++ /dev/null
@@ -1,16 +0,0 @@
-# autotest_console.control - gives you an interactive interpreter within an
-# autotest control file.
-
-# If you install IPython (http://ipython.scipy.org/, Ubuntu package "ipython"),
-# you'll get a snazzy IPython console with readline and completion and all that.
-# Otherwise you'll get a simple console without any of that.
-
-# -Steve Howard (showard@google.com)
-
-try:
- import IPython
- ipshell = IPython.Shell.IPShellEmbed(argv=[])
- ipshell()
-except ImportError:
- import code
- code.interact('Autotest console', raw_input, local_env)
diff --git a/client/samples/build b/client/samples/build
deleted file mode 100644
index eaa75a1..0000000
--- a/client/samples/build
+++ /dev/null
@@ -1,6 +0,0 @@
-print "TEST: initing kernel"
-testkernel = job.kernel('2.6.18')
-testkernel.config('http://mbligh.org/config/opteron2')
-
-print "TEST: building kernel"
-testkernel.build()
diff --git a/client/samples/build2 b/client/samples/build2
deleted file mode 100644
index 27bbcd6..0000000
--- a/client/samples/build2
+++ /dev/null
@@ -1,12 +0,0 @@
-print "TEST: initing kernel 1"
-testkernel = job.kernel('/usr/local/src/linux-2.6.17.tar.bz2') # '2.6.17'
-testkernel.config('http://mbligh.org/config/opteron2')
-print "TEST: building kernel 1"
-testkernel.build()
-
-print "TEST: initing kernel 2"
-testkernel = job.kernel('/usr/local/src/linux-2.6.18.tar.bz2') # '2.6.18'
-testkernel.config('http://mbligh.org/config/opteron2')
-print "TEST: building kernel 2"
-testkernel.build()
-
diff --git a/client/samples/control.alt_tags b/client/samples/control.alt_tags
new file mode 100644
index 0000000..37cf733
--- /dev/null
+++ b/client/samples/control.alt_tags
@@ -0,0 +1,33 @@
+AUTHOR = "Autotest Team <autotest@test.kernel.org>"
+TIME = "MEDIUM"
+NAME = "Sample - Building and adding an alternate kernel"
+TEST_TYPE = "client"
+TEST_CLASS = "Kernel"
+TEST_CATEGORY = "Functional"
+
+DOC = """
+Builds a test kernel, then runs the kernbench test. This sample shows the
+job.bootloader object, as well as building kernels with alternate identifying
+tags.
+"""
+
+def step_init():
+ """
+ Build a kernel from kernel.org. This step will be executed, the machine
+ will be rebooted and then we'll proceed with step_tests.
+ """
+ job.next_step([step_tests])
+ testkernel = job.kernel('2.6.35')
+ # This is the default config that can be retrieved on gitweb
+ testkernel.config('http://git.kernel.org/?p=linux/kernel/git/stable/linux-2.6.35.y.git;a=blob_plain;f=arch/x86/configs/x86_64_defconfig;h=6c86acd847a4e28c09d951b34d488b13d44df3c7;hb=ea8a52f9f4bcc3420c38ae07f8378a2f18443970')
+ testkernel.build()
+ testkernel.install('autotest-alternate')
+ job.bootloader.list_titles()
+ testkernel.boot('autotest-alternate')
+
+
+def step_tests():
+ """
+ Run a series of autotest tests on this machine.
+ """
+ job.run_test('kernbench', iterations=2, threads=5)
diff --git a/client/samples/control.fs b/client/samples/control.fs
new file mode 100644
index 0000000..a0a65c7
--- /dev/null
+++ b/client/samples/control.fs
@@ -0,0 +1,32 @@
+AUTHOR = "Autotest Team <autotest@test.kernel.org>"
+TIME = "MEDIUM"
+NAME = "Sample - Filesystem tests with different filesystems"
+TEST_TYPE = "client"
+TEST_CLASS = "Kernel"
+TEST_CATEGORY = "Functional"
+
+DOC = """
+Runs a series of filesystem tests on a loopback partition using different
+filesystem types. This shows some features of the job.partition method, such
+as creating loopback partitions instead of using real disk partitions, and
+looping over several filesystems.
+"""
+
+partition = job.partition('/tmp/looped', 1024, job.tmpdir)
+# You can use also 'real' partitions, just comment the above and uncomment
+# the below
+#partition = job.partition('/dev/sdb1', job.tmpdir)
+
+def test_fs():
+ partition.mkfs(fstype)
+ partition.mount()
+ try:
+ job.run_test('fsx', dir=partition.mountpoint, tag=fstype)
+ job.run_test('iozone', dir=partition.mountpoint, tag=fstype)
+ job.run_test('dbench', dir=partition.mountpoint, tag=fstype)
+ finally:
+ partition.unmount()
+ partition.fsck()
+
+
+for fstype in ('ext2', 'ext3', 'jfs', 'xfs', 'reiserfs'):
+ job.run_group(test_fs)
diff --git a/client/samples/control.fs_options b/client/samples/control.fs_options
new file mode 100644
index 0000000..e126c3c
--- /dev/null
+++ b/client/samples/control.fs_options
@@ -0,0 +1,36 @@
+AUTHOR = "Autotest Team <autotest@test.kernel.org>"
+TIME = "MEDIUM"
+NAME = "Sample - Filesystem tests with different fs options"
+TEST_TYPE = "client"
+TEST_CLASS = "Kernel"
+TEST_CATEGORY = "Functional"
+
+DOC = """
+Runs a series of filesystem tests on a loopback partition. This shows some
+features of the job.partition method, such as creating loopback partitions
+instead of using real disk partitions, looping and tags.
+"""
+
+partition = job.partition(device='/tmp/looped', loop_size=1024,
+ mountpoint=job.tmpdir)
+# You can use also 'real' partitions, just comment the above and uncomment
+# the below
+#partition = job.partition('/dev/sdb1', job.tmpdir)
+
+iters = 10
+
+for fstype, mountopts, tag in (('ext2', '', 'ext2'),
+ ('ext3', '-o data=writeback', 'ext3writeback'),
+ ('ext3', '-o data=ordered', 'ext3ordered'),
+ ('ext3', '-o data=journal', 'ext3journal'),
+ ('ext4', '-o data=ordered', 'ext4ordered'),
+ ('ext4', '-o data=journal', 'ext4journal'),):
+ partition.mkfs(fstype)
+ partition.mount(args=mountopts)
+ try:
+ job.run_test('fsx', dir=job.tmpdir, tag=tag)
+ job.run_test('iozone', dir=job.tmpdir, iterations=iters, tag=tag)
+ job.run_test('dbench', iterations=iters, dir=job.tmpdir, tag=tag)
+ job.run_test('tiobench', dir=job.tmpdir, tag=tag)
+ finally:
+ partition.unmount()
diff --git a/client/samples/control.interactive_console b/client/samples/control.interactive_console
new file mode 100644
index 0000000..75b09ab
--- /dev/null
+++ b/client/samples/control.interactive_console
@@ -0,0 +1,25 @@
+AUTHOR = "Steve Howard <showard@google.com>"
+TIME = "SHORT"
+NAME = "Sample - Autotest console"
+TEST_TYPE = "client"
+TEST_CLASS = "Kernel"
+TEST_CATEGORY = "Functional"
+
+DOC = """
+Gives you an interactive interpreter within an autotest control file.
+
+If you install IPython (http://ipython.scipy.org/, Ubuntu and Fedora's package
+"ipython"), you'll get a snazzy IPython console with readline and completion
+and all that. Otherwise you'll get a simple python console.
+
+The point of this control file is to give you an interactive interpreter with
+all autotest 'magic' loaded in, so you can inspect objects and have fun.
+"""
+
+try:
+ import IPython
+ ipshell = IPython.Shell.IPShellEmbed(argv=[], banner='autotest console')
+ ipshell()
+except ImportError:
+ import code
+ code.interact('autotest console', raw_input)
diff --git a/client/samples/control.kbuild_and_tests b/client/samples/control.kbuild_and_tests
new file mode 100644
index 0000000..59dd757
--- /dev/null
+++ b/client/samples/control.kbuild_and_tests
@@ -0,0 +1,66 @@
+AUTHOR = "Autotest Team <autotest@test.kernel.org>"
+TIME = "MEDIUM"
+NAME = "Sample - Kernel tests"
+TEST_TYPE = "client"
+TEST_CLASS = "Kernel"
+TEST_CATEGORY = "Functional"
+
+DOC = """
+Builds a test kernel, then runs a series of tests on it. This control file shows
+features such as:
+
+ * The step engine - autotest mechanism of executing jobs in steps, where you
+ can interrupt the flow of execution with client reboots, in order to boot
+ newly built kernels
+ * Downloading, configuring, patching, building and booting a kernel straight
+ from kernel.org.
+ * Kernel expand - provide a string such as '2.6.36-git13' and have autotest
+ expand that and download the proper source tarballs and patches
+ automatically.
+ * Local kernel.org mirror - Alternate kernel.org mirror that you can set on
+ your control file.
+"""
+
+def step_init():
+ """
+ Build a kernel from kernel.org. This step will be executed, the machine
+ will be rebooted and then we'll proceed with step_tests.
+ """
+ job.next_step([step_tests])
+ # If you have a local/different kernel.org mirror, you can set it by
+ # uncommenting the below and set the URL properly.
+ #job.config_set('local_mirror', 'http://foo/bar')
+ testkernel = job.kernel('2.6.35')
+ # If you want to see kernel expansion in action, comment the above and
+ # uncomment the below. Keep in mind that after some months, it's expected
+ # that some of the patches might not exist, so you might want to edit
+ # this to satisfy your needs.
+ #testkernel = job.kernel('2.6.36-git13')
+ # You can provide a path to an uncompressed kernel source as well
+ #testkernel = job.kernel('/path/to/kernel-source')
+ testkernel.patch('http://www.kernel.org/pub/linux/kernel/v2.6/patch-2.6.35.7.bz2')
+ # This is the default config that can be retrieved on gitweb
+ testkernel.config('http://git.kernel.org/?p=linux/kernel/git/stable/linux-2.6.35.y.git;a=blob_plain;f=arch/x86/configs/x86_64_defconfig;h=6c86acd847a4e28c09d951b34d488b13d44df3c7;hb=ea8a52f9f4bcc3420c38ae07f8378a2f18443970')
+ testkernel.build()
+ testkernel.boot()
+
+
+def step_tests():
+ """
+ Run a series of autotest tests on this machine.
+ """
+ job.run_test('aiostress')
+ job.run_test('bonnie')
+ job.run_test('dbench')
+ job.run_test('fio')
+ job.run_test('fsx')
+ job.run_test('interbench')
+ job.run_test('isic')
+ job.run_test('kernbench', iterations=2, threads=5)
+ job.run_test('lmbench')
+ job.run_test('ltp')
+ job.run_test('reaim')
+ job.run_test('sparse')
+ job.run_test('stress')
+ job.run_test('tbench')
+ job.run_test('unixbench')
diff --git a/client/samples/control.oprofile b/client/samples/control.oprofile
new file mode 100644
index 0000000..db5730a
--- /dev/null
+++ b/client/samples/control.oprofile
@@ -0,0 +1,24 @@
+AUTHOR = "Autotest Team <autotest@test.kernel.org>"
+TIME = "MEDIUM"
+NAME = "Sample - Using profilers"
+TEST_TYPE = "client"
+TEST_CLASS = "Kernel"
+TEST_CATEGORY = "Functional"
+
+DOC = """
+Runs our sleeptest (bogus test that only sleeps for a given amount of time),
+while running the oprofile profiler, with and without special parameters
+passed to the profiler.
+"""
+
+import logging
+
+logging.info("Testing default event")
+job.profilers.add('oprofile')
+job.run_test('sleeptest', seconds=5, tag='default')
+job.profilers.delete('oprofile')
+
+logging.info("Testing ICACHE_MISSES")
+job.profilers.add('oprofile', 'ICACHE_MISSES:100000')
+job.run_test('sleeptest', seconds=5, tag='icache_misses')
+job.profilers.delete('oprofile')
diff --git a/client/samples/control.oprofile_power5 b/client/samples/control.oprofile_power5
new file mode 100644
index 0000000..18eecc9
--- /dev/null
+++ b/client/samples/control.oprofile_power5
@@ -0,0 +1,43 @@
+AUTHOR = "Autotest Team <autotest@test.kernel.org>"
+TIME = "MEDIUM"
+NAME = "Sample - Using oprofile - specific power5 options"
+TEST_TYPE = "client"
+TEST_CLASS = "Kernel"
+TEST_CATEGORY = "Functional"
+
+DOC = """
+Runs our sleeptest (bogus test that only sleeps for a given amount of time),
+while running the oprofile profiler, with and without special parameters
+passed to the profiler. This particular control file shows special features of
+oprofile on the POWER5 CPU architecture.
+"""
+
+import logging
+
+logging.info("Testing default event")
+job.profilers.add('oprofile')
+job.run_test('sleeptest', seconds=5, tag='default')
+job.profilers.delete('oprofile')
+
+logging.info("Testing specified vmlinux")
+job.profilers.add('oprofile', '/boot/vmlinux-autotest')
+job.run_test('sleeptest', seconds=5, tag='vmlinux')
+job.profilers.delete('oprofile')
+
+logging.info("Testing one event")
+job.profilers.add('oprofile', None, ['PM_RUN_CYC_GRP153:100000'])
+job.run_test('sleeptest', seconds=5, tag='one')
+job.profilers.delete('oprofile')
+
+logging.info("Testing multiple events")
+job.profilers.add('oprofile', None,
+ ['PM_RUN_CYC_GRP153:100000', 'PM_INST_CMPL_GRP153:10000'])
+job.run_test('sleeptest', seconds=5, tag='multi')
+job.profilers.delete('oprofile')
+
+logging.info("Testing other args")
+job.profilers.add('oprofile', None,
+ ['PM_RUN_CYC_GRP153:150000', 'PM_INST_CMPL_GRP153:150000'],
+ '--callgraph=3')
+job.run_test('sleeptest', seconds=5, tag='other')
+job.profilers.delete('oprofile')
diff --git a/client/samples/control.parallel b/client/samples/control.parallel
new file mode 100644
index 0000000..47a5ac9
--- /dev/null
+++ b/client/samples/control.parallel
@@ -0,0 +1,20 @@
+AUTHOR = "Autotest Team <autotest@test.kernel.org>"
+TIME = "SHORT"
+NAME = "Sample - Parallel test execution"
+TEST_TYPE = "client"
+TEST_CLASS = "Kernel"
+TEST_CATEGORY = "Functional"
+
+DOC = """
+Runs 2 client tests in parallel, with different options.
+"""
+
+def kernbench():
+ job.run_test('kernbench', iterations=2, threads=5)
+
+
+def dbench():
+ job.run_test('dbench')
+
+
+job.parallel([kernbench], [dbench])
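
Editorial note: judging from the list form above, each argument to job.parallel
appears to be a sequence whose first element is a callable and whose remaining
elements are passed to it as arguments. A minimal sketch under that assumption
(kernbench_n is a hypothetical name):

    def kernbench_n(iterations):
        job.run_test('kernbench', iterations=iterations, threads=5)

    # run kernbench_n(4) in parallel with the dbench wrapper defined above
    job.parallel([kernbench_n, 4], [dbench])
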
diff --git a/client/samples/control.patch_verify b/client/samples/control.patch_verify
new file mode 100644
index 0000000..a03e93b
--- /dev/null
+++ b/client/samples/control.patch_verify
@@ -0,0 +1,42 @@
+AUTHOR = "Autotest Team <autotest@test.kernel.org>"
+TIME = "SHORT"
+NAME = "Sample - Parallel dd with kernel build - patch verification"
+TEST_TYPE = "client"
+TEST_CLASS = "Kernel"
+TEST_CATEGORY = "Functional"
+
+DOC = """
+Parallel dd test run together with a kernel build, with the objective of
+checking whether differences show up after a patch is applied to the
+mainline kernel.
+"""
+
+def tests(tag):
+ partition = job.partition('/tmp/looped', 1024, job.tmpdir)
+ # You can also use 'real' partitions; just comment out the above and
+ # uncomment the line below
+ #partition = job.partition('/dev/sdb1', job.tmpdir)
+
+ job.run_test('parallel_dd', fs=partition, fs_type='ext4', iterations=1,
+ megabytes=1000, streams=2, tag=tag)
+
+
+def step_init():
+ testkernel = job.kernel('/usr/src/linux-2.6.36.tar.bz2')
+ testkernel.config('/usr/src/config')
+ testkernel.build()
+ job.next_step([step_one])
+ testkernel.boot()
+
+
+def step_one():
+ tests('mainline')
+ testkernel = job.kernel('/usr/src/linux-2.6.36.tar.bz2')
+ testkernel.patch('/usr/src/ext4_patch')
+ testkernel.config('/usr/src/config')
+ testkernel.build()
+ job.next_step([step_two])
+ testkernel.boot()
+
+
+def step_two():
+ tests('post_patch')
diff --git a/client/samples/control.profilers b/client/samples/control.profilers
new file mode 100644
index 0000000..5ed545f
--- /dev/null
+++ b/client/samples/control.profilers
@@ -0,0 +1,24 @@
+AUTHOR = "Autotest Team <autotest@test.kernel.org>"
+TIME = "SHORT"
+NAME = "Sample - More profilers"
+TEST_TYPE = "client"
+TEST_CLASS = "Kernel"
+TEST_CATEGORY = "Functional"
+
+DOC = """
+Runs sleeptest with different profilers present in the autotest tree. Also,
+it shows the convenience logging methods, such as logging.info and
+logging.error.
+"""
+
+import logging
+
+for profiler in ('readprofile', 'oprofile', 'catprofile', 'lockmeter'):
+ try:
+ logging.info("Testing profiler %s", profiler)
+ job.profilers.add(profiler)
+ job.run_test('sleeptest', seconds=5, tag=profiler)
+ job.profilers.delete(profiler)
+ except:
+ logging.error("Test of profiler %s failed", profiler)
+ raise
diff --git a/client/samples/control.rpm_kernel b/client/samples/control.rpm_kernel
new file mode 100644
index 0000000..d3dc8b3
--- /dev/null
+++ b/client/samples/control.rpm_kernel
@@ -0,0 +1,22 @@
+AUTHOR = "Autotest Team <autotest@test.kernel.org>"
+TIME = "SHORT"
+NAME = "Sample - Kernel RPM install"
+TEST_TYPE = "client"
+TEST_CLASS = "Kernel"
+TEST_CATEGORY = "Functional"
+
+DOC = """
+Runs sleeptest after installing a kernel RPM. Please note that this syntax
+works only if you have an autotest package repository properly set up.
+"""
+
+def step_init():
+ job.next_step([step_test])
+ # The below won't work unless you have configured an autotest package
+ # repository.
+ testkernel = job.kernel('kernel-smp-2.6.18-210.4.x86_64.rpm')
+ testkernel.install()
+ testkernel.boot() # does autotest by default
+
+def step_test():
+ job.run_test('sleeptest')
diff --git a/client/samples/control.soft_reboot b/client/samples/control.soft_reboot
new file mode 100644
index 0000000..30f3403
--- /dev/null
+++ b/client/samples/control.soft_reboot
@@ -0,0 +1,29 @@
+AUTHOR = "Autotest Team <autotest@test.kernel.org>"
+TIME = 'MEDIUM'
+NAME = "Sample - Machine reboot"
+TEST_TYPE = "client"
+TEST_CLASS = "Kernel"
+
+DOC = """
+This test will reboot the machine ITERATIONS number of times. Note that if you
+use this, you'll need to use the step engine for any tests that occur after
+this one. This means that this may not play well when run from the autotest
+front end.
+"""
+
+ITERATIONS = 5
+
+tries = job.get_state('soft_reboot_tries', 0) + 1
+job.set_state("soft_reboot_tries", tries)
+
+if tries < ITERATIONS:
+ import sys
+ this_functions_name = sys._getframe().f_code.co_name
+ if this_functions_name != "?":
+ # If we're not in a function (i.e. we get "?") then we're
+ # not using the step engine and thus no next step is
+ # necessary
+ job.next_step_prepend(this_functions_name)
+
+if tries <= ITERATIONS:
+ job.reboot()
diff --git a/client/samples/control.xenbuild b/client/samples/control.xenbuild
new file mode 100644
index 0000000..12e09f3
--- /dev/null
+++ b/client/samples/control.xenbuild
@@ -0,0 +1,31 @@
+AUTHOR = "Autotest Team <autotest@test.kernel.org>"
+TIME = 'MEDIUM'
+NAME = "Sample - Xen Build"
+TEST_TYPE = "client"
+TEST_CLASS = "Kernel"
+
+DOC = """
+This is a sample job to build xen and a kernel with xen patches.
+The xen unstable tarball comes with scripts that will automatically
+patch a linux kernel; however, in some cases (PPC, for example) the
+kernel is built from a separate source. The xen class supports
+defining your own kernel job and handing that to the xen job. If
+no kernel job is specified, it will create one using the kernel source
+that the xen tarball uses.
+"""
+
+def step_init():
+ xensrc = 'http://www.cl.cam.ac.uk/Research/SRG/netos/xen/downloads/xen-unstable-src.tgz'
+ # Uncomment the lines below if you want to build an alternate kernel
+ #testkernel = job.kernel('/usr/local/src/linux-2.6.18.tar.bz2')
+ #testkernel.config('http://mbligh.org/config/opteron2')
+ #testxen = job.xen(xensrc, kjob = testkernel)
+ testxen = job.xen(xensrc)
+ testxen.build()
+ testxen.install(tag='autotest', prefix='/tmp/xen')
+ # Register the test step before booting the newly built kernel;
+ # comment out the boot below if you do not want to reboot
+ job.next_step([step_test])
+ testxen.boot()
+
+
+def step_test():
+ job.run_test('sleeptest')
diff --git a/client/samples/ext3_options b/client/samples/ext3_options
deleted file mode 100644
index 7e7cf2c..0000000
--- a/client/samples/ext3_options
+++ /dev/null
@@ -1,20 +0,0 @@
-fs = job.filesystem('/dev/sda3', job.tmpdir)
-
-iters=10
-
-for fstype, mountopts, tag in (('ext2', '', 'ext2'),
- ('ext3', '-o data=writeback', 'ext3writeback'),
- ('ext3', '-o data=ordered', 'ext3ordered'),
- ('ext3', '-o data=journal', 'ext3journal')):
- fs.mkfs(fstype)
- fs.mount(args=mountopts)
- try:
- job.run_test('fsx', dir=job.tmpdir, tag=tag)
- job.run_test('iozone', dir=job.tmpdir, iterations=iters, tag=tag)
- job.run_test('dbench',
- iterations=iters,
- dir=job.tmpdir,
- tag=tag)
- job.run_test('tiobench', dir=job.tmpdir, tag=tag)
- finally:
- fs.unmount()
diff --git a/client/samples/kernelexpand b/client/samples/kernelexpand
deleted file mode 100644
index bdc4daa..0000000
--- a/client/samples/kernelexpand
+++ /dev/null
@@ -1 +0,0 @@
-testkernel = job.kernel('2.6.18-rc7-git1')
diff --git a/client/samples/mirror b/client/samples/mirror
deleted file mode 100644
index 297a64c..0000000
--- a/client/samples/mirror
+++ /dev/null
@@ -1,3 +0,0 @@
-testkernel = job.kernel('2.6.14-rc1-mm1')
-job.config_set('local_mirror', 'http://foo/bar')
-testkernel = job.kernel('2.6.14-rc1-mm1')
diff --git a/client/samples/oprofile b/client/samples/oprofile
deleted file mode 100644
index 42c0d36..0000000
--- a/client/samples/oprofile
+++ /dev/null
@@ -1,11 +0,0 @@
-print "Testing default event"
-
-job.profilers.add('oprofile')
-job.run_test('sleeptest', seconds=1, tag='default')
-job.profilers.delete('oprofile')
-
-print "Testing ICACHE_MISSES"
-
-job.profilers.add('oprofile', 'ICACHE_MISSES:100000')
-job.run_test('sleeptest', seconds=1, tag='icache_misses')
-job.profilers.delete('oprofile')
diff --git a/client/samples/oprofile.power5+ b/client/samples/oprofile.power5+
deleted file mode 100644
index 7a84fa2..0000000
--- a/client/samples/oprofile.power5+
+++ /dev/null
@@ -1,24 +0,0 @@
-print "Testing default event"
-job.profilers.add('oprofile')
-job.run_test('sleeptest', seconds=1, tag='default')
-job.profilers.delete('oprofile')
-
-print "Testing specified vmlinux"
-job.profilers.add('oprofile', '/boot/vmlinux-autotest')
-job.run_test('sleeptest', seconds=1, tag='vmlinux')
-job.profilers.delete('oprofile')
-
-print "Testing one event"
-job.profilers.add('oprofile', None, ['PM_RUN_CYC_GRP153:100000'])
-job.run_test('sleeptest', seconds=1, tag='one')
-job.profilers.delete('oprofile')
-
-print "Testing multiple events"
-job.profilers.add('oprofile', None, ['PM_RUN_CYC_GRP153:100000', 'PM_INST_CMPL_GRP153:10000'])
-job.run_test('sleeptest', seconds=1, tag='multi')
-job.profilers.delete('oprofile')
-
-print "Testing other args"
-job.profilers.add('oprofile', None, ['PM_RUN_CYC_GRP153:150000', 'PM_INST_CMPL_GRP153:150000'], '--callgraph=3')
-job.run_test('sleeptest', seconds=1, tag='other')
-job.profilers.delete('oprofile')
diff --git a/client/samples/parallel b/client/samples/parallel
deleted file mode 100644
index c871039..0000000
--- a/client/samples/parallel
+++ /dev/null
@@ -1,7 +0,0 @@
-def kernbench():
- job.run_test('kernbench', iterations=2, threads=5)
-
-def dbench():
- job.run_test('dbench')
-
-job.parallel([kernbench], [dbench])
diff --git a/client/samples/parallel_dd b/client/samples/parallel_dd
deleted file mode 100644
index 57e712b..0000000
--- a/client/samples/parallel_dd
+++ /dev/null
@@ -1,22 +0,0 @@
-def tests(tag):
- fs = job.filesystem('/dev/sda3', job.tmpdir)
- job.run_test('parallel_dd', fs=fs, fs_type='ext2', iterations=1, megabytes=1000, streams=2, tag=tag)
-
-def step_init():
- testkernel = job.kernel('/usr/src/linux-2.6.18.tar.bz2')
- testkernel.config('/usr/src/config')
- testkernel.build()
- job.next_step([step_one])
- testkernel.boot()
-
-def step_one():
- tests('mainline')
- testkernel = job.kernel('/usr/src/linux-2.6.18.tar.bz2')
- testkernel.patch('/usr/src/ext2_reservations')
- testkernel.config('/usr/src/config')
- testkernel.build()
- job.next_step([step_two])
- testkernel.boot()
-
-def step_two():
- tests('extents')
diff --git a/client/samples/profilers b/client/samples/profilers
deleted file mode 100644
index 949ddd6..0000000
--- a/client/samples/profilers
+++ /dev/null
@@ -1,9 +0,0 @@
-for profiler in ('readprofile', 'oprofile', 'catprofile', 'lockmeter'):
- try:
- print "Testing profiler %s ..." % profiler
- job.profilers.add(profiler)
- job.run_test('sleeptest', seconds=3, tag=profiler)
- job.profilers.delete(profiler)
- except:
- print "Test of profiler %s failed" % profiler
- raise
diff --git a/client/samples/reboot b/client/samples/reboot
deleted file mode 100644
index b0daab3..0000000
--- a/client/samples/reboot
+++ /dev/null
@@ -1 +0,0 @@
-job.reboot()
diff --git a/client/samples/rpm_kernel b/client/samples/rpm_kernel
deleted file mode 100644
index 0aa8616..0000000
--- a/client/samples/rpm_kernel
+++ /dev/null
@@ -1,9 +0,0 @@
-def step_init():
- job.next_step([step_test])
- testkernel = job.kernel('kernel-smp-2.6.18-210.4.x86_64.rpm')
- testkernel.install()
- testkernel.boot() # does autotest by default
-
-def step_test():
- print "TEST: sleeptest"
- job.run_test('sleeptest')
diff --git a/client/samples/soft_reboot b/client/samples/soft_reboot
deleted file mode 100644
index c5d6727..0000000
--- a/client/samples/soft_reboot
+++ /dev/null
@@ -1,24 +0,0 @@
-TIME = 'MEDIUM'
-DOC = """\
-This test will reboot the machine ITERATIONS number of times. Note that if you
-use this, you'll need to use the step engine for any tests that occur after
-this one. This means that this may not play well when run from the autotest
-front end."""
-
-
-ITERATIONS = 5
-
-tries = job.get_state('soft_reboot_tries', 0) + 1
-job.set_state("soft_reboot_tries", tries)
-
-if tries < ITERATIONS:
- import sys
- this_functions_name = sys._getframe().f_code.co_name
- if this_functions_name != "?":
- # If we're not in a function (i.e. we get "?") then we're
- # not using the step engine and thus no next step is
- # necessary
- job.next_step_prepend(this_functions_name)
-
-if tries <= ITERATIONS:
- job.reboot()
diff --git a/client/samples/test_add_kernel b/client/samples/test_add_kernel
deleted file mode 100644
index e6cae50..0000000
--- a/client/samples/test_add_kernel
+++ /dev/null
@@ -1,19 +0,0 @@
-print "TEST: initing kernel"
-testkernel = job.kernel('/usr/local/src/linux-2.6.14.tar.bz2') # '2.6.14'
-testkernel.patch('/usr/local/src/patch-2.6.14-git6.bz2')
-testkernel.config('http://ftp.kernel.org/pub/linux/kernel/people/mbligh/config/config.up')
-
-print "TEST: building kernel"
-testkernel.build()
-
-print "TEST: installing kernel"
-testkernel.install('autotest')
-
-print "TEST: adding kernel to bootloader"
-testkernel.add_to_bootloader('autotest') # using default boot args (/proc/cmdline)
-
-print "TEST: listing bootloader entries"
-job.bootloader.list_titles()
-
-print "TEST: initing kernbench"
-job.run_test('kernbench', iterations=2, threads=5)
diff --git a/client/samples/test_install b/client/samples/test_install
deleted file mode 100644
index ac50986..0000000
--- a/client/samples/test_install
+++ /dev/null
@@ -1,7 +0,0 @@
-print "TEST: initing kernel"
-testkernel = job.kernel('/usr/local/autotest/tmp2/build/build')
-print "TEST: installing kernel"
-testkernel.install()
-# print "TEST: booting system"
-# system.boot() # does autotest by default
-
diff --git a/client/samples/test_job b/client/samples/test_job
deleted file mode 100644
index 3725cb9..0000000
--- a/client/samples/test_job
+++ /dev/null
@@ -1,18 +0,0 @@
-def step_init():
- job.next_step([step_test])
-
- print "TEST: initing kernel"
- testkernel = job.kernel('2.6.18')
- testkernel.config('http://mbligh.org/config/opteron2')
-
- print "TEST: building kernel"
- testkernel.build()
-
- print "TEST: booting system"
- testkernel.boot() # does autotest by default
-
-def step_test():
- print "TEST: sleeptest"
- job.run_test('sleeptest', seconds=10)
- print "TEST: kernbench"
- job.run_test('kernbench')
diff --git a/client/samples/test_xenbuild b/client/samples/test_xenbuild
deleted file mode 100644
index 2a9dbcf..0000000
--- a/client/samples/test_xenbuild
+++ /dev/null
@@ -1,38 +0,0 @@
-# this is a sample job to build xen and kernel with xen patches.
-# The xen unstable tarball comes with scripts that will automatically
-# patch a linux kernel, however, in some cases, PPC for example, the
-# kernel is built from a seperate source. The xen class supports
-# defining your own kernel job, and handing that to the xen job. If
-# no kernel job is specified, it will create one using the kernel source
-# that the xen tarball uses.
-xensrc = 'http://www.cl.cam.ac.uk/Research/SRG/netos/xen/downloads/xen-unstable-src.tgz'
-
-#
-# uncomment the section below if you want to specify your own kernel job
-# for the xen build
-#
-# ------ start uncomment ------
-#print "TEST: initing kernel"
-#testkernel = job.kernel('/usr/local/src/linux-2.6.18.tar.bz2') # 2.4.18
-#testkernel.config('http://mbligh.org/config/opteron2')
-#print "TEST: initing xen"
-#testxen = job.xen(xensrc, kjob = testkernel)
-# ------ end uncomment ------
-
-# build xen using default xen kernel
-# comment the next two lines out if you are using the above custom
-# kernel job
-
-print "TEST: initing xen"
-testxen = job.xen(xensrc)
-
-print "TEST: building xen"
-testxen.build()
-
-print "TEST: installing xen"
-# using a dir in tmp as xen install needs root perms
-#
-testxen.install(tag='autotest', prefix='/tmp/xen')
-
-# uncomment if you run as root and want to install the xen build
-# testxen.boot()
diff --git a/client/tests/kvm/address_pools.cfg.sample b/client/tests/kvm/address_pools.cfg.sample
deleted file mode 100644
index b5967ce..0000000
--- a/client/tests/kvm/address_pools.cfg.sample
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copy this file to address_pools.cfg and edit it.
-#
-# This file specifies several MAC-IP ranges for each host in the network that
-# may run KVM tests. A MAC address must not be used twice, so these ranges
-# must not overlap. The VMs running on each host will only use MAC addresses
-# from the pool of that host.
-# If you wish to use a static MAC-IP mapping, where each MAC address range is
-# mapped to a known corresponding IP address range, specify the bases of the IP
-# address ranges in this file.
-# If you specify a MAC address range without a corresponding IP address range,
-# the IP addresses for that range will be determined at runtime by listening
-# to DHCP traffic using tcpdump.
-# If you wish to determine IP addresses using tcpdump in any case, regardless
-# of any # IP addresses specified in this file, uncomment the following line:
-#always_use_tcpdump = yes
-# You may also specify this parameter for specific hosts by adding it in the
-# appropriate sections below.
-
-variants:
- # Rename host1 to an actual (short) hostname in the network that will be running the Autotest client
- - @host1:
- # Add/remove ranges here
- address_ranges = r1 r2
-
- # Modify the following parameters to reflect the DHCP server's configuration
- address_range_base_mac_r1 = 52:54:00:12:35:56
- #address_range_base_ip_r1 = 10.0.2.20
- address_range_size_r1 = 16
-
- # Modify the following parameters to reflect the DHCP server's configuration
- address_range_base_mac_r2 = 52:54:00:12:35:80
- #address_range_base_ip_r2 = 10.0.2.40
- address_range_size_r2 = 16
-
- # Rename host2 to an actual (short) hostname in the network that will be running the Autotest client
- - @host2:
- # Add/remove ranges here
- address_ranges = r1 r2
-
- # Modify the following parameters to reflect the DHCP server's configuration
- address_range_base_mac_r1 = 52:54:00:12:36:56
- #address_range_base_ip_r1 = 10.0.3.20
- address_range_size_r1 = 16
-
- # Modify the following parameters to reflect the DHCP server's configuration
- address_range_base_mac_r2 = 52:54:00:12:36:80
- #address_range_base_ip_r2 = 10.0.3.40
- address_range_size_r2 = 16
-
- # Add additional hosts here...
-
- # This will be used for hosts that do not appear on the list
- - @default_host:
- # Add/remove ranges here
- address_ranges = r1 r2
-
- # Modify the following parameters to reflect the DHCP server's configuration
- address_range_base_mac_r1 = 52:54:00:12:34:56
- #address_range_base_ip_r1 = 10.0.1.20
- address_range_size_r1 = 16
-
- # Modify the following parameters to reflect the DHCP server's configuration
- address_range_base_mac_r2 = 52:54:00:12:34:80
- #address_range_base_ip_r2 = 10.0.1.40
- address_range_size_r2 = 16
diff --git a/client/tests/kvm/control b/client/tests/kvm/control
index a69eacf..63bbe5d 100644
--- a/client/tests/kvm/control
+++ b/client/tests/kvm/control
@@ -55,14 +55,6 @@
tests_cfg_path = os.path.join(kvm_test_dir, "tests.cfg")
tests_cfg.fork_and_parse(tests_cfg_path, str)
-pools_cfg_path = os.path.join(kvm_test_dir, "address_pools.cfg")
-tests_cfg.parse_file(pools_cfg_path)
-hostname = os.uname()[1].split(".")[0]
-if tests_cfg.count("^" + hostname):
- tests_cfg.parse_string("only ^%s" % hostname)
-else:
- tests_cfg.parse_string("only ^default_host")
-
# Run the tests
kvm_utils.run_tests(tests_cfg.get_generator(), job)
diff --git a/client/tests/kvm/control.parallel b/client/tests/kvm/control.parallel
index 07bc6e5..ac84638 100644
--- a/client/tests/kvm/control.parallel
+++ b/client/tests/kvm/control.parallel
@@ -171,15 +171,6 @@
filename = os.path.join(pwd, "tests.cfg")
cfg.fork_and_parse(filename, str)
-filename = os.path.join(pwd, "address_pools.cfg")
-if os.path.exists(filename):
- cfg.parse_file(filename)
- hostname = os.uname()[1].split(".")[0]
- if cfg.count("^" + hostname):
- cfg.parse_string("only ^%s" % hostname)
- else:
- cfg.parse_string("only ^default_host")
-
tests = cfg.get_list()
diff --git a/client/tests/kvm/get_started.py b/client/tests/kvm/get_started.py
index 00b5f6b..6fa6b5f 100755
--- a/client/tests/kvm/get_started.py
+++ b/client/tests/kvm/get_started.py
@@ -68,8 +68,8 @@
"config samples to actual config files)")
kvm_test_dir = os.path.dirname(sys.modules[__name__].__file__)
kvm_test_dir = os.path.abspath(kvm_test_dir)
- config_file_list = ["address_pools.cfg", "build.cfg", "cdkeys.cfg",
- "tests_base.cfg", "tests.cfg", "unittests.cfg"]
+ config_file_list = ["build.cfg", "cdkeys.cfg", "tests_base.cfg",
+ "tests.cfg", "unittests.cfg"]
for config_file in config_file_list:
src_file = os.path.join(kvm_test_dir, "%s.sample" % config_file)
dst_file = os.path.join(kvm_test_dir, config_file)
diff --git a/client/tests/kvm/kvm_monitor.py b/client/tests/kvm/kvm_monitor.py
index 8440835..7e6b594 100644
--- a/client/tests/kvm/kvm_monitor.py
+++ b/client/tests/kvm/kvm_monitor.py
@@ -4,7 +4,7 @@
@copyright: 2008-2010 Red Hat Inc.
"""
-import socket, time, threading, logging
+import socket, time, threading, logging, select
import kvm_utils
try:
import json
@@ -21,7 +21,7 @@
pass
-class MonitorSendError(MonitorError):
+class MonitorSocketError(MonitorError):
pass
@@ -38,7 +38,15 @@
class QMPCmdError(MonitorError):
- pass
+ def __init__(self, cmd, qmp_args, data):
+ MonitorError.__init__(self, cmd, qmp_args, data)
+ self.cmd = cmd
+ self.qmp_args = qmp_args
+ self.data = data
+
+ def __str__(self):
+ return ("QMP command %r failed (arguments: %r, error message: %r)" %
+ (self.cmd, self.qmp_args, self.data))
class Monitor:
@@ -58,7 +66,6 @@
self.filename = filename
self._lock = threading.RLock()
self._socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
- self._socket.setblocking(False)
try:
self._socket.connect(filename)
@@ -102,13 +109,19 @@
return False
+ def _data_available(self, timeout=0):
+ timeout = max(0, timeout)
+ return bool(select.select([self._socket], [], [], timeout)[0])
+
+
def _recvall(self):
s = ""
- while True:
+ while self._data_available():
try:
data = self._socket.recv(1024)
- except socket.error:
- break
+ except socket.error, (errno, msg):
+ raise MonitorSocketError("Could not receive data from monitor "
+ "(%s)" % msg)
if not data:
break
s += data
@@ -130,7 +143,7 @@
suppress_exceptions is False
@raise MonitorProtocolError: Raised if the initial (qemu) prompt isn't
found and suppress_exceptions is False
- @note: Other exceptions may be raised. See _get_command_output's
+ @note: Other exceptions may be raised. See cmd()'s
docstring.
"""
try:
@@ -146,7 +159,7 @@
"Output so far: %r" % o)
# Save the output of 'help' for future use
- self._help_str = self._get_command_output("help")
+ self._help_str = self.cmd("help")
except MonitorError, e:
if suppress_exceptions:
@@ -158,46 +171,47 @@
# Private methods
def _read_up_to_qemu_prompt(self, timeout=20):
- o = ""
+ s = ""
end_time = time.time() + timeout
- while time.time() < end_time:
+ while self._data_available(end_time - time.time()):
+ data = self._recvall()
+ if not data:
+ break
+ s += data
try:
- data = self._socket.recv(1024)
- if not data:
- break
- o += data
- if o.splitlines()[-1].split()[-1] == "(qemu)":
- return True, "\n".join(o.splitlines()[:-1])
- except (socket.error, IndexError):
- time.sleep(0.01)
- return False, "\n".join(o.splitlines())
+ if s.splitlines()[-1].split()[-1] == "(qemu)":
+ return True, "\n".join(s.splitlines()[:-1])
+ except IndexError:
+ continue
+ return False, "\n".join(s.splitlines())
- def _send_command(self, command):
+ def _send(self, cmd):
"""
Send a command without waiting for output.
- @param command: Command to send
- @return: True if successful, False otherwise
+ @param cmd: Command to send
@raise MonitorLockError: Raised if the lock cannot be acquired
- @raise MonitorSendError: Raised if the command cannot be sent
+ @raise MonitorSocketError: Raised if a socket error occurs
"""
if not self._acquire_lock(20):
raise MonitorLockError("Could not acquire exclusive lock to send "
- "monitor command '%s'" % command)
+ "monitor command '%s'" % cmd)
try:
try:
- self._socket.sendall(command + "\n")
- except socket.error:
- raise MonitorSendError("Could not send monitor command '%s'" %
- command)
+ self._socket.sendall(cmd + "\n")
+ except socket.error, (errno, msg):
+ raise MonitorSocketError("Could not send monitor command '%s' "
+ "(%s)" % (cmd, msg))
finally:
self._lock.release()
- def _get_command_output(self, command, timeout=20):
+ # Public methods
+
+ def cmd(self, command, timeout=20):
"""
Send command to the monitor.
@@ -205,7 +219,7 @@
@param timeout: Time duration to wait for the (qemu) prompt to return
@return: Output received from the monitor
@raise MonitorLockError: Raised if the lock cannot be acquired
- @raise MonitorSendError: Raised if the command cannot be sent
+ @raise MonitorSocketError: Raised if a socket error occurs
@raise MonitorProtocolError: Raised if the (qemu) prompt cannot be
found after sending the command
"""
@@ -217,7 +231,7 @@
# Read any data that might be available
self._recvall()
# Send command
- self._send_command(command)
+ self._send(command)
# Read output
s, o = self._read_up_to_qemu_prompt(timeout)
# Remove command echo from output
@@ -234,8 +248,6 @@
self._lock.release()
- # Public methods
-
def is_responsive(self):
"""
Make sure the monitor is responsive by sending a command.
@@ -243,7 +255,7 @@
@return: True if responsive, False otherwise
"""
try:
- self._get_command_output("help")
+ self.cmd("info status")
return True
except MonitorError:
return False
@@ -252,39 +264,22 @@
# Command wrappers
# Notes:
# - All of the following commands raise exceptions in a similar manner to
- # cmd() and _get_command_output().
+ # cmd().
# - A command wrapper should use self._help_str if it requires information
# about the monitor's capabilities.
- def cmd(self, command, timeout=20):
- """
- Send a simple command with no parameters and return its output.
- Should only be used for commands that take no parameters and are
- implemented under the same name for both the human and QMP monitors.
-
- @param command: Command to send
- @param timeout: Time duration to wait for (qemu) prompt after command
- @return: The output of the command
- @raise MonitorLockError: Raised if the lock cannot be acquired
- @raise MonitorSendError: Raised if the command cannot be sent
- @raise MonitorProtocolError: Raised if the (qemu) prompt cannot be
- found after sending the command
- """
- return self._get_command_output(command, timeout)
-
-
def quit(self):
"""
Send "quit" without waiting for output.
"""
- self._send_command("quit")
+ self._send("quit")
def info(self, what):
"""
Request info about something and return the output.
"""
- return self._get_command_output("info %s" % what)
+ return self.cmd("info %s" % what)
def query(self, what):
@@ -301,7 +296,7 @@
@param filename: Location for the screendump
@return: The command's output
"""
- return self._get_command_output("screendump %s" % filename)
+ return self.cmd("screendump %s" % filename)
def migrate(self, uri, full_copy=False, incremental_copy=False, wait=False):
@@ -323,7 +318,7 @@
if incremental_copy:
cmd += " -i"
cmd += " %s" % uri
- return self._get_command_output(cmd)
+ return self.cmd(cmd)
def migrate_set_speed(self, value):
@@ -333,7 +328,7 @@
@param value: Speed in bytes/sec
@return: The command's output
"""
- return self._get_command_output("migrate_set_speed %s" % value)
+ return self.cmd("migrate_set_speed %s" % value)
def sendkey(self, keystr, hold_time=1):
@@ -344,7 +339,7 @@
@param hold_time: Hold time in ms (should normally stay 1 ms)
@return: The command's output
"""
- return self._get_command_output("sendkey %s %s" % (keystr, hold_time))
+ return self.cmd("sendkey %s %s" % (keystr, hold_time))
def mouse_move(self, dx, dy):
@@ -355,7 +350,7 @@
@param dy: Y amount
@return: The command's output
"""
- return self._get_command_output("mouse_move %d %d" % (dx, dy))
+ return self.cmd("mouse_move %d %d" % (dx, dy))
def mouse_button(self, state):
@@ -365,7 +360,7 @@
@param state: Button state (1=L, 2=M, 4=R)
@return: The command's output
"""
- return self._get_command_output("mouse_button %d" % state)
+ return self.cmd("mouse_button %d" % state)
class QMPMonitor(Monitor):
@@ -387,7 +382,7 @@
@raise MonitorNotSupportedError: Raised if json isn't available and
suppress_exceptions is False
@note: Other exceptions may be raised if the qmp_capabilities command
- fails. See _get_command_output's docstring.
+ fails. See cmd()'s docstring.
"""
try:
Monitor.__init__(self, name, filename)
@@ -408,7 +403,7 @@
while time.time() < end_time:
for obj in self._read_objects():
if "QMP" in obj:
- self._greeting = obj["QMP"]
+ self._greeting = obj
break
if self._greeting:
break
@@ -417,7 +412,7 @@
raise MonitorProtocolError("No QMP greeting message received")
# Issue qmp_capabilities
- self._get_command_output("qmp_capabilities")
+ self.cmd("qmp_capabilities")
except MonitorError, e:
if suppress_exceptions:
@@ -439,7 +434,7 @@
def _read_objects(self, timeout=5):
"""
- Read lines from monitor and try to decode them.
+ Read lines from the monitor and try to decode them.
Stop when all available lines have been successfully decoded, or when
timeout expires. If any decoded objects are asynchronous events, store
them in self._events. Return all decoded objects.
@@ -447,67 +442,86 @@
@param timeout: Time to wait for all lines to decode successfully
@return: A list of objects
"""
+ if not self._data_available():
+ return []
s = ""
- objs = []
end_time = time.time() + timeout
- while time.time() < end_time:
+ while self._data_available(end_time - time.time()):
s += self._recvall()
+ # Make sure all lines are decodable
for line in s.splitlines():
- if not line:
- continue
- try:
- obj = json.loads(line)
- except:
- # Found an incomplete or broken line -- keep reading
- break
- objs += [obj]
+ if line:
+ try:
+ json.loads(line)
+ except:
+ # Found an incomplete or broken line -- keep reading
+ break
else:
# All lines are OK -- stop reading
break
- time.sleep(0.1)
+ # Decode all decodable lines
+ objs = []
+ for line in s.splitlines():
+ try:
+ objs += [json.loads(line)]
+ except:
+ pass
# Keep track of asynchronous events
self._events += [obj for obj in objs if "event" in obj]
return objs
- def _send_command(self, cmd, args=None, id=None):
+ def _send(self, data):
"""
- Send command without waiting for response.
+ Send raw data without waiting for response.
- @param cmd: Command to send
- @param args: A dict containing command arguments, or None
- @raise MonitorLockError: Raised if the lock cannot be acquired
- @raise MonitorSendError: Raised if the command cannot be sent
+ @param data: Data to send
+ @raise MonitorSocketError: Raised if a socket error occurs
"""
- if not self._acquire_lock(20):
- raise MonitorLockError("Could not acquire exclusive lock to send "
- "QMP command '%s'" % cmd)
-
try:
- cmdobj = self._build_cmd(cmd, args, id)
- try:
- self._socket.sendall(json.dumps(cmdobj) + "\n")
- except socket.error:
- raise MonitorSendError("Could not send QMP command '%s'" % cmd)
-
- finally:
- self._lock.release()
+ self._socket.sendall(data)
+ except socket.error, (errno, msg):
+ raise MonitorSocketError("Could not send data: %r (%s)" %
+ (data, msg))
- def _get_command_output(self, cmd, args=None, timeout=20):
+ def _get_response(self, id=None, timeout=20):
"""
- Send monitor command and wait for response.
+ Read a response from the QMP monitor.
+
+ @param id: If not None, look for a response with this id
+ @param timeout: Time duration to wait for response
+ @return: The response dict, or None if none was found
+ """
+ end_time = time.time() + timeout
+ while self._data_available(end_time - time.time()):
+ for obj in self._read_objects():
+ if isinstance(obj, dict):
+ if id is not None and obj.get("id") != id:
+ continue
+ if "return" in obj or "error" in obj:
+ return obj
+
+
+ # Public methods
+
+ def cmd(self, cmd, args=None, timeout=20):
+ """
+ Send a QMP monitor command and return the response.
+
+ Note: an id is automatically assigned to the command and the response
+ is checked for the presence of the same id.
@param cmd: Command to send
@param args: A dict containing command arguments, or None
@param timeout: Time duration to wait for response
@return: The response received
@raise MonitorLockError: Raised if the lock cannot be acquired
- @raise MonitorSendError: Raised if the command cannot be sent
+ @raise MonitorSocketError: Raised if a socket error occurs
@raise MonitorProtocolError: Raised if no response is received
@raise QMPCmdError: Raised if the response is an error message
- (the exception's args are (msg, data) where msg is a string and
- data is the error data)
+ (the exception's args are (cmd, args, data) where data is the
+ error data)
"""
if not self._acquire_lock(20):
raise MonitorLockError("Could not acquire exclusive lock to send "
@@ -518,28 +532,88 @@
self._read_objects()
# Send command
id = kvm_utils.generate_random_string(8)
- self._send_command(cmd, args, id)
+ self._send(json.dumps(self._build_cmd(cmd, args, id)) + "\n")
# Read response
- end_time = time.time() + timeout
- while time.time() < end_time:
- for obj in self._read_objects():
- if isinstance(obj, dict) and obj.get("id") == id:
- if "return" in obj:
- return obj["return"]
- elif "error" in obj:
- raise QMPCmdError("QMP command '%s' failed" % cmd,
- obj["error"])
- time.sleep(0.1)
- # No response found
- raise MonitorProtocolError("Received no response to QMP command "
- "'%s', or received a response with an "
- "incorrect id" % cmd)
+ r = self._get_response(id, timeout)
+ if r is None:
+ raise MonitorProtocolError("Received no response to QMP "
+ "command '%s', or received a "
+ "response with an incorrect id"
+ % cmd)
+ if "return" in r:
+ return r["return"]
+ if "error" in r:
+ raise QMPCmdError(cmd, args, r["error"])
finally:
self._lock.release()
- # Public methods
+ def cmd_raw(self, data, timeout=20):
+ """
+ Send a raw string to the QMP monitor and return the response.
+ Unlike cmd(), return the raw response dict without performing any
+ checks on it.
+
+ @param data: The data to send
+ @param timeout: Time duration to wait for response
+ @return: The response received
+ @raise MonitorLockError: Raised if the lock cannot be acquired
+ @raise MonitorSocketError: Raised if a socket error occurs
+ @raise MonitorProtocolError: Raised if no response is received
+ """
+ if not self._acquire_lock(20):
+ raise MonitorLockError("Could not acquire exclusive lock to send "
+ "data: %r" % data)
+
+ try:
+ self._read_objects()
+ self._send(data)
+ r = self._get_response(None, timeout)
+ if r is None:
+ raise MonitorProtocolError("Received no response to data: %r" %
+ data)
+ return r
+
+ finally:
+ self._lock.release()
+
+
+ def cmd_obj(self, obj, timeout=20):
+ """
+ Transform a Python object to JSON, send the resulting string to the QMP
+ monitor, and return the response.
+ Unlike cmd(), return the raw response dict without performing any
+ checks on it.
+
+ @param obj: The object to send
+ @param timeout: Time duration to wait for response
+ @return: The response received
+ @raise MonitorLockError: Raised if the lock cannot be acquired
+ @raise MonitorSocketError: Raised if a socket error occurs
+ @raise MonitorProtocolError: Raised if no response is received
+ """
+ return self.cmd_raw(json.dumps(obj) + "\n", timeout)
+
+
+ def cmd_qmp(self, cmd, args=None, id=None, timeout=20):
+ """
+ Build a QMP command from the passed arguments, send it to the monitor
+ and return the response.
+ Unlike cmd(), return the raw response dict without performing any
+ checks on it.
+
+ @param cmd: Command to send
+ @param args: A dict containing command arguments, or None
+ @param id: An id for the command, or None
+ @param timeout: Time duration to wait for response
+ @return: The response received
+ @raise MonitorLockError: Raised if the lock cannot be acquired
+ @raise MonitorSocketError: Raised if a socket error occurs
+ @raise MonitorProtocolError: Raised if no response is received
+ """
+ return self.cmd_obj(self._build_cmd(cmd, args, id), timeout)
+
def is_responsive(self):
"""
@@ -548,7 +622,7 @@
@return: True if responsive, False otherwise
"""
try:
- self._get_command_output("query-version")
+ self.cmd("query-status")
return True
except MonitorError:
return False
@@ -597,38 +671,29 @@
self._lock.release()
+ def get_greeting(self):
+ """
+ Return QMP greeting message.
+ """
+ return self._greeting
+
+
# Command wrappers
# Note: all of the following functions raise exceptions in a similar manner
- # to cmd() and _get_command_output().
-
- def cmd(self, command, timeout=20):
- """
- Send a simple command with no parameters and return its output.
- Should only be used for commands that take no parameters and are
- implemented under the same name for both the human and QMP monitors.
-
- @param command: Command to send
- @param timeout: Time duration to wait for response
- @return: The response to the command
- @raise MonitorLockError: Raised if the lock cannot be acquired
- @raise MonitorSendError: Raised if the command cannot be sent
- @raise MonitorProtocolError: Raised if no response is received
- """
- return self._get_command_output(command, timeout=timeout)
-
+ # to cmd().
def quit(self):
"""
Send "quit" and return the response.
"""
- return self._get_command_output("quit")
+ return self.cmd("quit")
def info(self, what):
"""
Request info about something and return the response.
"""
- return self._get_command_output("query-%s" % what)
+ return self.cmd("query-%s" % what)
def query(self, what):
@@ -646,7 +711,7 @@
@return: The response to the command
"""
args = {"filename": filename}
- return self._get_command_output("screendump", args)
+ return self.cmd("screendump", args)
def migrate(self, uri, full_copy=False, incremental_copy=False, wait=False):
@@ -662,7 +727,7 @@
args = {"uri": uri,
"blk": full_copy,
"inc": incremental_copy}
- return self._get_command_output("migrate", args)
+ return self.cmd("migrate", args)
def migrate_set_speed(self, value):
@@ -673,4 +738,4 @@
@return: The response to the command
"""
args = {"value": value}
- return self._get_command_output("migrate_set_speed", args)
+ return self.cmd("migrate_set_speed", args)
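
Editorial note: the refactoring above makes cmd() the public entry point for
both monitor classes and adds unchecked QMP variants (cmd_raw, cmd_obj,
cmd_qmp). A minimal sketch of driving the new API, assuming kvm_monitor is
importable and `monitor` is a connected QMPMonitor instance:

    import logging
    import kvm_monitor

    version = monitor.cmd("query-version")          # checked; returns the "return" value
    raw = monitor.cmd_qmp("query-status")           # raw response dict, no checks
    obj = monitor.cmd_obj({"execute": "query-kvm"}) # arbitrary object, JSON-encoded
    try:
        monitor.cmd("bogus-command")
    except kvm_monitor.QMPCmdError, e:
        # e.g. "QMP command 'bogus-command' failed (arguments: None, ...)"
        logging.debug(e)
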
diff --git a/client/tests/kvm/kvm_preprocessing.py b/client/tests/kvm/kvm_preprocessing.py
index e3de0b3..1ddf99b 100644
--- a/client/tests/kvm/kvm_preprocessing.py
+++ b/client/tests/kvm/kvm_preprocessing.py
@@ -59,14 +59,8 @@
kvm_utils.env_register_vm(env, name, vm)
start_vm = False
- for_migration = False
- if params.get("start_vm_for_migration") == "yes":
- logging.debug("'start_vm_for_migration' specified; (re)starting VM "
- "with -incoming option...")
- start_vm = True
- for_migration = True
- elif params.get("restart_vm") == "yes":
+ if params.get("restart_vm") == "yes":
logging.debug("'restart_vm' specified; (re)starting VM...")
start_vm = True
elif params.get("start_vm") == "yes":
@@ -81,7 +75,7 @@
if start_vm:
# Start the VM (or restart it if it's already up)
- if not vm.create(name, params, test.bindir, for_migration):
+ if not vm.create(name, params, test.bindir):
raise error.TestError("Could not start VM")
else:
# Don't start the VM, just update its params
diff --git a/client/tests/kvm/kvm_subprocess.py b/client/tests/kvm/kvm_subprocess.py
index f815069..8321bb3 100755
--- a/client/tests/kvm/kvm_subprocess.py
+++ b/client/tests/kvm/kvm_subprocess.py
@@ -561,6 +561,7 @@
for t in threading.enumerate():
if hasattr(t, "name") and t.name.startswith("tail_thread"):
t.join(10)
+ _thread_kill_requested = False
class kvm_tail(kvm_spawn):
diff --git a/client/tests/kvm/kvm_test_utils.py b/client/tests/kvm/kvm_test_utils.py
index 5412aac..014f265 100644
--- a/client/tests/kvm/kvm_test_utils.py
+++ b/client/tests/kvm/kvm_test_utils.py
@@ -21,7 +21,7 @@
@copyright: 2008-2009 Red Hat Inc.
"""
-import time, os, logging, re, commands
+import time, os, logging, re, commands, signal
from autotest_lib.client.common_lib import error
from autotest_lib.client.bin import utils
import kvm_utils, kvm_vm, kvm_subprocess, scan_results
@@ -157,9 +157,11 @@
def mig_cancelled():
o = vm.monitor.info("migrate")
if isinstance(o, str):
- return "Migration status: cancelled" in o
+ return ("Migration status: cancelled" in o or
+ "Migration status: canceled" in o)
else:
- return o.get("status") == "cancelled"
+ return (o.get("status") == "cancelled" or
+ o.get("status") == "canceled")
def wait_for_migration():
if not kvm_utils.wait_for(mig_finished, mig_timeout, 2, 2,
@@ -167,79 +169,122 @@
raise error.TestFail("Timeout expired while waiting for migration "
"to finish")
+ dest_vm = vm.clone()
- migration_file = os.path.join("/tmp/",
- mig_protocol + time.strftime("%Y%m%d-%H%M%S"))
- if mig_protocol == "tcp":
- mig_extra_params = " -incoming tcp:0:%d"
- elif mig_protocol == "unix":
- mig_extra_params = " -incoming unix:%s"
- elif mig_protocol == "exec":
+ if mig_protocol == "exec":
# Exec is a little different from other migrate methods - first we
# ask the monitor the migration, then the vm state is dumped to a
# compressed file, then we start the dest vm with -incoming pointing
# to it
- mig_extra_params = " -incoming \"exec: gzip -c -d %s\"" % migration_file
- uri = "\"exec:gzip -c > %s\"" % migration_file
- vm.monitor.cmd("stop")
- o = vm.monitor.migrate(uri)
- wait_for_migration()
-
- # Clone the source VM and ask the clone to wait for incoming migration
- dest_vm = vm.clone()
- if not dest_vm.create(extra_params=mig_extra_params):
- raise error.TestError("Could not create dest VM")
-
- try:
- if mig_protocol == "tcp":
- uri = "tcp:localhost:%d" % dest_vm.migration_port
- elif mig_protocol == "unix":
- uri = "unix:%s" % dest_vm.migration_file
-
- if mig_protocol != "exec":
- o = vm.monitor.migrate(uri)
-
- if mig_protocol == "tcp" and mig_cancel:
- time.sleep(2)
- o = vm.monitor.cmd("migrate_cancel")
- if not kvm_utils.wait_for(mig_cancelled, 60, 2, 2,
- "Waiting for migration cancel"):
- raise error.TestFail("Fail to cancel migration")
- dest_vm.destroy(gracefully=False)
- return vm
-
+ try:
+ exec_file = "/tmp/exec-%s.gz" % kvm_utils.generate_random_string(8)
+ exec_cmd = "gzip -c -d %s" % exec_file
+ uri = '"exec:gzip -c > %s"' % exec_file
+ vm.monitor.cmd("stop")
+ vm.monitor.migrate(uri)
wait_for_migration()
- # Report migration status
- if mig_succeeded():
- logging.info("Migration finished successfully")
- elif mig_failed():
- raise error.TestFail("Migration failed")
- else:
- raise error.TestFail("Migration ended with unknown status")
+ if not dest_vm.create(migration_mode=mig_protocol,
+ migration_exec_cmd=exec_cmd, mac_source=vm):
+ raise error.TestError("Could not create dest VM")
+ finally:
+ logging.debug("Removing migration file %s", exec_file)
+ try:
+ os.remove(exec_file)
+ except OSError:
+ pass
+ else:
+ if not dest_vm.create(migration_mode=mig_protocol, mac_source=vm):
+ raise error.TestError("Could not create dest VM")
+ try:
+ if mig_protocol == "tcp":
+ uri = "tcp:localhost:%d" % dest_vm.migration_port
+ elif mig_protocol == "unix":
+ uri = "unix:%s" % dest_vm.migration_file
+ vm.monitor.migrate(uri)
- o = dest_vm.monitor.info("status")
- if "paused" in o:
- logging.debug("Destination VM is paused, resuming it...")
- dest_vm.monitor.cmd("cont")
+ if mig_cancel:
+ time.sleep(2)
+ vm.monitor.cmd("migrate_cancel")
+ if not kvm_utils.wait_for(mig_cancelled, 60, 2, 2,
+ "Waiting for migration "
+ "cancellation"):
+ raise error.TestFail("Failed to cancel migration")
+ dest_vm.destroy(gracefully=False)
+ return vm
+ else:
+ wait_for_migration()
+ except:
+ dest_vm.destroy()
+ raise
- if os.path.exists(migration_file):
- logging.debug("Removing migration file %s", migration_file)
- os.remove(migration_file)
+ # Report migration status
+ if mig_succeeded():
+ logging.info("Migration finished successfully")
+ elif mig_failed():
+ raise error.TestFail("Migration failed")
+ else:
+ raise error.TestFail("Migration ended with unknown status")
- # Kill the source VM
- vm.destroy(gracefully=False)
+ if "paused" in dest_vm.monitor.info("status"):
+ logging.debug("Destination VM is paused, resuming it...")
+ dest_vm.monitor.cmd("cont")
- # Replace the source VM with the new cloned VM
- if env is not None:
- kvm_utils.env_register_vm(env, vm.name, dest_vm)
+ # Kill the source VM
+ vm.destroy(gracefully=False)
- # Return the new cloned VM
- return dest_vm
+ # Replace the source VM with the new cloned VM
+ if env is not None:
+ kvm_utils.env_register_vm(env, vm.name, dest_vm)
- except:
- dest_vm.destroy()
- raise
+ # Return the new cloned VM
+ return dest_vm
+
+
+def stop_windows_service(session, service, timeout=120):
+ """
+ Stop a Windows service using sc.
+ If the service is already stopped or is not installed, do nothing.
+
+ @param session: A shell session object on which to run sc
+ @param service: The name of the service
+ @param timeout: Time duration to wait for service to stop
+ @raise error.TestError: Raised if the service can't be stopped
+ """
+ end_time = time.time() + timeout
+ while time.time() < end_time:
+ o = session.get_command_output("sc stop %s" % service, timeout=60)
+ # FAILED 1060 means the service isn't installed.
+ # FAILED 1062 means the service hasn't been started.
+ if re.search(r"\bFAILED (1060|1062)\b", o, re.I):
+ break
+ time.sleep(1)
+ else:
+ raise error.TestError("Could not stop service '%s'" % service)
+
+
+def start_windows_service(session, service, timeout=120):
+ """
+ Start a Windows service using sc.
+ If the service is already running, do nothing.
+ If the service isn't installed, fail.
+
+ @param session: A shell session object on which to run sc
+ @param service: The name of the service
+ @param timeout: Time duration to wait for service to start
+ @raise error.TestError: Raised if the service can't be started
+ """
+ end_time = time.time() + timeout
+ while time.time() < end_time:
+ o = session.get_command_output("sc start %s" % service, timeout=60)
+ # FAILED 1060 means the service isn't installed.
+ if re.search(r"\bFAILED 1060\b", o, re.I):
+ raise error.TestError("Could not start service '%s' "
+ "(service not installed)" % service)
+ # FAILED 1056 means the service is already running.
+ if re.search(r"\bFAILED 1056\b", o, re.I):
+ break
+ time.sleep(1)
+ else:
+ raise error.TestError("Could not start service '%s'" % service)
def get_time(session, time_command, time_filter_re, time_format):
@@ -505,3 +550,131 @@
e_msg = ("Tests %s failed during control file execution" %
" ".join(bad_results))
raise error.TestFail(e_msg)
+
+
+def get_loss_ratio(output):
+ """
+ Get the packet loss ratio from the output of ping.
+
+ @param output: Ping output.
+ """
+ try:
+ return int(re.findall('(\d+)% packet loss', output)[0])
+ except IndexError:
+ logging.debug(output)
+ return -1
+
+
+def raw_ping(command, timeout, session, output_func):
+ """
+ Low-level ping command execution.
+
+ @param command: Ping command.
+ @param timeout: Timeout of the ping command.
+ @param session: Session in which to execute the ping command, or None to
+ execute it locally.
+ @param output_func: Function used to log the output of ping.
+ """
+ if session is None:
+ process = kvm_subprocess.run_bg(command, output_func=output_func,
+ timeout=timeout)
+
+ # Send SIGINT to the ping process when the timeout expires; ping
+ # catches SIGINT and prints its statistics on exit, so we can still
+ # get the packet loss ratio even on timeout.
+ if process.is_alive():
+ kvm_utils.kill_process_tree(process.get_pid(), signal.SIGINT)
+
+ status = process.get_status()
+ output = process.get_output()
+
+ process.close()
+ return status, output
+ else:
+ session.sendline(command)
+ status, output = session.read_up_to_prompt(timeout=timeout,
+ print_func=output_func)
+ if not status:
+ # Send ctrl+c (SIGINT) through ssh session
+ session.send("\003")
+ status, output2 = session.read_up_to_prompt(print_func=output_func)
+ output += output2
+ if not status:
+ # We also need to use this session to query the return value
+ session.send("\003")
+
+ session.sendline(session.status_test_command)
+ s2, o2 = session.read_up_to_prompt()
+ if not s2:
+ status = -1
+ else:
+ try:
+ status = int(re.findall("\d+", o2)[0])
+ except:
+ status = -1
+
+ return status, output
+
+
+def ping(dest=None, count=None, interval=None, interface=None,
+ packetsize=None, ttl=None, hint=None, adaptive=False,
+ broadcast=False, flood=False, timeout=0,
+ output_func=logging.debug, session=None):
+ """
+ Wrapper of ping.
+
+ @param dest: Destination address.
+ @param count: Count of ICMP echo requests.
+ @param interval: Interval between two ICMP echo requests.
+ @param interface: Source interface to use.
+ @param packetsize: ICMP packet size.
+ @param ttl: IP time to live.
+ @param hint: Path MTU discovery hint.
+ @param adaptive: Adaptive ping flag.
+ @param broadcast: Broadcast ping flag.
+ @param flood: Flood ping flag.
+ @param timeout: Timeout for the ping command.
+ @param output_func: Function used to log the result of ping.
+ @param session: Session in which to execute the ping command, or None to
+ execute it locally.
+ """
+ if dest is not None:
+ command = "ping %s " % dest
+ else:
+ command = "ping localhost "
+ if count is not None:
+ command += " -c %s" % count
+ if interval is not None:
+ command += " -i %s" % interval
+ if interface is not None:
+ command += " -I %s" % interface
+ if packetsize is not None:
+ command += " -s %s" % packetsize
+ if ttl is not None:
+ command += " -t %s" % ttl
+ if hint is not None:
+ command += " -M %s" % hint
+ if adaptive:
+ command += " -A"
+ if broadcast:
+ command += " -b"
+ if flood:
+ command += " -f -q"
+ output_func = None
+
+ return raw_ping(command, timeout, session, output_func)
+
+
+def get_linux_ifname(session, mac_address):
+ """
+ Get the interface name from the MAC address.
+
+ @param session: Session to the virtual machine.
+ @param mac_address: The MAC address of the NIC.
+ """
+
+ output = session.get_command_output("ifconfig -a")
+
+ try:
+ ethname = re.findall("(\w+)\s+Link.*%s" % mac_address, output,
+ re.IGNORECASE)[0]
+ return ethname
+ except:
+ return None
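
Editorial note: the new helpers above wrap the system ping binary
(raw_ping/ping/get_loss_ratio) and Windows service control via sc. A minimal
sketch, assuming this module is importable as kvm_test_utils and `session` is
a logged-in guest session as used elsewhere in these utilities (addresses and
the service name are illustrative):

    import kvm_test_utils
    from autotest_lib.client.common_lib import error

    # flood-ping a guest address from the host for 10 seconds
    status, output = kvm_test_utils.ping("10.0.2.15", flood=True, timeout=10)
    if kvm_test_utils.get_loss_ratio(output) != 0:
        raise error.TestFail("Unexpected packet loss")

    # run the same check from inside the guest, then stop a service there
    status, output = kvm_test_utils.ping("10.0.2.2", count=10, session=session)
    kvm_test_utils.stop_windows_service(session, "Audiosrv")
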
diff --git a/client/tests/kvm/kvm_utils.py b/client/tests/kvm/kvm_utils.py
index fb2d1c2..b849b37 100644
--- a/client/tests/kvm/kvm_utils.py
+++ b/client/tests/kvm/kvm_utils.py
@@ -5,9 +5,26 @@
"""
import time, string, random, socket, os, signal, re, logging, commands, cPickle
-from autotest_lib.client.bin import utils
+import fcntl, shelve, ConfigParser
+from autotest_lib.client.bin import utils, os_dep
from autotest_lib.client.common_lib import error, logging_config
import kvm_subprocess
+try:
+ import koji
+ KOJI_INSTALLED = True
+except ImportError:
+ KOJI_INSTALLED = False
+
+
+def _lock_file(filename):
+ f = open(filename, "w")
+ fcntl.lockf(f, fcntl.LOCK_EX)
+ return f
+
+
+def _unlock_file(f):
+ fcntl.lockf(f, fcntl.LOCK_UN)
+ f.close()
def dump_env(obj, filename):
@@ -82,163 +99,113 @@
# Functions related to MAC/IP addresses
-def mac_str_to_int(addr):
+def _open_mac_pool(lock_mode):
+ lock_file = open("/tmp/mac_lock", "w+")
+ fcntl.lockf(lock_file, lock_mode)
+ pool = shelve.open("/tmp/address_pool")
+ return pool, lock_file
+
+
+def _close_mac_pool(pool, lock_file):
+ pool.close()
+ fcntl.lockf(lock_file, fcntl.LOCK_UN)
+ lock_file.close()
+
+
+def _generate_mac_address_prefix(mac_pool):
"""
- Convert MAC address string to integer.
+ Generate a random MAC address prefix and add it to the MAC pool dictionary.
+ If there's a MAC prefix there already, do not update the MAC pool and just
+ return what's in there. By convention we will set KVM autotest MAC
+ addresses to start with 0x9a.
- @param addr: String representing the MAC address.
+ @param mac_pool: The MAC address pool object.
+ @return: The MAC address prefix.
"""
- return sum(int(s, 16) * 256 ** i
- for i, s in enumerate(reversed(addr.split(":"))))
+ if "prefix" in mac_pool:
+ prefix = mac_pool["prefix"]
+ logging.debug("Used previously generated MAC address prefix for this "
+ "host: %s", prefix)
+ else:
+ r = random.SystemRandom()
+ prefix = "9a:%02x:%02x:%02x:" % (r.randint(0x00, 0xff),
+ r.randint(0x00, 0xff),
+ r.randint(0x00, 0xff))
+ mac_pool["prefix"] = prefix
+ logging.debug("Generated MAC address prefix for this host: %s", prefix)
+ return prefix
-def mac_int_to_str(addr):
+def generate_mac_address(vm_instance, nic_index):
"""
- Convert MAC address integer to string.
+ Randomly generate a MAC address and add it to the MAC address pool.
- @param addr: Integer representing the MAC address.
+ Try to generate a MAC address based on a randomly generated MAC address
+ prefix and add it to a persistent dictionary.
+ key = VM instance + NIC index, value = MAC address
+ e.g. {'20100310-165222-Wt7l:0': '9a:5d:94:6a:9b:f9'}
+
+ @param vm_instance: The instance attribute of a VM.
+ @param nic_index: The index of the NIC.
+ @return: MAC address string.
"""
- return ":".join("%02x" % (addr >> 8 * i & 0xFF)
- for i in reversed(range(6)))
+ mac_pool, lock_file = _open_mac_pool(fcntl.LOCK_EX)
+ key = "%s:%s" % (vm_instance, nic_index)
+ if key in mac_pool:
+ mac = mac_pool[key]
+ else:
+ prefix = _generate_mac_address_prefix(mac_pool)
+ r = random.SystemRandom()
+ while key not in mac_pool:
+ mac = prefix + "%02x:%02x" % (r.randint(0x00, 0xff),
+ r.randint(0x00, 0xff))
+ if mac in mac_pool.values():
+ continue
+ mac_pool[key] = mac
+ logging.debug("Generated MAC address for NIC %s: %s", key, mac)
+ _close_mac_pool(mac_pool, lock_file)
+ return mac
-def ip_str_to_int(addr):
+def free_mac_address(vm_instance, nic_index):
"""
- Convert IP address string to integer.
+ Remove a MAC address from the address pool.
- @param addr: String representing the IP address.
+ @param vm_instance: The instance attribute of a VM.
+ @param nic_index: The index of the NIC.
"""
- return sum(int(s) * 256 ** i
- for i, s in enumerate(reversed(addr.split("."))))
+ mac_pool, lock_file = _open_mac_pool(fcntl.LOCK_EX)
+ key = "%s:%s" % (vm_instance, nic_index)
+ if key in mac_pool:
+ logging.debug("Freeing MAC address for NIC %s: %s", key, mac_pool[key])
+ del mac_pool[key]
+ _close_mac_pool(mac_pool, lock_file)
-def ip_int_to_str(addr):
+def set_mac_address(vm_instance, nic_index, mac):
"""
- Convert IP address integer to string.
+ Set a MAC address in the pool.
- @param addr: Integer representing the IP address.
+ @param vm_instance: The instance attribute of a VM.
+ @param nic_index: The index of the NIC.
+ @param mac: The MAC address to set.
"""
- return ".".join(str(addr >> 8 * i & 0xFF)
- for i in reversed(range(4)))
+ mac_pool, lock_file = _open_mac_pool(fcntl.LOCK_EX)
+ mac_pool["%s:%s" % (vm_instance, nic_index)] = mac
+ _close_mac_pool(mac_pool, lock_file)
-def offset_mac(base, offset):
+def get_mac_address(vm_instance, nic_index):
"""
- Add offset to a given MAC address.
+ Return a MAC address from the pool.
- @param base: String representing a MAC address.
- @param offset: Offset to add to base (integer)
- @return: A string representing the offset MAC address.
+ @param vm_instance: The instance attribute of a VM.
+ @param nic_index: The index of the NIC.
+ @return: MAC address string.
"""
- return mac_int_to_str(mac_str_to_int(base) + offset)
-
-
-def offset_ip(base, offset):
- """
- Add offset to a given IP address.
-
- @param base: String representing an IP address.
- @param offset: Offset to add to base (integer)
- @return: A string representing the offset IP address.
- """
- return ip_int_to_str(ip_str_to_int(base) + offset)
-
-
-def get_mac_ip_pair_from_dict(dict):
- """
- Fetch a MAC-IP address pair from dict and return it.
-
- The parameters in dict are expected to conform to a certain syntax.
- Typical usage may be:
-
- address_ranges = r1 r2 r3
-
- address_range_base_mac_r1 = 55:44:33:22:11:00
- address_range_base_ip_r1 = 10.0.0.0
- address_range_size_r1 = 16
-
- address_range_base_mac_r2 = 55:44:33:22:11:40
- address_range_base_ip_r2 = 10.0.0.60
- address_range_size_r2 = 25
-
- address_range_base_mac_r3 = 55:44:33:22:12:10
- address_range_base_ip_r3 = 10.0.1.20
- address_range_size_r3 = 230
-
- address_index = 0
-
- All parameters except address_index specify a MAC-IP address pool. The
- pool consists of several MAC-IP address ranges.
- address_index specified the index of the desired MAC-IP pair from the pool.
-
- @param dict: The dictionary from which to fetch the addresses.
- """
- index = int(dict.get("address_index", 0))
- for mac_range_name in get_sub_dict_names(dict, "address_ranges"):
- mac_range_params = get_sub_dict(dict, mac_range_name)
- mac_base = mac_range_params.get("address_range_base_mac")
- ip_base = mac_range_params.get("address_range_base_ip")
- size = int(mac_range_params.get("address_range_size", 1))
- if index < size:
- return (mac_base and offset_mac(mac_base, index),
- ip_base and offset_ip(ip_base, index))
- index -= size
- return (None, None)
-
-
-def get_sub_pool(dict, piece, num_pieces):
- """
- Split a MAC-IP pool and return a single requested piece.
-
- For example, get_sub_pool(dict, 0, 3) will split the pool in 3 pieces and
- return a dict representing the first piece.
-
- @param dict: A dict that contains pool parameters.
- @param piece: The index of the requested piece. Should range from 0 to
- num_pieces - 1.
- @param num_pieces: The total number of pieces.
- @return: A copy of dict, modified to describe the requested sub-pool.
- """
- range_dicts = [get_sub_dict(dict, name) for name in
- get_sub_dict_names(dict, "address_ranges")]
- if not range_dicts:
- return dict
- ranges = [[d.get("address_range_base_mac"),
- d.get("address_range_base_ip"),
- int(d.get("address_range_size", 1))] for d in range_dicts]
- total_size = sum(r[2] for r in ranges)
- base = total_size * piece / num_pieces
- size = total_size * (piece + 1) / num_pieces - base
-
- # Find base of current sub-pool
- for i in range(len(ranges)):
- r = ranges[i]
- if base < r[2]:
- r[0] = r[0] and offset_mac(r[0], base)
- r[1] = r[1] and offset_ip(r[1], base)
- r[2] -= base
- break
- base -= r[2]
-
- # Collect ranges up to end of current sub-pool
- new_ranges = []
- for i in range(i, len(ranges)):
- r = ranges[i]
- new_ranges.append(r)
- if size <= r[2]:
- r[2] = size
- break
- size -= r[2]
-
- # Write new dict
- new_dict = dict.copy()
- new_dict["address_ranges"] = " ".join("r%d" % i for i in
- range(len(new_ranges)))
- for i in range(len(new_ranges)):
- new_dict["address_range_base_mac_r%d" % i] = new_ranges[i][0]
- new_dict["address_range_base_ip_r%d" % i] = new_ranges[i][1]
- new_dict["address_range_size_r%d" % i] = new_ranges[i][2]
- return new_dict
+ mac_pool, lock_file = _open_mac_pool(fcntl.LOCK_SH)
+ mac = mac_pool.get("%s:%s" % (vm_instance, nic_index))
+ _close_mac_pool(mac_pool, lock_file)
+ return mac
def verify_ip_address_ownership(ip, macs, timeout=10.0):
@@ -715,7 +682,7 @@
# The following are utility functions related to ports.
-def is_port_free(port):
+def is_port_free(port, address):
"""
Return True if the given port is available for use.
@@ -724,15 +691,22 @@
try:
s = socket.socket()
#s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
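+ # For the local host, try to bind the port: success means it is free.
+ # For a remote address, try to connect: success means it is in use.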
- s.bind(("localhost", port))
- free = True
+ if address == "localhost":
+ s.bind(("localhost", port))
+ free = True
+ else:
+ s.connect((address, port))
+ free = False
except socket.error:
- free = False
+ if address == "localhost":
+ free = False
+ else:
+ free = True
s.close()
return free
-def find_free_port(start_port, end_port):
+def find_free_port(start_port, end_port, address="localhost"):
"""
Return a host free port in the range [start_port, end_port].
@@ -740,12 +714,12 @@
@param end_port: Port immediately after the last one that will be checked.
"""
for i in range(start_port, end_port):
- if is_port_free(i):
+ if is_port_free(i, address):
return i
return None
-def find_free_ports(start_port, end_port, count):
+def find_free_ports(start_port, end_port, count, address="localhost"):
"""
Return count of host free ports in the range [start_port, end_port].
@@ -756,7 +730,7 @@
ports = []
i = start_port
while i < end_port and count > 0:
- if is_port_free(i):
+ if is_port_free(i, address):
ports.append(i)
count -= 1
i += 1
@@ -1278,3 +1252,141 @@
logging.info("Released device %s successfully", pci_id)
except:
return
+
+
+class KojiDownloader(object):
+ """
+ Establish a connection with the build system, either koji or brew.
+
+ This class provides convenience methods to retrieve packages hosted on
+ the build system.
+ """
+ def __init__(self, cmd):
+ """
+ Verifies whether the system has koji or brew installed, then loads
+ the configuration file that will be used to download the files.
+
+ @param cmd: Command name, either 'brew' or 'koji'. Used to pick the
+ appropriate configuration file for the downloader.
+ """
+ if not KOJI_INSTALLED:
+ raise ValueError('No koji/brew installed on the machine')
+
+ if os.path.isfile(cmd):
+ koji_cmd = cmd
+ else:
+ koji_cmd = os_dep.command(cmd)
+
+ logging.debug("Found %s as the buildsystem interface", koji_cmd)
+
+ config_map = {'/usr/bin/koji': '/etc/koji.conf',
+ '/usr/bin/brew': '/etc/brewkoji.conf'}
+
+ try:
+ config_file = config_map[koji_cmd]
+ except KeyError:
+ raise ValueError('Could not find config file for %s' % koji_cmd)
+
+ base_name = os.path.basename(koji_cmd)
+ if os.access(config_file, os.F_OK):
+ f = open(config_file)
+ config = ConfigParser.ConfigParser()
+ config.readfp(f)
+ f.close()
+ else:
+ raise IOError('Configuration file %s missing or with wrong '
+ 'permissions' % config_file)
+
+ if config.has_section(base_name):
+ self.koji_options = {}
+ session_options = {}
+ server = None
+ for name, value in config.items(base_name):
+ if name in ('user', 'password', 'debug_xmlrpc', 'debug'):
+ session_options[name] = value
+ self.koji_options[name] = value
+ self.session = koji.ClientSession(self.koji_options['server'],
+ session_options)
+ else:
+ raise ValueError('Koji config file %s does not have a %s '
+ 'session' % (config_file, base_name))
+
+
+ def get(self, src_package, dst_dir, rfilter=None, tag=None, build=None,
+ arch=None):
+ """
+ Download a list of packages from the build system.
+
+ This will download all packages originating from source package
+ [src_package] with the given [tag] or [build] for the architecture
+ reported by the machine.
+
+ @param src_package: Source package name.
+ @param dst_dir: Destination directory for the downloaded packages.
+ @param rfilter: Regexp filter, only download the packages that match
+ that particular filter.
+ @param tag: Build system tag.
+ @param build: Build system ID.
+ @param arch: Package arch. Useful when you want to download noarch
+ packages.
+
+ @return: List of paths with the downloaded rpm packages.
+ """
+ if build and build.isdigit():
+ build = int(build)
+
+ if tag and build:
+ logging.info("Both tag and build parameters provided, ignoring tag "
+ "parameter...")
+
+ if not tag and not build:
+ raise ValueError("Koji install selected but neither koji_tag "
+ "nor koji_build parameters provided. Please "
+ "provide an appropriate tag or build name.")
+
+ if not build:
+ builds = self.session.listTagged(tag, latest=True,
+ package=src_package)
+ if not builds:
+ raise ValueError("Tag %s has no builds of %s" % (tag,
+ src_package))
+ info = builds[0]
+ else:
+ info = self.session.getBuild(build)
+
+ if info is None:
+ raise ValueError('No such brew/koji build: %s' % build)
+
+ if arch is None:
+ arch = utils.get_arch()
+
+ rpms = self.session.listRPMs(buildID=info['id'],
+ arches=arch)
+ if not rpms:
+ raise ValueError("No %s packages available for %s" %
+ arch, koji.buildLabel(info))
+
+ rpm_paths = []
+ for rpm in rpms:
+ rpm_name = koji.pathinfo.rpm(rpm)
+ url = ("%s/%s/%s/%s/%s" % (self.koji_options['pkgurl'],
+ info['package_name'],
+ info['version'], info['release'],
+ rpm_name))
+ if rfilter:
+ filter_regexp = re.compile(rfilter, re.IGNORECASE)
+ if filter_regexp.match(os.path.basename(rpm_name)):
+ download = True
+ else:
+ download = False
+ else:
+ download = True
+
+ if download:
+ r = utils.get_file(url,
+ os.path.join(dst_dir, os.path.basename(url)))
+ rpm_paths.append(r)
+
+ return rpm_paths
diff --git a/client/tests/kvm/kvm_vm.py b/client/tests/kvm/kvm_vm.py
index 135d08e..a860437 100755
--- a/client/tests/kvm/kvm_vm.py
+++ b/client/tests/kvm/kvm_vm.py
@@ -5,7 +5,7 @@
@copyright: 2008-2009 Red Hat Inc.
"""
-import time, socket, os, logging, fcntl, re, commands, glob
+import time, socket, os, logging, fcntl, re, commands, shelve, glob
import kvm_utils, kvm_subprocess, kvm_monitor, rss_file_transfer
from autotest_lib.client.common_lib import error
from autotest_lib.client.bin import utils
@@ -109,15 +109,15 @@
self.serial_console = None
self.redirs = {}
self.vnc_port = 5900
- self.uuid = None
self.monitors = []
self.pci_assignable = None
+ self.netdev_id = []
+ self.uuid = None
self.name = name
self.params = params
self.root_dir = root_dir
self.address_cache = address_cache
- self.netdev_id = []
# Find a unique identifier for this VM
while True:
@@ -234,20 +234,40 @@
if boot: cmd += ",boot=on"
return cmd
- def add_nic(help, vlan, model=None, mac=None, netdev_id=None):
- if has_option(help, "netdev"):
- cmd = " -net nic,netdev=%s" % netdev_id
+ def add_nic(help, vlan, model=None, mac=None, netdev_id=None,
+ nic_extra_params=None):
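+ # QEMU versions with '-device' support take the NIC model as a device
+ # name (optionally tied to a '-netdev' backend); older versions use
+ # the legacy '-net nic' syntax with an optional vlan.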
+ if has_option(help, "device"):
+ if model == "virtio":
+ model="virtio-net-pci"
+ if not model:
+ model= "rtl8139"
+ cmd = " -device %s" % model
+ if mac:
+ cmd += ",mac=%s" % mac
+ if has_option(help, "netdev"):
+ cmd += ",netdev=%s" % netdev_id
+ else:
+ cmd += "vlan=%d," % vlan
+ if nic_extra_params:
+ cmd += ",%s" % nic_extra_params
else:
- cmd = " -net nic,vlan=%d" % vlan
- if model: cmd += ",model=%s" % model
- if mac: cmd += ",macaddr='%s'" % mac
+ if has_option(help, "netdev"):
+ cmd = " -net nic,netdev=%s" % netdev_id
+ else:
+ cmd = " -net nic,vlan=%d" % vlan
+ if model:
+ cmd += ",model=%s" % model
+ if mac:
+ cmd += ",macaddr='%s'" % mac
return cmd
def add_net(help, vlan, mode, ifname=None, script=None,
downscript=None, tftp=None, bootfile=None, hostfwd=[],
- netdev_id=None):
+ netdev_id=None, vhost=False):
if has_option(help, "netdev"):
cmd = " -netdev %s,id=%s" % (mode, netdev_id)
+ if vhost:
+ cmd +=",vhost=on"
else:
cmd = " -net %s,vlan=%d" % (mode, vlan)
if mode == "tap":
@@ -384,11 +404,10 @@
for nic_name in kvm_utils.get_sub_dict_names(params, "nics"):
nic_params = kvm_utils.get_sub_dict(params, nic_name)
# Handle the '-net nic' part
- mac = None
- if "address_index" in nic_params:
- mac = kvm_utils.get_mac_ip_pair_from_dict(nic_params)[0]
+ mac = self.get_mac_address(vlan)
qemu_cmd += add_nic(help, vlan, nic_params.get("nic_model"), mac,
- self.netdev_id[vlan])
+ self.netdev_id[vlan],
+ nic_params.get("nic_extra_params"))
# Handle the '-net tap' or '-net user' part
script = nic_params.get("nic_script")
downscript = nic_params.get("nic_downscript")
@@ -400,10 +419,11 @@
if tftp:
tftp = kvm_utils.get_path(root_dir, tftp)
qemu_cmd += add_net(help, vlan, nic_params.get("nic_mode", "user"),
- nic_params.get("nic_ifname"),
+ self.get_ifname(vlan),
script, downscript, tftp,
nic_params.get("bootp"), redirs,
- self.netdev_id[vlan])
+ self.netdev_id[vlan],
+ nic_params.get("vhost")=="yes")
# Proceed to next NIC
vlan += 1
@@ -487,22 +507,22 @@
return qemu_cmd
- def create(self, name=None, params=None, root_dir=None,
- for_migration=False, timeout=5.0, extra_params=None):
+ def create(self, name=None, params=None, root_dir=None, timeout=5.0,
+ migration_mode=None, migration_exec_cmd=None, mac_source=None):
"""
Start the VM by running a qemu command.
- All parameters are optional. The following applies to all parameters
- but for_migration: If a parameter is not supplied, the corresponding
- value stored in the class attributes is used, and if it is supplied,
- it is stored for later use.
+ All parameters are optional. If name, params or root_dir are not
+ supplied, the respective values stored as class attributes are used.
@param name: The name of the object
@param params: A dict containing VM params
@param root_dir: Base directory for relative filenames
- @param for_migration: If True, start the VM with the -incoming
- option
- @param extra_params: extra params for qemu command.e.g -incoming option
- Please use this parameter instead of for_migration.
+ @param migration_mode: If supplied, start VM for incoming migration
+ using this protocol (either 'tcp', 'unix' or 'exec')
+ @param migration_exec_cmd: Command to embed in '-incoming "exec: ..."'
+ (e.g. 'gzip -c -d filename') if migration_mode is 'exec'
+ @param mac_source: A VM object from which to copy MAC addresses. If not
+ specified, new addresses will be generated.
"""
self.destroy()
@@ -577,6 +597,15 @@
self.uuid = f.read().strip()
f.close()
+ # Generate or copy MAC addresses for all NICs
+ num_nics = len(kvm_utils.get_sub_dict_names(params, "nics"))
+ for vlan in range(num_nics):
+ mac = mac_source and mac_source.get_mac_address(vlan)
+ if mac:
+ kvm_utils.set_mac_address(self.instance, vlan, mac)
+ else:
+ kvm_utils.generate_mac_address(self.instance, vlan)
+
# Assign a PCI assignable device
self.pci_assignable = None
pa_type = params.get("pci_assignable")
@@ -623,17 +652,15 @@
# Make qemu command
qemu_command = self.make_qemu_command()
- # Enable migration support for VM by adding extra_params.
- if extra_params is not None:
- if " -incoming tcp:0:%d" == extra_params:
- self.migration_port = kvm_utils.find_free_port(5200, 6000)
- qemu_command += extra_params % self.migration_port
- elif " -incoming unix:%s" == extra_params:
- self.migration_file = os.path.join("/tmp/", "unix-" +
- time.strftime("%Y%m%d-%H%M%S"))
- qemu_command += extra_params % self.migration_file
- else:
- qemu_command += extra_params
+ # Add migration parameters if required
+ if migration_mode == "tcp":
+ self.migration_port = kvm_utils.find_free_port(5200, 6000)
+ qemu_command += " -incoming tcp:0:%d" % self.migration_port
+ elif migration_mode == "unix":
+ self.migration_file = "/tmp/migration-unix-%s" % self.instance
+ qemu_command += " -incoming unix:%s" % self.migration_file
+ elif migration_mode == "exec":
+ qemu_command += ' -incoming "exec:%s"' % migration_exec_cmd
logging.debug("Running qemu command:\n%s", qemu_command)
self.process = kvm_subprocess.run_bg(qemu_command, None,
@@ -750,7 +777,7 @@
logging.debug("Shutdown command sent; waiting for VM "
"to go down...")
if kvm_utils.wait_for(self.is_dead, 60, 1, 1):
- logging.debug("VM is down")
+ logging.debug("VM is down, freeing mac address.")
return
finally:
session.close()
@@ -794,6 +821,14 @@
os.unlink(f)
except OSError:
pass
+ if hasattr(self, "migration_file"):
+ try:
+ os.unlink(self.migration_file)
+ except OSError:
+ pass
+ num_nics = len(kvm_utils.get_sub_dict_names(self.params, "nics"))
+ for vlan in range(num_nics):
+ self.free_mac_address(vlan)
@property
@@ -880,26 +915,23 @@
nic_name = nics[index]
nic_params = kvm_utils.get_sub_dict(self.params, nic_name)
if nic_params.get("nic_mode") == "tap":
- mac, ip = kvm_utils.get_mac_ip_pair_from_dict(nic_params)
+ mac = self.get_mac_address(index)
if not mac:
logging.debug("MAC address unavailable")
return None
- if not ip or nic_params.get("always_use_tcpdump") == "yes":
- # Get the IP address from the cache
- ip = self.address_cache.get(mac)
- if not ip:
- logging.debug("Could not find IP address for MAC address: "
- "%s" % mac)
- return None
- # Make sure the IP address is assigned to this guest
- nic_dicts = [kvm_utils.get_sub_dict(self.params, nic)
- for nic in nics]
- macs = [kvm_utils.get_mac_ip_pair_from_dict(dict)[0]
- for dict in nic_dicts]
- if not kvm_utils.verify_ip_address_ownership(ip, macs):
- logging.debug("Could not verify MAC-IP address mapping: "
- "%s ---> %s" % (mac, ip))
- return None
+ mac = mac.lower()
+ # Get the IP address from the cache
+ ip = self.address_cache.get(mac)
+ if not ip:
+ logging.debug("Could not find IP address for MAC address: %s" %
+ mac)
+ return None
+ # Make sure the IP address is assigned to this guest
+ macs = [self.get_mac_address(i) for i in range(len(nics))]
+ if not kvm_utils.verify_ip_address_ownership(ip, macs):
+ logging.debug("Could not verify MAC-IP address mapping: "
+ "%s ---> %s" % (mac, ip))
+ return None
return ip
else:
return "localhost"
@@ -925,6 +957,39 @@
return self.redirs.get(port)
+ def get_ifname(self, nic_index=0):
+ """
+ Return the ifname of a tap device associated with a NIC.
+
+ @param nic_index: Index of the NIC
+ """
+ nics = kvm_utils.get_sub_dict_names(self.params, "nics")
+ nic_name = nics[nic_index]
+ nic_params = kvm_utils.get_sub_dict(self.params, nic_name)
+ if nic_params.get("nic_ifname"):
+ return nic_params.get("nic_ifname")
+ else:
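+ # Derive the tap name from the instance id; "t<n>-" plus 11 chars
+ # keeps it within the 15-character IFNAMSIZ limit.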
+ return "t%d-%s" % (nic_index, self.instance[-11:])
+
+
+ def get_mac_address(self, nic_index=0):
+ """
+ Return the MAC address of a NIC.
+
+ @param nic_index: Index of the NIC
+ """
+ return kvm_utils.get_mac_address(self.instance, nic_index)
+
+
+ def free_mac_address(self, nic_index=0):
+ """
+ Free a NIC's MAC address.
+
+ @param nic_index: Index of the NIC
+ """
+ kvm_utils.free_mac_address(self.instance, nic_index)
+
+
def get_pid(self):
"""
Return the VM's PID. If the VM is dead return None.
diff --git a/client/tests/kvm/scripts/join_mcast.py b/client/tests/kvm/scripts/join_mcast.py
new file mode 100644
index 0000000..350cd5f
--- /dev/null
+++ b/client/tests/kvm/scripts/join_mcast.py
@@ -0,0 +1,37 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+import socket, struct, os, signal, sys
+
+"""
+Script used to join machine into multicast groups.
+
+@author Amos Kong <akong@redhat.com>
+"""
+
+if __name__ == "__main__":
+ if len(sys.argv) < 4:
+ print """%s [mgroup_count] [prefix] [suffix]
+ mgroup_count: count of multicast addresses
+ prefix: multicast address prefix
+ suffix: multicast address suffix""" % sys.argv[0]
+ sys.exit()
+
+ mgroup_count = int(sys.argv[1])
+ prefix = sys.argv[2]
+ suffix = int(sys.argv[3])
+
+ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ for i in range(mgroup_count):
+ mcast = prefix + "." + str(suffix + i)
+ try:
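+ # Pack a struct ip_mreq: 4-byte multicast group address followed by
+ # the interface address (INADDR_ANY selects the default interface).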
+ mreq = struct.pack("4sl", socket.inet_aton(mcast),
+ socket.INADDR_ANY)
+ s.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
+ except:
+ s.close()
+ print "Could not join multicast: %s" % mcast
+ raise
+
+ print "join_mcast_pid:%s" % os.getpid()
+ os.kill(os.getpid(), signal.SIGSTOP)
+ s.close()
diff --git a/client/tests/kvm/scripts/qemu-ifup b/client/tests/kvm/scripts/qemu-ifup
index 413d492..c4debf5 100755
--- a/client/tests/kvm/scripts/qemu-ifup
+++ b/client/tests/kvm/scripts/qemu-ifup
@@ -4,6 +4,7 @@
# Modify it to suit your needs.
switch=$(/usr/sbin/brctl show | awk 'NR==2 { print $1 }')
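+# Disable IPv6 on the bridge; tests that need IPv6 enabled can use the
+# companion qemu-ifup-ipv6 script added below.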
+/bin/echo 1 > /proc/sys/net/ipv6/conf/${switch}/disable_ipv6
/sbin/ifconfig $1 0.0.0.0 up
/usr/sbin/brctl addif ${switch} $1
/usr/sbin/brctl setfd ${switch} 0
diff --git a/client/tests/kvm/scripts/qemu-ifup-ipv6 b/client/tests/kvm/scripts/qemu-ifup-ipv6
new file mode 100644
index 0000000..d4b0592
--- /dev/null
+++ b/client/tests/kvm/scripts/qemu-ifup-ipv6
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+# The following expression selects the first bridge listed by 'brctl show'.
+# Modify it to suit your needs.
+switch=$(/usr/sbin/brctl show | awk 'NR==2 { print $1 }')
+
+/bin/echo 0 > /proc/sys/net/ipv6/conf/${switch}/disable_ipv6
+/sbin/ifconfig $1 0.0.0.0 up
+/usr/sbin/brctl addif ${switch} $1
+/usr/sbin/brctl setfd ${switch} 0
+/usr/sbin/brctl stp ${switch} off
diff --git a/client/tests/kvm/scripts/unattended.py b/client/tests/kvm/scripts/unattended.py
index ba7d80b..1029d1e 100755
--- a/client/tests/kvm/scripts/unattended.py
+++ b/client/tests/kvm/scripts/unattended.py
@@ -276,14 +276,16 @@
if not os.path.isdir(self.tftp):
os.makedirs(self.tftp)
- self.cdrom_cd1 = os.path.join(KVM_TEST_DIR, self.cdrom_cd1)
+ if self.cdrom_cd1:
+ self.cdrom_cd1 = os.path.join(KVM_TEST_DIR, self.cdrom_cd1)
self.cdrom_cd1_mount = tempfile.mkdtemp(prefix='cdrom_cd1_', dir='/tmp')
if self.medium == 'nfs':
self.nfs_mount = tempfile.mkdtemp(prefix='nfs_', dir='/tmp')
- self.floppy = os.path.join(KVM_TEST_DIR, self.floppy)
- if not os.path.isdir(os.path.dirname(self.floppy)):
- os.makedirs(os.path.dirname(self.floppy))
+ if self.floppy:
+ self.floppy = os.path.join(KVM_TEST_DIR, self.floppy)
+ if not os.path.isdir(os.path.dirname(self.floppy)):
+ os.makedirs(os.path.dirname(self.floppy))
self.image_path = KVM_TEST_DIR
self.kernel_path = os.path.join(self.image_path, self.kernel)
diff --git a/client/tests/kvm/tests/build.py b/client/tests/kvm/tests/build.py
index f39371a..c4f0b18 100644
--- a/client/tests/kvm/tests/build.py
+++ b/client/tests/kvm/tests/build.py
@@ -1,10 +1,5 @@
import time, os, sys, urllib, re, signal, logging, datetime, glob, ConfigParser
import shutil
-try:
- import koji
- KOJI_INSTALLED = True
-except ImportError:
- KOJI_INSTALLED = False
from autotest_lib.client.bin import utils, test, os_dep
from autotest_lib.client.common_lib import error
import kvm_utils
@@ -268,96 +263,27 @@
"""
def __init__(self, test, params):
"""
- Initialize koji/brew session.
+ Gets parameters and initializes the package downloader.
@param test: kvm test object
@param params: Dictionary with test arguments
"""
super(KojiInstaller, self).__init__(test, params)
-
default_koji_cmd = '/usr/bin/koji'
default_src_pkg = 'qemu'
-
- self.koji_cmd = params.get("koji_cmd", default_koji_cmd)
self.src_pkg = params.get("src_pkg", default_src_pkg)
-
- # Checking if all required dependencies are available
- os_dep.command(self.koji_cmd)
-
- config_map = {'/usr/bin/koji': '/etc/koji.conf',
- '/usr/bin/brew': '/etc/brewkoji.conf'}
- config_file = config_map[self.koji_cmd]
- base_name = os.path.basename(self.koji_cmd)
- if os.access(config_file, os.F_OK):
- f = open(config_file)
- config = ConfigParser.ConfigParser()
- config.readfp(f)
- f.close()
- else:
- raise error.TestError('Configuration file %s missing or with wrong '
- 'permissions' % config_file)
-
- if config.has_section(base_name):
- self.koji_options = {}
- session_options = {}
- server = None
- for name, value in config.items(base_name):
- if name in ('user', 'password', 'debug_xmlrpc', 'debug'):
- session_options[name] = value
- self.koji_options[name] = value
- self.session = koji.ClientSession(self.koji_options['server'],
- session_options)
- else:
- raise error.TestError('Koji config file %s does not have a %s '
- 'session' % (config_file, base_name))
-
self.tag = params.get("koji_tag", None)
self.build = params.get("koji_build", None)
- if self.build and self.build.isdigit():
- self.build = int(self.build)
- if self.tag and self.build:
- logging.info("Both tag and build parameters provided, ignoring tag "
- "parameter...")
- if not self.tag and not self.build:
- raise error.TestError("Koji install selected but neither koji_tag "
- "nor koji_build parameters provided. Please "
- "provide an appropriate tag or build name.")
+ koji_cmd = params.get("koji_cmd", default_koji_cmd)
+ self.downloader = kvm_utils.KojiDownloader(cmd=koji_cmd)
def _get_packages(self):
"""
Downloads the specific arch RPMs for the specific build name.
"""
- if self.build is None:
- try:
- builds = self.session.listTagged(self.tag, latest=True,
- package=self.src_pkg)
- except koji.GenericError, e:
- raise error.TestError("Error finding latest build for tag %s: "
- "%s" % (self.tag, e))
- if not builds:
- raise error.TestError("Tag %s has no builds of %s" %
- (self.tag, self.src_pkg))
- info = builds[0]
- else:
- info = self.session.getBuild(self.build)
-
- if info is None:
- raise error.TestError('No such brew/koji build: %s' %
- self.build)
- rpms = self.session.listRPMs(buildID=info['id'],
- arches=utils.get_arch())
- if not rpms:
- raise error.TestError("No %s packages available for %s" %
- utils.get_arch(), koji.buildLabel(info))
- for rpm in rpms:
- rpm_name = koji.pathinfo.rpm(rpm)
- url = ("%s/%s/%s/%s/%s" % (self.koji_options['pkgurl'],
- info['package_name'],
- info['version'], info['release'],
- rpm_name))
- utils.get_file(url,
- os.path.join(self.srcdir, os.path.basename(url)))
+ self.downloader.get(src_package=self.src_pkg, tag=self.tag,
+ build=self.build, dst_dir=self.srcdir)
def install(self):
@@ -680,11 +606,7 @@
elif install_mode == 'yum':
installer = YumInstaller(test, params)
elif install_mode == 'koji':
- if KOJI_INSTALLED:
- installer = KojiInstaller(test, params)
- else:
- raise error.TestError('Koji install selected but koji/brew are not '
- 'installed')
+ installer = KojiInstaller(test, params)
else:
raise error.TestError('Invalid or unsupported'
' install mode: %s' % install_mode)
diff --git a/client/tests/kvm/tests/ethtool.py b/client/tests/kvm/tests/ethtool.py
new file mode 100644
index 0000000..56b1c70
--- /dev/null
+++ b/client/tests/kvm/tests/ethtool.py
@@ -0,0 +1,222 @@
+import logging, commands, re
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.bin import utils
+import kvm_test_utils, kvm_utils
+
+def run_ethtool(test, params, env):
+ """
+ Test offload functions of ethernet device by ethtool
+
+ 1) Log into a guest.
+ 2) Initialize the callback of sub functions.
+ 3) Enable/disable sub function of NIC.
+ 4) Execute callback function.
+ 5) Check the return value.
+ 6) Restore original configuration.
+
+ @param test: KVM test object.
+ @param params: Dictionary with the test parameters.
+ @param env: Dictionary with test environment.
+
+ @todo: Not all guests have ethtool installed, so
+ find a way to get it installed using yum/apt-get/
+ whatever
+ """
+ def ethtool_get(type):
+ feature_pattern = {
+ 'tx': 'tx.*checksumming',
+ 'rx': 'rx.*checksumming',
+ 'sg': 'scatter.*gather',
+ 'tso': 'tcp.*segmentation.*offload',
+ 'gso': 'generic.*segmentation.*offload',
+ 'gro': 'generic.*receive.*offload',
+ 'lro': 'large.*receive.*offload',
+ }
+ s, o = session.get_command_status_output("ethtool -k %s" % ethname)
+ try:
+ return re.findall("%s: (.*)" % feature_pattern.get(type), o)[0]
+ except IndexError:
+ logging.debug("Could not get %s status" % type)
+
+
+ def ethtool_set(type, status):
+ """
+ Set ethernet device offload status
+
+ @param type: Offload type name
+ @param status: New status to set ("on" or "off")
+ """
+ logging.info("Try to set %s %s" % (type, status))
+ if status not in ["off", "on"]:
+ return False
+ cmd = "ethtool -K %s %s %s" % (ethname, type, status)
+ if ethtool_get(type) != status:
+ return session.get_command_status(cmd) == 0
+ if ethtool_get(type) != status:
+ logging.error("Fail to set %s %s" % (type, status))
+ return False
+ return True
+
+
+ def ethtool_save_params():
+ logging.info("Save ethtool configuration")
+ for i in supported_features:
+ feature_status[i] = ethtool_get(i)
+
+
+ def ethtool_restore_params():
+ logging.info("Restore ethtool configuration")
+ for i in supported_features:
+ ethtool_set(i, feature_status[i])
+
+
+ def compare_md5sum(name):
+ logging.info("Compare md5sum of the files on guest and host")
+ host_result = utils.hash_file(name, method="md5")
+ try:
+ o = session.get_command_output("md5sum %s" % name)
+ guest_result = re.findall("\w+", o)[0]
+ except IndexError:
+ logging.error("Could not get file md5sum in guest")
+ return False
+ logging.debug("md5sum: guest(%s), host(%s)" %
+ (guest_result, host_result))
+ return guest_result == host_result
+
+
+ def transfer_file(src="guest"):
+ """
+ Transfer file by scp, use tcpdump to capture packets, then check the
+ return string.
+
+ @param src: Source host of transfer file
+ @return: Tuple (status, error msg/tcpdump result)
+ """
+ session2.get_command_status("rm -rf %s" % filename)
+ dd_cmd = "dd if=/dev/urandom of=%s bs=1M count=%s" % (filename,
+ params.get("filesize"))
+ logging.info("Creat file in source host, cmd: %s" % dd_cmd)
+ tcpdump_cmd = "tcpdump -lep -s 0 tcp -vv port ssh"
+ if src == "guest":
+ s = session.get_command_status(dd_cmd, timeout=360)
+ tcpdump_cmd += " and src %s" % guest_ip
+ copy_files_fun = vm.copy_files_from
+ else:
+ s, o = commands.getstatusoutput(dd_cmd)
+ tcpdump_cmd += " and dst %s" % guest_ip
+ copy_files_fun = vm.copy_files_to
+ if s != 0:
+ return (False, "Fail to create file by dd, cmd: %s" % dd_cmd)
+
+ # Only capture traffic on TCP ports opened after the offload setup
+ original_tcp_ports = re.findall("tcp.*:(\d+).*%s" % guest_ip,
+ utils.system_output("/bin/netstat -nap"))
+ for i in original_tcp_ports:
+ tcpdump_cmd += " and not port %s" % i
+ logging.debug("Listen by command: %s" % tcpdump_cmd)
+ session2.sendline(tcpdump_cmd)
+ if not kvm_utils.wait_for(lambda: session.get_command_status(
+ "pgrep tcpdump") == 0, 30):
+ return (False, "Tcpdump process wasn't launched")
+
+ logging.info("Start to transfer file")
+ if not copy_files_fun(filename, filename):
+ return (False, "Child process transfer file failed")
+ logging.info("Transfer file completed")
+ if session.get_command_status("killall tcpdump") != 0:
+ return (False, "Could not kill all tcpdump process")
+ s, tcpdump_string = session2.read_up_to_prompt(timeout=60)
+ if not s:
+ return (False, "Fail to read tcpdump's output")
+
+ if not compare_md5sum(filename):
+ return (False, "Files' md5sum mismatched")
+ return (True, tcpdump_string)
+
+
+ def tx_callback(status="on"):
+ s, o = transfer_file(src="guest")
+ if not s:
+ logging.error(o)
+ return False
+ return True
+
+
+ def rx_callback(status="on"):
+ s, o = transfer_file(src="host")
+ if not s:
+ logging.error(o)
+ return False
+ return True
+
+
+ def so_callback(status="on"):
+ s, o = transfer_file(src="guest")
+ if not s:
+ logging.error(o)
+ return False
+ logging.info("Check if contained large frame")
+ # MTU: default IPv4 MTU is 1500 Bytes, ethernet header is 14 Bytes
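+ # With segmentation offload on, the guest-side capture should show
+ # frames larger than the MTU (packets are segmented below the capture
+ # point); with it off, it must not.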
+ return (status == "on") ^ (len([i for i in re.findall(
+ "length (\d*):", o) if int(i) > mtu]) == 0)
+
+
+ def ro_callback(status="on"):
+ s, o = transfer_file(src="host")
+ if not s:
+ logging.error(o)
+ return False
+ return True
+
+
+ vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+ session = kvm_test_utils.wait_for_login(vm,
+ timeout=int(params.get("login_timeout", 360)))
+ # Raise a test error if ethtool is not installed on the guest
+ if session.get_command_status("ethtool -h"):
+ raise error.TestError("Command ethtool not installed on guest")
+ session2 = kvm_test_utils.wait_for_login(vm,
+ timeout=int(params.get("login_timeout", 360)))
+ mtu = 1514
+ feature_status = {}
+ filename = "/tmp/ethtool.dd"
+ guest_ip = vm.get_address()
+ ethname = kvm_test_utils.get_linux_ifname(session, vm.get_mac_address(0))
+ supported_features = params.get("supported_features").split()
+ test_matrix = {
+ # type: (callback, (dependencies,), (exclusions,))
+ "tx": (tx_callback, (), ()),
+ "rx": (rx_callback, (), ()),
+ "sg": (tx_callback, ("tx",), ()),
+ "tso": (so_callback, ("tx", "sg",), ("gso",)),
+ "gso": (so_callback, (), ("tso",)),
+ "gro": (ro_callback, ("rx",), ("lro",)),
+ "lro": (rx_callback, (), ("gro",)),
+ }
+ ethtool_save_params()
+ success = True
+ try:
+ for type in supported_features:
+ callback = test_matrix[type][0]
+ for i in test_matrix[type][2]:
+ if not ethtool_set(i, "off"):
+ logging.error("Fail to disable %s" % i)
+ success = False
+ for i in [f for f in test_matrix[type][1]] + [type]:
+ if not ethtool_set(i, "on"):
+ logging.error("Fail to enable %s" % i)
+ success = False
+ if not callback():
+ raise error.TestFail("Test failed, %s: on" % type)
+
+ if not ethtool_set(type, "off"):
+ logging.error("Fail to disable %s" % type)
+ success = False
+ if not callback(status="off"):
+ raise error.TestFail("Test failed, %s: off" % type)
+ if not success:
+ raise error.TestError("Enable/disable offload function fail")
+ finally:
+ ethtool_restore_params()
+ session.close()
+ session2.close()
diff --git a/client/tests/kvm/tests/file_transfer.py b/client/tests/kvm/tests/file_transfer.py
new file mode 100644
index 0000000..e872bed
--- /dev/null
+++ b/client/tests/kvm/tests/file_transfer.py
@@ -0,0 +1,90 @@
+import logging, commands, re, time, os
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.bin import utils
+import kvm_utils, kvm_test_utils
+
+def run_file_transfer(test, params, env):
+ """
+ Test basic file transfer between host and guest.
+
+ 1) Boot up a VM.
+ 2) Create a large file by dd on host.
+ 3) Copy this file from host to guest.
+ 4) Copy this file from guest to host.
+ 5) Check that the file is unchanged after the transfers (MD5).
+
+ @param test: KVM test object.
+ @param params: Dictionary with the test parameters.
+ @param env: Dictionary with test environment.
+ """
+ vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+ timeout = int(params.get("login_timeout", 360))
+
+ session = kvm_test_utils.wait_for_login(vm, timeout=timeout)
+ if not session:
+ raise error.TestFail("Could not log into guest '%s'" % vm.name)
+
+ dir = test.tmpdir
+ transfer_timeout = int(params.get("transfer_timeout"))
+ transfer_type = params.get("transfer_type")
+ tmp_dir = params.get("tmp_dir", "/tmp/")
+ clean_cmd = params.get("clean_cmd", "rm -f")
+ filesize = int(params.get("filesize", 4000))
+ cmd = "dd if=/dev/urandom of=%s/a.out bs=1M count=%d" % (dir, filesize)
+ guest_path = tmp_dir + "b.out"
+
+ try:
+ logging.info("Creating %dMB file on host", filesize)
+ utils.run(cmd)
+
+ if transfer_type == "remote":
+ logging.info("Transfering file host -> guest, timeout: %ss",
+ transfer_timeout)
+ t_begin = time.time()
+ success = vm.copy_files_to("%s/a.out" % dir, guest_path,
+ timeout=transfer_timeout)
+ t_end = time.time()
+ throughput = filesize / (t_end - t_begin)
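+ # filesize is in MB, so this is an estimated throughput in MB/s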
+ if not success:
+ raise error.TestFail("Fail to transfer file from host to guest")
+ logging.info("File transfer host -> guest succeed, "
+ "estimated throughput: %.2fMB/s", throughput)
+
+ logging.info("Transfering file guest -> host, timeout: %ss",
+ transfer_timeout)
+ t_begin = time.time()
+ success = vm.copy_files_from(guest_path, "%s/c.out" % dir,
+ timeout=transfer_timeout)
+ t_end = time.time()
+ throughput = filesize / (t_end - t_begin)
+ if not success:
+ raise error.TestFail("Fail to transfer file from guest to host")
+ logging.info("File transfer guest -> host succeed, "
+ "estimated throughput: %.2fMB/s", throughput)
+ else:
+ raise error.TestError("Unknown test file transfer mode %s" %
+ transfer_type)
+
+ for f in ['a.out', 'c.out']:
+ p = os.path.join(dir, f)
+ size = os.path.getsize(p)
+ logging.debug('Size of %s: %sB', f, size)
+
+ md5_orig = utils.hash_file("%s/a.out" % dir, method="md5")
+ md5_new = utils.hash_file("%s/c.out" % dir, method="md5")
+
+ if md5_orig != md5_new:
+ raise error.TestFail("File changed after transfer host -> guest "
+ "and guest -> host")
+
+ finally:
+ logging.info('Cleaning temp file on guest')
+ clean_cmd += " %s" % guest_path
+ s, o = session.get_command_status_output(clean_cmd)
+ if s:
+ logging.warning("Failed to clean remote file %s, output:%s",
+ guest_path, o)
+ logging.info('Cleaning temp files on host')
+ os.remove('%s/a.out' % dir)
+ os.remove('%s/c.out' % dir)
+ session.close()
diff --git a/client/tests/kvm/tests/jumbo.py b/client/tests/kvm/tests/jumbo.py
new file mode 100644
index 0000000..2c91c83
--- /dev/null
+++ b/client/tests/kvm/tests/jumbo.py
@@ -0,0 +1,130 @@
+import logging, commands, random
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.bin import utils
+import kvm_test_utils, kvm_utils
+
+def run_jumbo(test, params, env):
+ """
+ Test the RX jumbo frame function of vnics:
+
+ 1) Boot the VM.
+ 2) Change the MTU of guest nics and host taps depending on the NIC model.
+ 3) Add the static ARP entry for guest NIC.
+ 4) Wait until the new MTU takes effect.
+ 5) Verify the path MTU using ping.
+ 6) Ping the guest with large frames.
+ 7) Ping with increasing packet sizes.
+ 8) Flood ping the guest with large frames.
+ 9) Verify the path MTU.
+ 10) Recover the MTU.
+
+ @param test: KVM test object.
+ @param params: Dictionary with the test parameters.
+ @param env: Dictionary with test environment.
+ """
+
+ vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+ session = kvm_test_utils.wait_for_login(vm)
+ mtu = params.get("mtu", "1500")
+ flood_time = params.get("flood_time", "300")
+ max_icmp_pkt_size = int(mtu) - 28
+
+ ifname = vm.get_ifname(0)
+ ip = vm.get_address(0)
+ if ip is None:
+ raise error.TestError("Could not get the IP address")
+
+ try:
+ # Environment preparation
+ ethname = kvm_test_utils.get_linux_ifname(session, vm.get_mac_address(0))
+
+ logging.info("Changing the MTU of guest ...")
+ guest_mtu_cmd = "ifconfig %s mtu %s" % (ethname , mtu)
+ s, o = session.get_command_status_output(guest_mtu_cmd)
+ if s != 0:
+ logging.error(o)
+ raise error.TestError("Fail to set the MTU of guest NIC: %s" %
+ ethname)
+
+ logging.info("Chaning the MTU of host tap ...")
+ host_mtu_cmd = "ifconfig %s mtu %s" % (ifname, mtu)
+ utils.run(host_mtu_cmd)
+
+ logging.info("Add a temporary static ARP entry ...")
+ arp_add_cmd = "arp -s %s %s -i %s" % (ip, vm.get_mac_address(0), ifname)
+ utils.run(arp_add_cmd)
+
+ def is_mtu_ok():
+ s, o = kvm_test_utils.ping(ip, 1, interface=ifname,
+ packetsize=max_icmp_pkt_size,
+ hint="do", timeout=2)
+ return s == 0
+
+ def verify_mtu():
+ logging.info("Verify the path MTU")
+ s, o = kvm_test_utils.ping(ip, 10, interface=ifname,
+ packetsize=max_icmp_pkt_size,
+ hint="do", timeout=15)
+ if s != 0:
+ logging.error(o)
+ raise error.TestFail("Path MTU is not as expected")
+ if kvm_test_utils.get_loss_ratio(o) != 0:
+ logging.error(o)
+ raise error.TestFail("Packet loss ratio during MTU "
+ "verification is not zero")
+
+ def flood_ping():
+ logging.info("Flood with large frames")
+ kvm_test_utils.ping(ip, interface=ifname,
+ packetsize=max_icmp_pkt_size,
+ flood=True, timeout=float(flood_time))
+
+ def large_frame_ping(count=100):
+ logging.info("Large frame ping")
+ s, o = kvm_test_utils.ping(ip, count, interface=ifname,
+ packetsize=max_icmp_pkt_size,
+ timeout=float(count) * 2)
+ ratio = kvm_test_utils.get_loss_ratio(o)
+ if ratio != 0:
+ raise error.TestFail("Loss ratio of large frame ping is %s" %
+ ratio)
+
+ def size_increase_ping(step=random.randrange(90, 110)):
+ logging.info("Size increase ping")
+ for size in range(0, max_icmp_pkt_size + 1, step):
+ logging.info("Ping %s with size %s" % (ip, size))
+ s, o = kvm_test_utils.ping(ip, 1, interface=ifname,
+ packetsize=size,
+ hint="do", timeout=1)
+ if s != 0:
+ s, o = kvm_test_utils.ping(ip, 10, interface=ifname,
+ packetsize=size,
+ adaptive=True, hint="do",
+ timeout=20)
+
+ if kvm_test_utils.get_loss_ratio(o) > int(params.get(
+ "fail_ratio", 50)):
+ raise error.TestFail("Ping loss ratio is greater "
+ "than 50% for size %s" % size)
+
+ logging.info("Waiting for the MTU to be OK")
+ wait_mtu_ok = 10
+ if not kvm_utils.wait_for(is_mtu_ok, wait_mtu_ok, 0, 1):
+ logging.debug(commands.getoutput("ifconfig -a"))
+ raise error.TestError("MTU is not as expected even after %s "
+ "seconds" % wait_mtu_ok)
+
+ # Functional Test
+ verify_mtu()
+ large_frame_ping()
+ size_increase_ping()
+
+ # Stress test
+ flood_ping()
+ verify_mtu()
+
+ finally:
+ # Environment clean
+ session.close()
+ logging.info("Removing the temporary ARP entry")
+ utils.run("arp -d %s -i %s" % (ip, ifname))
diff --git a/client/tests/kvm/tests/mac_change.py b/client/tests/kvm/tests/mac_change.py
new file mode 100644
index 0000000..c614e15
--- /dev/null
+++ b/client/tests/kvm/tests/mac_change.py
@@ -0,0 +1,65 @@
+import logging
+from autotest_lib.client.common_lib import error
+import kvm_utils, kvm_test_utils
+
+
+def run_mac_change(test, params, env):
+ """
+ Change MAC address of guest.
+
+ 1) Get a new MAC from the pool and the current MAC of the guest.
+ 2) Set new mac in guest and regain new IP.
+ 3) Re-log into guest with new MAC.
+
+ @param test: KVM test object.
+ @param params: Dictionary with the test parameters.
+ @param env: Dictionary with test environment.
+ """
+ timeout = int(params.get("login_timeout", 360))
+ vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+ logging.info("Trying to log into guest '%s' by serial", vm.name)
+ session = kvm_utils.wait_for(lambda: vm.serial_login(),
+ timeout, 0, step=2)
+ if not session:
+ raise error.TestFail("Could not log into guest '%s'" % vm.name)
+
+ old_mac = vm.get_mac_address(0)
+ while True:
+ vm.free_mac_address(0)
+ new_mac = kvm_utils.generate_mac_address(vm.instance, 0)
+ if old_mac != new_mac:
+ break
+ logging.info("The initial MAC address is %s", old_mac)
+ interface = kvm_test_utils.get_linux_ifname(session, old_mac)
+ # Start change MAC address
+ logging.info("Changing MAC address to %s", new_mac)
+ change_cmd = ("ifconfig %s down && ifconfig %s hw ether %s && "
+ "ifconfig %s up" % (interface, interface, new_mac, interface))
+ if session.get_command_status(change_cmd) != 0:
+ raise error.TestFail("Fail to send mac_change command")
+
+ # Verify whether MAC address was changed to the new one
+ logging.info("Verifying the new mac address")
+ if session.get_command_status("ifconfig | grep -i %s" % new_mac) != 0:
+ raise error.TestFail("Fail to change MAC address")
+
+ # Restart `dhclient' to regain IP for new mac address
+ logging.info("Restart the network to gain new IP")
+ dhclient_cmd = "dhclient -r && dhclient %s" % interface
+ session.sendline(dhclient_cmd)
+
+ # Re-log into the guest after changing mac address
+ if kvm_utils.wait_for(session.is_responsive, 120, 20, 3):
+ # Only warn if the session stays responsive, since there is a small
+ # chance the IP address did not actually change.
+ logging.warn("The session is still responsive, settings may fail.")
+ session.close()
+
+ # Re-log into guest and check if session is responsive
+ logging.info("Re-log into the guest")
+ session = kvm_test_utils.wait_for_login(vm,
+ timeout=int(params.get("login_timeout", 360)))
+ if not session.is_responsive():
+ raise error.TestFail("The new session is not responsive.")
+
+ session.close()
diff --git a/client/tests/kvm/tests/multicast.py b/client/tests/kvm/tests/multicast.py
new file mode 100644
index 0000000..a47779a
--- /dev/null
+++ b/client/tests/kvm/tests/multicast.py
@@ -0,0 +1,91 @@
+import logging, os, re
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.bin import utils
+import kvm_test_utils
+
+
+def run_multicast(test, params, env):
+ """
+ Test multicast function of nic (rtl8139/e1000/virtio)
+
+ 1) Create a VM.
+ 2) Join guest into multicast groups.
+ 3) Ping multicast addresses on host.
+ 4) Flood ping test with different size of packets.
+ 5) Final ping test and check if lose packet.
+
+ @param test: KVM test object.
+ @param params: Dictionary with the test parameters.
+ @param env: Dictionary with test environment.
+ """
+ vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+ session = kvm_test_utils.wait_for_login(vm,
+ timeout=int(params.get("login_timeout", 360)))
+
+ def run_guest(cmd):
+ s, o = session.get_command_status_output(cmd)
+ if s:
+ logging.warning('Command %s executed in guest returned exit code '
+ '%s, output: %s', cmd, s, o.strip())
+
+ def run_host_guest(cmd):
+ run_guest(cmd)
+ utils.system(cmd, ignore_status=True)
+
+ # flush the firewall rules
+ cmd_flush = "iptables -F"
+ cmd_selinux = ("if [ -e /selinux/enforce ]; then setenforce 0; "
+ "else echo 'no /selinux/enforce file present'; fi")
+ run_host_guest(cmd_flush)
+ run_host_guest(cmd_selinux)
+ # make sure guest replies to broadcasts
+ cmd_broadcast = "echo 0 > /proc/sys/net/ipv4/icmp_echo_ignore_broadcasts"
+ cmd_broadcast_2 = "echo 0 > /proc/sys/net/ipv4/icmp_echo_ignore_all"
+ run_guest(cmd_broadcast)
+ run_guest(cmd_broadcast_2)
+
+ # base multicast address
+ mcast = params.get("mcast", "225.0.0.1")
+ # count of multicast addresses, less than 20
+ mgroup_count = int(params.get("mgroup_count", 5))
+ flood_minutes = float(params.get("flood_minutes", 10))
+ ifname = vm.get_ifname()
+ prefix = re.findall("\d+.\d+.\d+", mcast)[0]
+ suffix = int(re.findall("\d+", mcast)[-1])
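+ # e.g. mcast "225.0.0.1" -> prefix "225.0.0", suffix 1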
+ # copy python script to guest for joining guest to multicast groups
+ mcast_path = os.path.join(test.bindir, "scripts/join_mcast.py")
+ if not vm.copy_files_to(mcast_path, "/tmp"):
+ raise error.TestError("Fail to copy %s to guest" % mcast_path)
+ output = session.get_command_output("python /tmp/join_mcast.py %d %s %d" %
+ (mgroup_count, prefix, suffix))
+
+ # if success to join multicast, the process will be paused, and return PID.
+ try:
+ pid = re.findall("join_mcast_pid:(\d+)", output)[0]
+ except IndexError:
+ raise error.TestFail("Can't join multicast groups,output:%s" % output)
+
+ try:
+ for i in range(mgroup_count):
+ new_suffix = suffix + i
+ mcast = "%s.%d" % (prefix, new_suffix)
+
+ logging.info("Initial ping test, mcast: %s", mcast)
+ s, o = kvm_test_utils.ping(mcast, 10, interface=ifname, timeout=20)
+ if s != 0:
+ raise error.TestFail(" Ping return non-zero value %s" % o)
+
+ logging.info("Flood ping test, mcast: %s", mcast)
+ kvm_test_utils.ping(mcast, None, interface=ifname, flood=True,
+ output_func=None, timeout=flood_minutes*60)
+
+ logging.info("Final ping test, mcast: %s", mcast)
+ s, o = kvm_test_utils.ping(mcast, 10, interface=ifname, timeout=20)
+ if s != 0:
+ raise error.TestFail("Ping failed, status: %s, output: %s" %
+ (s, o))
+
+ finally:
+ logging.debug(session.get_command_output("ipmaddr show"))
+ session.get_command_output("kill -s SIGCONT %s" % pid)
+ session.close()
diff --git a/client/tests/kvm/tests/netperf.py b/client/tests/kvm/tests/netperf.py
new file mode 100644
index 0000000..dc21e0f
--- /dev/null
+++ b/client/tests/kvm/tests/netperf.py
@@ -0,0 +1,70 @@
+import logging, commands, os
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.bin import utils
+import kvm_test_utils
+
+def run_netperf(test, params, env):
+ """
+ Network stress test with netperf.
+
+ 1) Boot up a VM.
+ 2) Launch netserver on guest.
+ 3) Execute netperf client on host with different protocols.
+ 4) Output the test result.
+
+ @param test: KVM test object.
+ @param params: Dictionary with the test parameters.
+ @param env: Dictionary with test environment.
+ """
+ vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+ login_timeout = int(params.get("login_timeout", 360))
+ session = kvm_test_utils.wait_for_login(vm, timeout=login_timeout)
+
+ netperf_dir = os.path.join(os.environ['AUTODIR'], "tests/netperf2")
+ setup_cmd = params.get("setup_cmd")
+ guest_ip = vm.get_address()
+ result_file = os.path.join(test.resultsdir, "output_%s" % test.iteration)
+
+ firewall_flush = "iptables -F"
+ session.get_command_output(firewall_flush)
+
+ for i in params.get("netperf_files").split():
+ if not vm.copy_files_to(os.path.join(netperf_dir, i), "/tmp"):
+ raise error.TestError("Could not copy file %s to guest" % i)
+
+ if session.get_command_status(firewall_flush):
+ logging.warning("Could not flush firewall rules on guest")
+
+ if session.get_command_status(setup_cmd % "/tmp", timeout=200):
+ raise error.TestFail("Fail to setup netperf on guest")
+
+ if session.get_command_status(params.get("netserver_cmd") % "/tmp"):
+ raise error.TestFail("Fail to start netperf server on guest")
+
+ try:
+ logging.info("Setup and run netperf client on host")
+ utils.run(setup_cmd % netperf_dir)
+ list_fail = []
+ result = open(result_file, "w")
+ result.write("Netperf test results\n")
+
+ for i in params.get("protocols").split():
+ cmd = params.get("netperf_cmd") % (netperf_dir, i, guest_ip)
+ logging.info("Netperf: protocol %s", i)
+ try:
+ netperf_output = utils.system_output(cmd,
+ retain_output=True)
+ result.write("%s\n" % netperf_output)
+ except:
+ logging.error("Test of protocol %s failed", i)
+ list_fail.append(i)
+
+ result.close()
+
+ if list_fail:
+ raise error.TestFail("Some netperf tests failed: %s" %
+ ", ".join(list_fail))
+
+ finally:
+ session.get_command_output("killall netserver")
+ session.close()
diff --git a/client/tests/kvm/tests/nic_promisc.py b/client/tests/kvm/tests/nic_promisc.py
new file mode 100644
index 0000000..99bbf8c
--- /dev/null
+++ b/client/tests/kvm/tests/nic_promisc.py
@@ -0,0 +1,103 @@
+import logging
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.bin import utils
+import kvm_utils, kvm_test_utils
+
+def run_nic_promisc(test, params, env):
+ """
+ Test nic driver in promisc mode:
+
+ 1) Boot up a VM.
+ 2) Repeatedly enable/disable promiscuous mode in guest.
+ 3) TCP data transmission from host to guest, and from guest to host,
+ with 1/1460/65000/100000000 bytes payloads.
+ 4) Clean temporary files.
+ 5) Stop enable/disable promiscuous mode change.
+
+ @param test: KVM test object.
+ @param params: Dictionary with the test parameters.
+ @param env: Dictionary with test environment.
+ """
+ timeout = int(params.get("login_timeout", 360))
+ vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+ session = kvm_test_utils.wait_for_login(vm, timeout=timeout)
+
+ logging.info("Trying to log into guest '%s' by serial", vm.name)
+ session2 = kvm_utils.wait_for(lambda: vm.serial_login(),
+ timeout, 0, step=2)
+ if not session2:
+ raise error.TestFail("Could not log into guest '%s'" % vm.name)
+
+ def compare(filename):
+ cmd = "md5sum %s" % filename
+ md5_host = utils.hash_file(filename, method="md5")
+ rc_guest, md5_guest = session.get_command_status_output(cmd)
+ if rc_guest:
+ logging.debug("Could not get MD5 hash for file %s on guest,"
+ "output: %s", filename, md5_guest)
+ return False
+ md5_guest = md5_guest.split()[0]
+ if md5_host != md5_guest:
+ logging.error("MD5 hash mismatch between file %s "
+ "present on guest and on host", filename)
+ logging.error("MD5 hash for file on guest: %s,"
+ "MD5 hash for file on host: %s", md5_host, md5_guest)
+ return False
+ return True
+
+ ethname = kvm_test_utils.get_linux_ifname(session, vm.get_mac_address(0))
+ set_promisc_cmd = ("ip link set %s promisc on; sleep 0.01;"
+ "ip link set %s promisc off; sleep 0.01" %
+ (ethname, ethname))
+ logging.info("Set promisc change repeatedly in guest")
+ session2.sendline("while true; do %s; done" % set_promisc_cmd)
+
+ dd_cmd = "dd if=/dev/urandom of=%s bs=%d count=1"
+ filename = "/tmp/nic_promisc_file"
+ file_size = params.get("file_size", "1, 1460, 65000, 100000000").split(",")
+ success_counter = 0
+ try:
+ for size in file_size:
+ logging.info("Create %s bytes file on host" % size)
+ utils.run(dd_cmd % (filename, int(size)))
+
+ logging.info("Transfer file from host to guest")
+ if not vm.copy_files_to(filename, filename):
+ logging.error("File transfer failed")
+ continue
+ if not compare(filename):
+ logging.error("Compare file failed")
+ continue
+ else:
+ success_counter += 1
+
+ logging.info("Create %s bytes file on guest" % size)
+ if session.get_command_status(dd_cmd % (filename, int(size)),
+ timeout=100) != 0:
+ logging.error("Create file on guest failed")
+ continue
+
+ logging.info("Transfer file from guest to host")
+ if not vm.copy_files_from(filename, filename):
+ logging.error("File transfer failed")
+ continue
+ if not compare(filename):
+ logging.error("Compare file failed")
+ continue
+ else:
+ success_counter += 1
+
+ logging.info("Clean temporary files")
+ cmd = "rm -f %s" % filename
+ utils.run(cmd)
+ session.get_command_status(cmd)
+
+ finally:
+ logging.info("Restore the %s to the nonpromisc mode", ethname)
+ session2.close()
+ session.get_command_status("ip link set %s promisc off" % ethname)
+ session.close()
+
+ if success_counter != 2 * len(file_size):
+ raise error.TestFail("Some tests failed, succss_ratio : %s/%s" %
+ (success_counter, len(file_size)))
diff --git a/client/tests/kvm/tests/nicdriver_unload.py b/client/tests/kvm/tests/nicdriver_unload.py
new file mode 100644
index 0000000..47318ba
--- /dev/null
+++ b/client/tests/kvm/tests/nicdriver_unload.py
@@ -0,0 +1,115 @@
+import logging, threading, os
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.bin import utils
+import kvm_utils, kvm_test_utils
+
+def run_nicdriver_unload(test, params, env):
+ """
+ Test nic driver.
+
+ 1) Boot a VM.
+ 2) Get the NIC driver name.
+ 3) Repeatedly unload/load NIC driver.
+ 4) Multi-session TCP transfer on test interface.
+ 5) Check that the test interface still works.
+
+ @param test: KVM test object.
+ @param params: Dictionary with the test parameters.
+ @param env: Dictionary with test environment.
+ """
+ timeout = int(params.get("login_timeout", 360))
+ vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+ session = kvm_test_utils.wait_for_login(vm, timeout=timeout)
+ logging.info("Trying to log into guest '%s' by serial", vm.name)
+ session2 = kvm_utils.wait_for(lambda: vm.serial_login(),
+ timeout, 0, step=2)
+ if not session2:
+ raise error.TestFail("Could not log into guest '%s'" % vm.name)
+
+ ethname = kvm_test_utils.get_linux_ifname(session, vm.get_mac_address(0))
+ sys_path = "/sys/class/net/%s/device/driver" % (ethname)
+ s, o = session.get_command_status_output('readlink -e %s' % sys_path)
+ if s:
+ raise error.TestError("Could not find driver name")
+ driver = os.path.basename(o.strip())
+ logging.info("driver is %s", driver)
+
+ class ThreadScp(threading.Thread):
+ def run(self):
+ remote_file = '/tmp/' + self.getName()
+ file_list.append(remote_file)
+ ret = vm.copy_files_to(file_name, remote_file, timeout=scp_timeout)
+ if ret:
+ logging.debug("File %s was transfered successfuly", remote_file)
+ else:
+ logging.debug("Failed to transfer file %s", remote_file)
+
+ def compare(origin_file, receive_file):
+ cmd = "md5sum %s"
+ check_sum1 = utils.hash_file(origin_file, method="md5")
+ s, output2 = session.get_command_status_output(cmd % receive_file)
+ if s != 0:
+ logging.error("Could not get md5sum of receive_file")
+ return False
+ check_sum2 = output2.strip().split()[0]
+ logging.debug("original file md5: %s, received file md5: %s",
+ check_sum1, check_sum2)
+ if check_sum1 != check_sum2:
+ logging.error("MD5 hash of origin and received files doesn't match")
+ return False
+ return True
+
+ # Produce a file of the given size on the host
+ file_size = params.get("file_size")
+ file_name = "/tmp/nicdriver_unload_file"
+ cmd = "dd if=/dev/urandom of=%s bs=%sM count=1"
+ utils.system(cmd % (file_name, file_size))
+
+ file_list = []
+ connect_time = params.get("connect_time")
+ scp_timeout = int(params.get("scp_timeout"))
+ thread_num = int(params.get("thread_num"))
+ unload_load_cmd = ("sleep %s && ifconfig %s down && modprobe -r %s && "
+ "sleep 1 && modprobe %s && sleep 4 && ifconfig %s up" %
+ (connect_time, ethname, driver, driver, ethname))
+ pid = os.fork()
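+ # Parent: repeatedly unload/load the NIC driver until the child exits.
+ # Child: run the multi-session scp transfers, then exit.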
+ if pid != 0:
+ logging.info("Unload/load NIC driver repeatedly in guest...")
+ while True:
+ logging.debug("Try to unload/load nic drive once")
+ if session2.get_command_status(unload_load_cmd, timeout=120) != 0:
+ session.get_command_output("rm -rf /tmp/Thread-*")
+ raise error.TestFail("Unload/load nic driver failed")
+ pid, s = os.waitpid(pid, os.WNOHANG)
+ status = os.WEXITSTATUS(s)
+ if (pid, status) != (0, 0):
+ logging.debug("Child process ending")
+ break
+ else:
+ logging.info("Multi-session TCP data transfer")
+ threads = []
+ for i in range(thread_num):
+ t = ThreadScp()
+ t.start()
+ threads.append(t)
+ for t in threads:
+ t.join(timeout=scp_timeout)
+ os._exit(0)
+
+ session2.close()
+
+ try:
+ logging.info("Check MD5 hash for received files in multi-session")
+ for f in file_list:
+ if not compare(file_name, f):
+ raise error.TestFail("Fail to compare (guest) file %s" % f)
+
+ logging.info("Test nic function after load/unload")
+ if not vm.copy_files_to(file_name, file_name):
+ raise error.TestFail("Fail to copy file from host to guest")
+ if not compare(file_name, file_name):
+ raise error.TestFail("Test nic function after load/unload fail")
+
+ finally:
+ session.get_command_output("rm -rf /tmp/Thread-*")
+ session.close()
diff --git a/client/tests/kvm/tests/physical_resources_check.py b/client/tests/kvm/tests/physical_resources_check.py
index 0f7cab3..682c7b2 100644
--- a/client/tests/kvm/tests/physical_resources_check.py
+++ b/client/tests/kvm/tests/physical_resources_check.py
@@ -123,9 +123,9 @@
found_mac_addresses = re.findall("macaddr=(\S+)", o)
logging.debug("Found MAC adresses: %s" % found_mac_addresses)
- for nic_name in kvm_utils.get_sub_dict_names(params, "nics"):
- nic_params = kvm_utils.get_sub_dict(params, nic_name)
- mac, ip = kvm_utils.get_mac_ip_pair_from_dict(nic_params)
+ num_nics = len(kvm_utils.get_sub_dict_names(params, "nics"))
+ for nic_index in range(num_nics):
+ mac = vm.get_mac_address(nic_index)
if not string.lower(mac) in found_mac_addresses:
n_fail += 1
logging.error("MAC address mismatch:")
diff --git a/client/tests/kvm/tests/ping.py b/client/tests/kvm/tests/ping.py
new file mode 100644
index 0000000..9b2308f
--- /dev/null
+++ b/client/tests/kvm/tests/ping.py
@@ -0,0 +1,72 @@
+import logging
+from autotest_lib.client.common_lib import error
+import kvm_test_utils
+
+
+def run_ping(test, params, env):
+ """
+ Ping the guest with different size of packets.
+
+ Packet Loss Test:
+ 1) Ping the guest with different size/interval of packets.
+
+ Stress Test:
+ 1) Flood ping the guest.
+ 2) Check if the network is still usable.
+
+ @param test: KVM test object.
+ @param params: Dictionary with the test parameters.
+ @param env: Dictionary with test environment.
+ """
+
+ vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+ session = kvm_test_utils.wait_for_login(vm)
+
+ counts = params.get("ping_counts", 100)
+ flood_minutes = float(params.get("flood_minutes", 10))
+ nics = params.get("nics").split()
+ strict_check = params.get("strict_check", "no") == "yes"
+
+ packet_size = [0, 1, 4, 48, 512, 1440, 1500, 1505, 4054, 4055, 4096, 4192,
+ 8878, 9000, 32767, 65507]
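+ # Sizes straddle common MTU/fragmentation boundaries; 65507 is the
+ # largest possible IPv4 ICMP payload (65535 - 20 - 8).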
+
+ try:
+ for i, nic in enumerate(nics):
+ ip = vm.get_address(i)
+ if not ip:
+ logging.error("Could not get the ip of nic index %d", i)
+ continue
+
+ for size in packet_size:
+ logging.info("Ping with packet size %s", size)
+ status, output = kvm_test_utils.ping(ip, 10,
+ packetsize=size,
+ timeout=20)
+ if strict_check:
+ ratio = kvm_test_utils.get_loss_ratio(output)
+ if ratio != 0:
+ raise error.TestFail("Loss ratio is %s for packet size"
+ " %s" % (ratio, size))
+ else:
+ if status != 0:
+ raise error.TestFail("Ping failed, status: %s,"
+ " output: %s" % (status, output))
+
+ logging.info("Flood ping test")
+ kvm_test_utils.ping(ip, None, flood=True, output_func=None,
+ timeout=flood_minutes * 60)
+
+ logging.info("Final ping test")
+ status, output = kvm_test_utils.ping(ip, counts,
+ timeout=float(counts) * 1.5)
+ if strict_check:
+ ratio = kvm_test_utils.get_loss_ratio(output)
+ if ratio != 0:
+ raise error.TestFail("Loss ratio is %s in final ping test" %
+ ratio)
+ else:
+ if status != 0:
+ raise error.TestFail("Final ping failed, status: %s,"
+ " output: %s" % (status, output))
+ finally:
+ session.close()
diff --git a/client/tests/kvm/tests/pxe.py b/client/tests/kvm/tests/pxe.py
new file mode 100644
index 0000000..ec9a549
--- /dev/null
+++ b/client/tests/kvm/tests/pxe.py
@@ -0,0 +1,31 @@
+import logging
+from autotest_lib.client.common_lib import error
+import kvm_subprocess, kvm_test_utils
+
+
+def run_pxe(test, params, env):
+ """
+ PXE test:
+
+ 1) Snoop for TFTP packets on the tap device.
+ 2) Wait for a number of seconds.
+ 3) Check whether any TFTP packets were captured.
+
+ @param test: KVM test object.
+ @param params: Dictionary with the test parameters.
+ @param env: Dictionary with test environment.
+ """
+ vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+ timeout = int(params.get("pxe_timeout", 60))
+
+ logging.info("Try to boot from PXE")
+ status, output = kvm_subprocess.run_fg("tcpdump -nli %s" % vm.get_ifname(),
+ logging.debug,
+ "(pxe capture) ",
+ timeout)
+
+ logging.info("Analyzing the tcpdump result...")
+ if not "tftp" in output:
+ raise error.TestFail("Couldn't find any TFTP packets after %s seconds" %
+ timeout)
+ logging.info("Found TFTP packet")
diff --git a/client/tests/kvm/tests/qmp_basic.py b/client/tests/kvm/tests/qmp_basic.py
new file mode 100644
index 0000000..985ad15
--- /dev/null
+++ b/client/tests/kvm/tests/qmp_basic.py
@@ -0,0 +1,395 @@
+import kvm_test_utils
+from autotest_lib.client.common_lib import error
+
+def run_qmp_basic(test, params, env):
+ """
+ QMP Specification test-suite: this checks if the *basic* protocol conforms
+ to its specification, which is file QMP/qmp-spec.txt in QEMU's source tree.
+
+ IMPORTANT NOTES:
+
+ o Most tests depend heavily on QMP's error information (e.g. classes),
+ this might have bad implications as the error interface is going to
+ change in QMP
+
+ o Command testing is *not* covered in this suite. Each command has its
+ own specification and should be tested separately
+
+ o We use the same terminology as used by the QMP specification,
+ especially with regard to JSON types (e.g. a Python dict is called
+ a json-object)
+
+ o This is divided into sub test-suites; please check the bottom of
+ this file for the order in which they are run
+
+ TODO:
+
+ o Finding which test failed is not as easy as it should be
+
+ o Are all those check_*() functions really needed? Wouldn't a
+ specialized class (e.g. a Response class) do better?
+ """
+ def fail_no_key(qmp_dict, key):
+ if not isinstance(qmp_dict, dict):
+ raise error.TestFail("qmp_dict is not a dict (it's '%s')" %
+ type(qmp_dict))
+ if not key in qmp_dict:
+ raise error.TestFail("'%s' key doesn't exist in dict ('%s')" %
+ (key, str(qmp_dict)))
+
+
+ def check_dict_key(qmp_dict, key, keytype):
+ """
+ Performs the following checks on a QMP dict key:
+
+ 1. qmp_dict is a dict
+ 2. key exists in qmp_dict
+ 3. key is of type keytype
+
+ If any of these checks fails, error.TestFail is raised.
+ """
+ fail_no_key(qmp_dict, key)
+ if not isinstance(qmp_dict[key], keytype):
+ raise error.TestFail("'%s' key is not of type '%s', it's '%s'" %
+ (key, keytype, type(qmp_dict[key])))
+
+
+ def check_key_is_dict(qmp_dict, key):
+ check_dict_key(qmp_dict, key, dict)
+
+
+ def check_key_is_list(qmp_dict, key):
+ check_dict_key(qmp_dict, key, list)
+
+
+ def check_key_is_str(qmp_dict, key):
+ check_dict_key(qmp_dict, key, unicode)
+
+
+ def check_str_key(qmp_dict, keyname, value=None):
+ check_dict_key(qmp_dict, keyname, unicode)
+ if value and value != qmp_dict[keyname]:
+ raise error.TestFail("'%s' key value '%s' should be '%s'" %
+ (keyname, str(qmp_dict[keyname]), str(value)))
+
+
+ def check_key_is_int(qmp_dict, key):
+ fail_no_key(qmp_dict, key)
+ try:
+ int(qmp_dict[key])
+ except (ValueError, TypeError):
+ raise error.TestFail("'%s' key is not of type int, it's '%s'" %
+ (key, type(qmp_dict[key])))
+
+
+ def check_bool_key(qmp_dict, keyname, value=None):
+ check_dict_key(qmp_dict, keyname, bool)
+ if value and value != qmp_dict[keyname]:
+ raise error.TestFail("'%s' key value '%s' should be '%s'" %
+ (keyname, str(qmp_dict[keyname]), str(value)))
+
+
+ def check_success_resp(resp, empty=False):
+ """
+ Check QMP OK response.
+
+ @param resp: QMP response
+ @param empty: if True, response should not contain data to return
+ """
+ check_key_is_dict(resp, "return")
+ if empty and len(resp["return"]) > 0:
+ raise error.TestFail("success response is not empty ('%s')" %
+ str(resp))
+
+
+ def check_error_resp(resp, classname=None, datadict=None):
+ """
+ Check QMP error response.
+
+ @param resp: QMP response
+ @param classname: Expected error class name
+ @param datadict: Expected error data dictionary
+ """
+ check_key_is_dict(resp, "error")
+ check_key_is_str(resp["error"], "class")
+ if classname and resp["error"]["class"] != classname:
+ raise error.TestFail("got error class '%s' expected '%s'" %
+ (resp["error"]["class"], classname))
+ check_key_is_dict(resp["error"], "data")
+ if datadict and resp["error"]["data"] != datadict:
+ raise error.TestFail("got data dict '%s' expected '%s'" %
+ (resp["error"]["data"], datadict))
+
+
+ def test_version(version):
+ """
+ Check the QMP greeting message version key which, according to QMP's
+ documentation, should be:
+
+ { "qemu": { "major": json-int, "minor": json-int, "micro": json-int }
+ "package": json-string }
+ """
+ check_key_is_dict(version, "qemu")
+ for key in [ "major", "minor", "micro" ]:
+ check_key_is_int(version["qemu"], key)
+ check_key_is_str(version, "package")
+
+
+ def test_greeting(greeting):
+ check_key_is_dict(greeting, "QMP")
+ check_key_is_dict(greeting["QMP"], "version")
+ check_key_is_list(greeting["QMP"], "capabilities")
+
+
+ def greeting_suite(monitor):
+ """
+ Check the greeting message format, as described in the QMP
+ specification section '2.2 Server Greeting'.
+
+ { "QMP": { "version": json-object, "capabilities": json-array } }
+ """
+ greeting = monitor.get_greeting()
+ test_greeting(greeting)
+ test_version(greeting["QMP"]["version"])
+
+
+ def json_parsing_errors_suite(monitor):
+ """
+ Check that QMP's parser is able to recover from parsing errors; please
+ check the JSON spec for more info on the JSON syntax (RFC 4627).
+ """
+ # We're quite simple right now and the focus is on parsing errors that
+ # have already bitten us in the past.
+ #
+ # TODO: The following test-cases are missing:
+ #
+ # - JSON numbers, strings and arrays
+ # - More invalid characters or malformed structures
+ # - Valid, but not obvious syntax, like zillion of spaces or
+ # strings with unicode chars (different suite maybe?)
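+ # Expected recovery behavior (illustrative): each bad input below should
+ # produce an error response along the lines of
+ # { "error": { "class": "JSONParsing", "data": {} } }
+ # after which the monitor must keep accepting well-formed commands.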
+ bad_json = []
+
+ # A JSON value MUST be an object, array, number, string, true, false,
+ # or null
+ #
+ # NOTE: QMP seems to ignore a number of chars, like: | and ?
+ bad_json.append(":")
+ bad_json.append(",")
+
+ # Malformed json-objects
+ #
+ # NOTE: sending only "}" seems to break QMP
+ # NOTE: Duplicate keys are accepted (should they be?)
+ bad_json.append("{ \"execute\" }")
+ bad_json.append("{ \"execute\": \"query-version\", }")
+ bad_json.append("{ 1: \"query-version\" }")
+ bad_json.append("{ true: \"query-version\" }")
+ bad_json.append("{ []: \"query-version\" }")
+ bad_json.append("{ {}: \"query-version\" }")
+
+ for cmd in bad_json:
+ resp = monitor.cmd_raw(cmd)
+ check_error_resp(resp, "JSONParsing")
+
+
+ def test_id_key(monitor):
+ """
+ Check that QMP's "id" key is correctly handled.
+ """
+ # The "id" key must be echoed back in error responses
+ id = "kvm-autotest"
+ resp = monitor.cmd_qmp("eject", { "foobar": True }, id=id)
+ check_error_resp(resp)
+ check_str_key(resp, "id", id)
+
+ # The "id" key must be echoed back in success responses
+ resp = monitor.cmd_qmp("query-status", id=id)
+ check_success_resp(resp)
+ check_str_key(resp, "id", id)
+
+ # The "id" key can be any json-object
+ for id in [ True, 1234, "string again!", [1, [], {}, True, "foo"],
+ { "key": {} } ]:
+ resp = monitor.cmd_qmp("query-status", id=id)
+ check_success_resp(resp)
+ if resp["id"] != id:
+ raise error.TestFail("expected id '%s' but got '%s'" %
+ (str(id), str(resp["id"])))
+
+
+ def test_invalid_arg_key(monitor):
+ """
+ Currently, the only supported keys in the input object are: "execute",
+ "arguments" and "id". Although expansion is supported, invalid key
+ names must be detected.
+ """
+ resp = monitor.cmd_obj({ "execute": "eject", "foobar": True })
+ check_error_resp(resp, "QMPExtraInputObjectMember",
+ { "member": "foobar" })
+
+
+ def test_bad_arguments_key_type(monitor):
+ """
+ The "arguments" key must be an json-object.
+
+ We use the eject command to perform the tests, but that's a random
+ choice, any command that accepts arguments will do, as the command
+ doesn't get called.
+ """
+ for item in [ True, [], 1, "foo" ]:
+ resp = monitor.cmd_obj({ "execute": "eject", "arguments": item })
+ check_error_resp(resp, "QMPBadInputObjectMember",
+ { "member": "arguments", "expected": "object" })
+
+
+ def test_bad_execute_key_type(monitor):
+ """
+ The "execute" key must be a json-string.
+ """
+ for item in [ False, 1, {}, [] ]:
+ resp = monitor.cmd_obj({ "execute": item })
+ check_error_resp(resp, "QMPBadInputObjectMember",
+ { "member": "execute", "expected": "string" })
+
+
+ def test_no_execute_key(monitor):
+ """
+ The "execute" key must exist, we also test for some stupid parsing
+ errors.
+ """
+ for cmd in [ {}, { "execut": "qmp_capabilities" },
+ { "executee": "qmp_capabilities" }, { "foo": "bar" }]:
+ resp = monitor.cmd_obj(cmd)
+ check_error_resp(resp) # XXX: check class and data dict?
+
+
+ def test_bad_input_obj_type(monitor):
+ """
+ The input object must be... a json-object.
+ """
+ for cmd in [ "foo", [], True, 1 ]:
+ resp = monitor.cmd_obj(cmd)
+ check_error_resp(resp, "QMPBadInputObject", { "expected":"object" })
+
+
+ def test_good_input_obj(monitor):
+ """
+ Basic success tests for issuing QMP commands.
+ """
+ # NOTE: We don't use the cmd_qmp() method here because the command
+ # object is in a 'random' order
+ resp = monitor.cmd_obj({ "execute": "query-version" })
+ check_success_resp(resp)
+
+ resp = monitor.cmd_obj({ "arguments": {}, "execute": "query-version" })
+ check_success_resp(resp)
+
+ id = "1234foo"
+ resp = monitor.cmd_obj({ "id": id, "execute": "query-version",
+ "arguments": {} })
+ check_success_resp(resp)
+ check_str_key(resp, "id", id)
+
+ # TODO: would be good to test simple argument usage, but we don't have
+ # a read-only command that accepts arguments.
+
+
+ def input_object_suite(monitor):
+ """
+ Check the input object format, as described in the QMP specification
+ section '2.3 Issuing Commands'.
+
+ { "execute": json-string, "arguments": json-object, "id": json-value }
+ """
+ test_good_input_obj(monitor)
+ test_bad_input_obj_type(monitor)
+ test_no_execute_key(monitor)
+ test_bad_execute_key_type(monitor)
+ test_bad_arguments_key_type(monitor)
+ test_id_key(monitor)
+ test_invalid_arg_key(monitor)
+
+
+ def argument_checker_suite(monitor):
+ """
+ Check that QMP's argument checker is detecting all possible errors.
+
+ We use a number of different commands to perform the checks, but the
+ command used doesn't matter much as QMP performs argument checking
+ _before_ calling the command.
+ """
+ # stop doesn't take arguments
+ resp = monitor.cmd_qmp("stop", { "foo": 1 })
+ check_error_resp(resp, "InvalidParameter", { "name": "foo" })
+
+ # required argument omitted
+ resp = monitor.cmd_qmp("screendump")
+ check_error_resp(resp, "MissingParameter", { "name": "filename" })
+
+ # 'bar' is not a valid argument
+ resp = monitor.cmd_qmp("screendump", { "filename": "outfile",
+ "bar": "bar" })
+ check_error_resp(resp, "InvalidParameter", { "name": "bar"})
+
+ # test optional argument: 'force' is omitted, but it's optional, so
+ # the handler has to be called. Test this happens by checking an
+ # error that is generated by the handler itself.
+ resp = monitor.cmd_qmp("eject", { "device": "foobar" })
+ check_error_resp(resp, "DeviceNotFound")
+
+ # filename argument must be a json-string
+ for arg in [ {}, [], 1, True ]:
+ resp = monitor.cmd_qmp("screendump", { "filename": arg })
+ check_error_resp(resp, "InvalidParameterType",
+ { "name": "filename", "expected": "string" })
+
+ # force argument must be a json-bool
+ for arg in [ {}, [], 1, "foo" ]:
+ resp = monitor.cmd_qmp("eject", { "force": arg, "device": "foo" })
+ check_error_resp(resp, "InvalidParameterType",
+ { "name": "force", "expected": "bool" })
+
+ # val argument must be a json-int
+ for arg in [ {}, [], True, "foo" ]:
+ resp = monitor.cmd_qmp("memsave", { "val": arg, "filename": "foo",
+ "size": 10 })
+ check_error_resp(resp, "InvalidParameterType",
+ { "name": "val", "expected": "int" })
+
+ # value argument must be a json-number
+ for arg in [ {}, [], True, "foo" ]:
+ resp = monitor.cmd_qmp("migrate_set_speed", { "value": arg })
+ check_error_resp(resp, "InvalidParameterType",
+ { "name": "value", "expected": "number" })
+
+ # qdev-type commands have their own argument checker, all QMP does
+ # is to skip its checking and pass arguments through. Check this
+ # works by providing invalid options to device_add and expecting
+ # an error message from qdev
+ resp = monitor.cmd_qmp("device_add", { "driver": "e1000","foo": "bar" })
+ check_error_resp(resp, "PropertyNotFound",
+ {"device": "e1000", "property": "foo"})
+
+
+ def unknown_commands_suite(monitor):
+ """
+ Check that QMP handles unknown commands correctly.
+ """
+ # We also call a HMP-only command, to be sure it will fail as expected
+ for cmd in [ "bar", "query-", "query-foo", "q", "help" ]:
+ resp = monitor.cmd_qmp(cmd)
+ check_error_resp(resp, "CommandNotFound", { "name": cmd })
+
+
+ vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
+
+ # Run all suites
+ greeting_suite(vm.monitor)
+ input_object_suite(vm.monitor)
+ argument_checker_suite(vm.monitor)
+ unknown_commands_suite(vm.monitor)
+ json_parsing_errors_suite(vm.monitor)
+
+ # check if QMP is still alive
+ if not vm.monitor.is_responsive():
+ raise error.TestFail('QEMU is not alive after QMP testing')
diff --git a/client/tests/kvm/tests/stress_boot.py b/client/tests/kvm/tests/stress_boot.py
index 0d3ed07..b7916b4 100644
--- a/client/tests/kvm/tests/stress_boot.py
+++ b/client/tests/kvm/tests/stress_boot.py
@@ -28,7 +28,6 @@
num = 2
sessions = [session]
- address_index = int(params.get("clone_address_index_base", 10))
# boot the VMs
while num <= int(params.get("max_vms")):
@@ -36,7 +35,6 @@
# clone vm according to the first one
vm_name = "vm" + str(num)
vm_params = vm.get_params().copy()
- vm_params["address_index"] = str(address_index)
curr_vm = vm.clone(vm_name, vm_params)
kvm_utils.env_register_vm(env, vm_name, curr_vm)
logging.info("Booting guest #%d" % num)
@@ -56,7 +54,6 @@
if se.get_command_status(params.get("alive_test_cmd")) != 0:
raise error.TestFail("Session #%d is not responsive" % i)
num += 1
- address_index += 1
except (error.TestFail, OSError):
for se in sessions:
diff --git a/client/tests/kvm/tests/vlan.py b/client/tests/kvm/tests/vlan.py
new file mode 100644
index 0000000..f41ea6a
--- /dev/null
+++ b/client/tests/kvm/tests/vlan.py
@@ -0,0 +1,185 @@
+import logging, time, re
+from autotest_lib.client.common_lib import error
+import kvm_test_utils, kvm_utils
+
+def run_vlan(test, params, env):
+ """
+ Test 802.1Q vlan of NIC, config it by vconfig command.
+
+ 1) Create two VMs.
+ 2) Set up guests in different vlans using vconfig and hard-coded
+ ip addresses.
+ 3) Test connectivity by pinging between the same and different vlans
+ of the two VMs.
+ 4) Test TCP data transfer and flood ping between the same vlan of the
+ two VMs.
+ 5) Test plumbing/unplumbing the maximal number of vlans.
+ 6) Restore the vlan config.
+
+ @param test: KVM test object.
+ @param params: Dictionary with the test parameters.
+ @param env: Dictionary with test environment.
+ """
+
+ vm = []
+ session = []
+ ifname = []
+ vm_ip = []
+ digest_origin = []
+ vlan_ip = ['', '']
+ ip_unit = ['1', '2']
+ subnet = params.get("subnet")
+ vlan_num = int(params.get("vlan_num"))
+ maximal = int(params.get("maximal"))
+ file_size = params.get("file_size")
+
+ vm.append(kvm_test_utils.get_living_vm(env, params.get("main_vm")))
+ vm.append(kvm_test_utils.get_living_vm(env, "vm2"))
+
+ def add_vlan(session, id, iface="eth0"):
+ if session.get_command_status("vconfig add %s %s" % (iface, id)) != 0:
+ raise error.TestError("Fail to add %s.%s" % (iface, id))
+
+ def set_ip_vlan(session, id, ip, iface="eth0"):
+ iface = "%s.%s" % (iface, id)
+ if session.get_command_status("ifconfig %s %s" % (iface, ip)) != 0:
+ raise error.TestError("Fail to configure ip for %s" % iface)
+
+ def set_arp_ignore(session, iface="eth0"):
+ ignore_cmd = "echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore"
+ if session.get_command_status(ignore_cmd) != 0:
+ raise error.TestError("Fail to set arp_ignore of %s" % session)
+
+ def rem_vlan(session, id, iface="eth0"):
+ rem_vlan_cmd = "if [[ -e /proc/net/vlan/%s ]];then vconfig rem %s;fi"
+ iface = "%s.%s" % (iface, id)
+ s = session.get_command_status(rem_vlan_cmd % (iface, iface))
+ return s
+
+ def nc_transfer(src, dst):
+ nc_port = kvm_utils.find_free_port(1025, 5334, vm_ip[dst])
+ listen_cmd = params.get("listen_cmd")
+ send_cmd = params.get("send_cmd")
+
+ #listen in dst
+ listen_cmd = listen_cmd % (nc_port, "receive")
+ session[dst].sendline(listen_cmd)
+ time.sleep(2)
+ #send file from src to dst
+ send_cmd = send_cmd % (vlan_ip[dst], str(nc_port), "file")
+ if session[src].get_command_status(send_cmd, timeout=60) != 0:
+ raise error.TestFail("Failed to send file"
+ " from vm%s to vm%s" % (src+1, dst+1))
+ s, o = session[dst].read_up_to_prompt(timeout=60)
+ if not s:
+ raise error.TestFail("Failed to receive file"
+ " from vm%s to vm%s" % (src+1, dst+1))
+ # check the MD5 digest of the received file on dst
+ output = session[dst].get_command_output("md5sum receive").strip()
+ digest_receive = re.findall(r'(\w+)', output)[0]
+ if digest_receive == digest_origin[src]:
+ logging.info("File successfully received in vm %s" % vlan_ip[dst])
+ else:
+ logging.info("digest_origin is %s" % digest_origin[src])
+ logging.info("digest_receive is %s" % digest_receive)
+ raise error.TestFail("Transferred file differs from the original")
+ session[dst].get_command_status("rm -f receive")
+
+ for i in range(2):
+ session.append(kvm_test_utils.wait_for_login(vm[i],
+ timeout=int(params.get("login_timeout", 360))))
+ if not session[i]:
+ raise error.TestError("Could not log into guest (vm%d)" % i)
+ logging.info("Logged in")
+
+ ifname.append(kvm_test_utils.get_linux_ifname(session[i],
+ vm[i].get_mac_address()))
+ #get guest ip
+ vm_ip.append(vm[i].get_address())
+
+ #produce sized file in vm
+ dd_cmd = "dd if=/dev/urandom of=file bs=1024k count=%s"
+ if session[i].get_command_status(dd_cmd % file_size) != 0:
+ raise error.TestFail("File producing failed")
+ #record MD5 message digest of file
+ s, output = session[i].get_command_status_output("md5sum file",
+ timeout=60)
+ if s != 0:
+ raise error.TestFail("File MD5 checking failed")
+ digest_origin.append(re.findall(r'(\w+)', output)[0])
+
+ #stop firewall in vm
+ session[i].get_command_status("/etc/init.d/iptables stop")
+
+ #load 8021q module for vconfig
+ load_8021q_cmd = "modprobe 8021q"
+ if session[i].get_command_status(load_8021q_cmd) != 0:
+ raise error.TestError("Fail to load 8021q module on VM%s" % i)
+
+ try:
+ for i in range(2):
+ for vlan_i in range(1, vlan_num+1):
+ add_vlan(session[i], vlan_i, ifname[i])
+ set_ip_vlan(session[i], vlan_i, "%s.%s.%s" %
+ (subnet, vlan_i, ip_unit[i]), ifname[i])
+ set_arp_ignore(session[i], ifname[i])
+
+ for vlan in range(1, vlan_num+1):
+ logging.info("Test for vlan %s" % vlan)
+
+ logging.info("Ping between vlans")
+ for vlan2 in range(1, vlan_num+1):
+ for i in range(2):
+ interface = ifname[i] + '.' + str(vlan)
+ dest = subnet +'.'+ str(vlan2)+ '.' + ip_unit[(i+1)%2]
+ s, o = kvm_test_utils.ping(dest, count=2,
+ interface=interface,
+ session=session[i], timeout=30)
+ if (vlan == vlan2) ^ (s == 0):
+ raise error.TestFail("Unexpected ping result: %s -> %s" %
+ (interface, dest))
+
+ vlan_ip[0] = subnet + '.' + str(vlan) + '.' + ip_unit[0]
+ vlan_ip[1] = subnet + '.' + str(vlan) + '.' + ip_unit[1]
+
+ logging.info("Flood ping")
+ def flood_ping(src, dst):
+ # We must use a dedicated session because kvm_subprocess has no
+ # way to interrupt a process in the guest other than closing
+ # the session.
+ session_flood = kvm_test_utils.wait_for_login(vm[src],
+ timeout = 60)
+ kvm_test_utils.ping(vlan_ip[dst], flood=True,
+ interface=ifname[src],
+ session=session_flood, timeout=10)
+ session_flood.close()
+
+ flood_ping(0,1)
+ flood_ping(1,0)
+
+ logging.info("Transfering data through nc")
+ nc_transfer(0, 1)
+ nc_transfer(1, 0)
+
+ finally:
+ for vlan in range(1, vlan_num+1):
+ rem_vlan(session[0], vlan, ifname[0])
+ rem_vlan(session[1], vlan, ifname[1])
+ logging.info("rem vlan: %s" % vlan)
+
+ # Plumb/unplumb the maximal number of vlan interfaces
+ i = 1
+ s = 0
+ try:
+ logging.info("Testing the plumb of vlan interface")
+ for i in range (1, maximal+1):
+ add_vlan(session[0], i, ifname[0])
+ finally:
+ for j in range(1, i+1):
+ s = s or rem_vlan(session[0], j, ifname[0])
+ if s == 0:
+ logging.info("Maximal interface plumb test done")
+ else:
+ logging.error("Maximal interface plumb test failed")
+
+ session[0].close()
+ session[1].close()
diff --git a/client/tests/kvm/tests/vlan_tag.py b/client/tests/kvm/tests/vlan_tag.py
deleted file mode 100644
index cafd8fe..0000000
--- a/client/tests/kvm/tests/vlan_tag.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import logging, time
-from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils
-
-
-def run_vlan_tag(test, params, env):
- """
- Test 802.1Q vlan of NIC, config it by vconfig command.
-
- 1) Create two VMs.
- 2) Setup guests in different VLANs by vconfig and test communication by
- ping using hard-coded ip addresses.
- 3) Setup guests in same vlan and test communication by ping.
- 4) Recover the vlan config.
-
- @param test: KVM test object.
- @param params: Dictionary with the test parameters.
- @param env: Dictionary with test environment.
- """
- subnet = params.get("subnet")
- vlans = params.get("vlans").split()
-
- vm1 = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
- vm2 = kvm_test_utils.get_living_vm(env, "vm2")
-
- timeout = int(params.get("login_timeout", 360))
- session1 = kvm_test_utils.wait_for_login(vm1, timeout=timeout)
- session2 = kvm_test_utils.wait_for_login(vm2, timeout=timeout)
-
- try:
- ip_cfg_base = "vconfig add eth0 %s && ifconfig eth0.%s %s.%s"
- ip_cfg_cmd1 = ip_cfg_base % (vlans[0], vlans[0], subnet, "11")
- ip_cfg_cmd2 = ip_cfg_base % (vlans[1], vlans[1], subnet, "12")
-
- # Configure VM1 and VM2 in different VLANs
- ip_cfg_vm1 = session1.get_command_status(ip_cfg_cmd1)
- if ip_cfg_vm1 != 0:
- raise error.TestError("Failed to config VM 1 IP address")
- ip_cfg_vm2 = session2.get_command_status(ip_cfg_cmd2)
- if ip_cfg_vm2 != 0:
- raise error.TestError("Failed to config VM 2 IP address")
-
- # Trying to ping VM 2 from VM 1, this shouldn't work
- ping_cmd = "ping -c 2 -I eth0.%s %s.%s" % (vlans[0], subnet, "12")
- ping_diff_vlan = session1.get_command_status(ping_cmd)
- if ping_diff_vlan == 0:
- raise error.TestFail("VM 2 can be reached even though it was "
- "configured on a different VLAN")
-
- # Now let's put VM 2 in the same VLAN as VM 1
- ip_cfg_reconfig= ("vconfig rem eth0.%s && vconfig add eth0 %s && "
- "ifconfig eth0.%s %s.%s" % (vlans[1], vlans[0],
- vlans[0], subnet, "12"))
- ip_cfg_vm2 = session2.get_command_status(ip_cfg_reconfig)
- if ip_cfg_vm2 != 0:
- raise error.TestError("Failed to re-config IP address of VM 2")
-
- # Try to ping VM 2 from VM 1, this should work
- ping_same_vlan = session1.get_command_status(ping_cmd)
- if ping_same_vlan != 0:
- raise error.TestFail("Failed to ping VM 2 even though it was "
- "configured on the same VLAN")
-
- finally:
- session1.get_command_status("vconfig rem eth0.%s" % vlans[0])
- session1.close()
- session2.get_command_status("vconfig rem eth0.%s" % vlans[0])
- session2.close()
diff --git a/client/tests/kvm/tests/whql_client_install.py b/client/tests/kvm/tests/whql_client_install.py
index d866df7..84b91bc 100644
--- a/client/tests/kvm/tests/whql_client_install.py
+++ b/client/tests/kvm/tests/whql_client_install.py
@@ -60,7 +60,11 @@
server_workgroup = server_workgroup.splitlines()[-1]
regkey = r"HKLM\SYSTEM\CurrentControlSet\Services\Tcpip\Parameters"
cmd = "reg query %s /v Domain" % regkey
- server_dns_suffix = server_session.get_command_output(cmd).split()[-1]
+ o = server_session.get_command_output(cmd).strip().splitlines()[-1]
+ try:
+ server_dns_suffix = o.split(None, 2)[2]
+ except IndexError:
+ server_dns_suffix = ""
# Delete the client machine from the server's data store (if it's there)
server_session.get_command_output("cd %s" % server_studio_path)
@@ -86,7 +90,7 @@
# Set the client machine's DNS suffix
logging.info("Setting DNS suffix to '%s'" % server_dns_suffix)
- cmd = "reg add %s /v Domain /d %s /f" % (regkey, server_dns_suffix)
+ cmd = 'reg add %s /v Domain /d "%s" /f' % (regkey, server_dns_suffix)
if session.get_command_status(cmd, timeout=300) != 0:
raise error.TestError("Could not set the client's DNS suffix")
diff --git a/client/tests/kvm/tests_base.cfg.sample b/client/tests/kvm/tests_base.cfg.sample
index 167e86d..eddc02b 100644
--- a/client/tests/kvm/tests_base.cfg.sample
+++ b/client/tests/kvm/tests_base.cfg.sample
@@ -35,6 +35,7 @@
mem = 512
image_size = 10G
drive_index_image1 = 0
+drive_cache = none
shell_port = 22
display = vnc
drive_index_cd1 = 1
@@ -54,7 +55,7 @@
nic_mode = user
#nic_mode = tap
nic_script = scripts/qemu-ifup
-address_index = 0
+#nic_script = scripts/qemu-ifup-ipv6
run_tcpdump = yes
# Misc
@@ -274,7 +275,6 @@
type = stress_boot
max_vms = 5
alive_test_cmd = uname -a
- clone_address_index_base = 10
login_timeout = 240
kill_vm = yes
kill_vm_vm1 = no
@@ -456,17 +456,90 @@
- fmt_raw:
image_format_stg = raw
- - vlan_tag: install setup unattended_install.cdrom
- type = vlan_tag
+ - qmp_basic: install setup unattended_install.cdrom
+ type = qmp_basic
+
+ - vlan: install setup unattended_install.cdrom
+ type = vlan
# subnet should not be used by host
- subnet = 192.168.123
- vlans = "10 20"
+ subnet = "192.168"
+ vlan_num = 5
+ file_size = 10
+ maximal = 4094
+ listen_cmd = "nc -l %s > %s"
+ send_cmd = "nc %s %s < %s"
nic_mode = tap
vms += " vm2"
- extra_params_vm1 += " -snapshot"
- extra_params_vm2 += " -snapshot"
+ image_snapshot = yes
+ kill_vm_vm2 = yes
kill_vm_gracefully_vm2 = no
- address_index_vm2 = 1
+
+ - ping: install setup unattended_install.cdrom
+ type = ping
+ ping_counts = 100
+ flood_minutes = 10
+
+ - jumbo: install setup unattended_install.cdrom
+ type = jumbo
+
+ - file_transfer: install setup unattended_install.cdrom
+ type = file_transfer
+ filesize = 4000
+ transfer_timeout = 1000
+ variants:
+ - remote:
+ transfer_type = remote
+
+ - nicdriver_unload: install setup unattended_install.cdrom
+ type = nicdriver_unload
+ nic_mode = tap
+ file_size = 100
+ connect_time = 4
+ scp_timeout = 300
+ thread_num = 10
+
+ - nic_promisc: install setup unattended_install.cdrom
+ type = nic_promisc
+ file_size = 1, 1460, 65000, 100000000
+
+ - multicast: install setup unattended_install.cdrom
+ type = multicast
+ nic_mode = tap
+ mcast = 225.0.0.1
+ mgroup_count = 20
+ flood_minutes = 1
+
+ - pxe:
+ type = pxe
+ images = pxe
+ image_name_pxe = pxe-test
+ image_size_pxe = 1G
+ force_create_image_pxe = yes
+ remove_image_pxe = yes
+ extra_params += ' -boot n'
+ kill_vm_on_error = yes
+ network = bridge
+ restart_vm = yes
+ pxe_timeout = 60
+
+ - mac_change: install setup unattended_install.cdrom
+ type = mac_change
+ kill_vm = yes
+
+ - netperf: install setup unattended_install.cdrom
+ type = netperf
+ nic_mode = tap
+ netperf_files = netperf-2.4.5.tar.bz2 wait_before_data.patch
+ setup_cmd = "cd %s && tar xvfj netperf-2.4.5.tar.bz2 && cd netperf-2.4.5 && patch -p0 < ../wait_before_data.patch && ./configure && make"
+ netserver_cmd = %s/netperf-2.4.5/src/netserver
+ # test time is 60 seconds; set the buffer size to 1 to generate more hardware interrupts
+ netperf_cmd = %s/netperf-2.4.5/src/netperf -t %s -H %s -l 60 -- -m 1
+ protocols = "TCP_STREAM TCP_MAERTS TCP_RR TCP_CRR UDP_RR TCP_SENDFILE UDP_STREAM"
+
+ - ethtool: install setup unattended_install.cdrom
+ type = ethtool
+ filesize = 512
+ nic_mode = tap
- physical_resources_check: install setup unattended_install.cdrom
type = physical_resources_check
@@ -618,11 +691,26 @@
variants:
- @rtl8139:
nic_model = rtl8139
+ jumbo:
+ mtu = 1500
- e1000:
nic_model = e1000
+ jumbo:
+ mtu = 16110
+ ethtool:
+ # gso, gro and lro are only supported by recent kernels
+ supported_features = "tx rx sg tso gso gro lro"
- virtio_net:
nic_model = virtio
-
+ # You can add advanced attributes such as mrg_rxbuf via nic_extra_params
+ #nic_extra_params =
+ # You can set vhost = yes to enable the vhost kernel backend
+ # (This only works if nic_mode=tap)
+ vhost = no
+ jumbo:
+ mtu = 65520
+ ethtool:
+ supported_features = "tx sg tso gso"
# Guests
variants:
@@ -665,6 +753,9 @@
time_command = date +'TIME: %a %m/%d/%Y %H:%M:%S.%N'
time_filter_re = "(?:TIME: \w\w\w )(.{19})(?:\.\d\d)"
time_format = "%m/%d/%Y %H:%M:%S"
+ file_transfer:
+ tmp_dir = /tmp/
+ clean_cmd = rm -f
variants:
- Fedora:
@@ -856,8 +947,8 @@
extra_params += " -boot cn"
# You have to use autoyast=floppy if you want to use floppies to
# hold your autoyast file
- #kernel_args = "autoyast=floppy console=ttyS0,115200 console=tty0"
- kernel_args = "autoyast=cdrom console=ttyS0,115200 console=tty0"
+ kernel_args = "autoyast=floppy console=ttyS0,115200 console=tty0"
+ #kernel_args = "autoyast=cdrom console=ttyS0,115200 console=tty0"
post_install_delay = 10
variants:
@@ -871,7 +962,8 @@
unattended_install.cdrom:
unattended_file = unattended/OpenSUSE-11.xml
tftp = images/opensuse-11-0-32/tftpboot
- floppy = images/opensuse-11-0-32/floppy.img
+ floppy = images/opensuse-11-0-32/autoyast.vfd
+ #cdrom_unattended = images/opensuse-11-0-32/autoyast.iso
pxe_dir = boot/i386/loader
- 11.0.64:
@@ -882,8 +974,8 @@
unattended_install.cdrom:
unattended_file = unattended/OpenSUSE-11.xml
tftp = images/opensuse-11-0-64/tftpboot
- #floppy = images/opensuse-11-0-64/autoyast.vfd
- cdrom_unattended = images/opensuse-11-0-64/autoyast.iso
+ floppy = images/opensuse-11-0-64/autoyast.vfd
+ #cdrom_unattended = images/opensuse-11-0-64/autoyast.iso
pxe_dir = boot/x86_64/loader
- 11.1.32:
@@ -896,8 +988,8 @@
unattended_install.cdrom:
unattended_file = unattended/OpenSUSE-11.xml
tftp = images/opensuse-11-1-32/tftpboot
- #floppy = images/opensuse-11-1-32/autoyast.vfd
- cdrom_unattended = images/opensuse-11-1-32/autoyast.iso
+ floppy = images/opensuse-11-1-32/autoyast.vfd
+ #cdrom_unattended = images/opensuse-11-1-32/autoyast.iso
pxe_dir = boot/i386/loader
- 11.1.64:
@@ -910,8 +1002,8 @@
unattended_install.cdrom:
unattended_file = unattended/OpenSUSE-11.xml
tftp = images/opensuse-11-1-64/tftpboot
- #floppy = images/opensuse-11-1-64/autoyast.vfd
- cdrom_unattended = images/opensuse-11-1-64/autoyast.iso
+ floppy = images/opensuse-11-1-64/autoyast.vfd
+ #cdrom_unattended = images/opensuse-11-1-64/autoyast.iso
pxe_dir = boot/x86_64/loader
- 11.2.32:
@@ -922,8 +1014,8 @@
unattended_install.cdrom:
unattended_file = unattended/OpenSUSE-11.xml
tftp = images/opensuse-11-2-32/tftpboot
- #floppy = images/opensuse-11-2-32/autoyast.vfd
- cdrom_unattended = images/opensuse-11-2-32/autoyast.iso
+ floppy = images/opensuse-11-2-32/autoyast.vfd
+ #cdrom_unattended = images/opensuse-11-2-32/autoyast.iso
pxe_dir = boot/i386/loader
- 11.2.64:
@@ -934,8 +1026,8 @@
unattended_install.cdrom:
unattended_file = unattended/OpenSUSE-11.xml
tftp = images/opensuse-11-2-64/tftpboot
- #floppy = images/opensuse11-2-64/autoyast.vfd
- cdrom_unattended = images/opensuse11-2-64/autoyast.iso
+ floppy = images/opensuse11-2-64/autoyast.vfd
+ #cdrom_unattended = images/opensuse11-2-64/autoyast.iso
pxe_dir = boot/x86_64/loader
- 11.3.32:
@@ -946,8 +1038,8 @@
unattended_install.cdrom:
unattended_file = unattended/OpenSUSE-11.xml
tftp = images/opensuse-11-3-32/tftpboot
- #floppy = images/opensuse-11-3-32/autoyast.vfd
- cdrom_unattended = images/opensuse-11-3-32/autoyast.iso
+ floppy = images/opensuse-11-3-32/autoyast.vfd
+ #cdrom_unattended = images/opensuse-11-3-32/autoyast.iso
pxe_dir = boot/i386/loader
- 11.3.64:
@@ -958,8 +1050,8 @@
unattended_install.cdrom:
unattended_file = unattended/OpenSUSE-11.xml
tftp = images/opensuse-11-3-64/tftpboot
- #floppy = images/opensuse-11-3-64/autoyast.vfd
- cdrom_unattended = images/opensuse-11-3-64/autoyast.iso
+ floppy = images/opensuse-11-3-64/autoyast.vfd
+ #cdrom_unattended = images/opensuse-11-3-64/autoyast.iso
pxe_dir = boot/x86_64/loader
- SLES:
@@ -971,8 +1063,8 @@
extra_params += " -boot cn"
# You have to use autoyast=floppy if you want to use floppies to
# hold your autoyast file
- #kernel_args = "autoyast=floppy console=ttyS0,115200 console=tty0"
- kernel_args = "autoyast=cdrom console=ttyS0,115200 console=tty0"
+ kernel_args = "autoyast=floppy console=ttyS0,115200 console=tty0"
+ #kernel_args = "autoyast=cdrom console=ttyS0,115200 console=tty0"
post_install_delay = 10
variants:
@@ -986,6 +1078,8 @@
tftp = images/sles-11-0-32/tftpboot
-#floppy = images/sles-11-0-32/autoyast.vfd
-cdrom_unattended = images/sles-11-0-32/autoyast.iso
+floppy = images/sles-11-0-32/autoyast.vfd
+#cdrom_unattended = images/sles-11-0-32/autoyast.iso
pxe_dir = boot/i386/loader
- 11.0.64:
@@ -996,8 +1090,8 @@
unattended_install.cdrom:
unattended_file = unattended/SLES-11.xml
tftp = images/sles-11-0-64/tftpboot
- #floppy = images/sles-11-0-64/autoyast.vfd
- cdrom_unattended = images/sles-11-0-64/autoyast.iso
+ floppy = images/sles-11-0-64/autoyast.vfd
+ #cdrom_unattended = images/sles-11-0-64/autoyast.iso
pxe_dir = boot/x86_64/loader
- 11.1.32:
@@ -1008,8 +1102,8 @@
unattended_install:
unattended_file = unattended/SLES-11.xml
tftp = images/sles-11-1-32/tftpboot
- #floppy = images/sles-11-1-32/autoyast.vfd
- cdrom_unattended = images/sles-11-1-32/autoyast.iso
+ floppy = images/sles-11-1-32/autoyast.vfd
+ #cdrom_unattended = images/sles-11-1-32/autoyast.iso
pxe_dir = boot/i386/loader
- 11.1.64:
@@ -1020,8 +1114,8 @@
unattended_install:
unattended_file = unattended/SLES-11.xml
tftp = images/sles-11-1-64/tftpboot
- #floppy = images/sles-11-1-64/autoyast.vfd
- cdrom_unattended = images/sles-11-1-64/autoyast.iso
+ floppy = images/sles-11-1-64/autoyast.vfd
+ #cdrom_unattended = images/sles-11-1-64/autoyast.iso
pxe_dir = boot/x86_64/loader
- @Ubuntu:
@@ -1236,7 +1330,7 @@
# Windows section
- @Windows:
- no autotest linux_s3 vlan_tag ioquit unattended_install.(url|nfs|remote_ks)
+ no autotest linux_s3 vlan ioquit unattended_install.(url|nfs|remote_ks) jumbo nicdriver_unload nic_promisc multicast mac_change ethtool
shutdown_command = shutdown /s /f /t 0
reboot_command = shutdown /r /f /t 0
status_test_command = echo %errorlevel%
@@ -1328,6 +1422,9 @@
physical_resources_check:
catch_uuid_cmd =
+ file_transfer:
+ tmp_dir = C:\
+ clean_cmd = del
variants:
- Win2000:
no reboot whql
diff --git a/client/tests/kvm/unattended/win2008-32-autounattend.xml b/client/tests/kvm/unattended/win2008-32-autounattend.xml
index 89af07f..352cb73 100644
--- a/client/tests/kvm/unattended/win2008-32-autounattend.xml
+++ b/client/tests/kvm/unattended/win2008-32-autounattend.xml
@@ -16,7 +16,7 @@
<UserLocale>en-us</UserLocale>
</component>
<component name="Microsoft-Windows-PnpCustomizationsWinPE"
- processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35"
+ processorArchitecture="x86" publicKeyToken="31bf3856ad364e35"
language="neutral" versionScope="nonSxS"
xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
@@ -147,17 +147,21 @@
<SynchronousCommand wcm:action="add">
<CommandLine>%WINDIR%\System32\cmd /c net start telnet</CommandLine>
<Order>5</Order>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
- <CommandLine>%WINDIR%\System32\cmd /c E:\setuprss.bat</CommandLine>
+ <CommandLine>%WINDIR%\System32\cmd /c bcdedit /set {current} bootstatuspolicy ignoreallfailures</CommandLine>
<Order>6</Order>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
- <CommandLine>%WINDIR%\System32\cmd /c netsh interface ip set address "Local Area Connection" dhcp</CommandLine>
+ <CommandLine>%WINDIR%\System32\cmd /c E:\setuprss.bat</CommandLine>
<Order>7</Order>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
- <CommandLine>%WINDIR%\System32\cmd /c ping 10.0.2.2 -n 20 && A:\finish.exe</CommandLine>
+ <CommandLine>%WINDIR%\System32\cmd /c netsh interface ip set address "Local Area Connection" dhcp</CommandLine>
<Order>8</Order>
</SynchronousCommand>
+ <SynchronousCommand wcm:action="add">
+ <CommandLine>%WINDIR%\System32\cmd /c ping 10.0.2.2 -n 20 && A:\finish.exe</CommandLine>
+ <Order>9</Order>
+ </SynchronousCommand>
</FirstLogonCommands>
<OOBE>
diff --git a/client/tests/kvm/unattended/win2008-64-autounattend.xml b/client/tests/kvm/unattended/win2008-64-autounattend.xml
index 98f5589..fce6582 100644
--- a/client/tests/kvm/unattended/win2008-64-autounattend.xml
+++ b/client/tests/kvm/unattended/win2008-64-autounattend.xml
@@ -158,17 +158,21 @@
<Order>5</Order>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
- <CommandLine>%WINDIR%\System32\cmd /c E:\setuprss.bat</CommandLine>
+ <CommandLine>%WINDIR%\System32\cmd /c bcdedit /set {current} bootstatuspolicy ignoreallfailures</CommandLine>
<Order>6</Order>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
- <CommandLine>%WINDIR%\System32\cmd /c netsh interface ip set address "Local Area Connection" dhcp</CommandLine>
+ <CommandLine>%WINDIR%\System32\cmd /c E:\setuprss.bat</CommandLine>
<Order>7</Order>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
- <CommandLine>%WINDIR%\System32\cmd /c ping 10.0.2.2 -n 20 && A:\finish.exe</CommandLine>
+ <CommandLine>%WINDIR%\System32\cmd /c netsh interface ip set address "Local Area Connection" dhcp</CommandLine>
<Order>8</Order>
</SynchronousCommand>
+ <SynchronousCommand wcm:action="add">
+ <CommandLine>%WINDIR%\System32\cmd /c ping 10.0.2.2 -n 20 && A:\finish.exe</CommandLine>
+ <Order>9</Order>
+ </SynchronousCommand>
</FirstLogonCommands>
<OOBE>
<HideEULAPage>true</HideEULAPage>
diff --git a/client/tests/kvm/unattended/win2008-r2-autounattend.xml b/client/tests/kvm/unattended/win2008-r2-autounattend.xml
index b624d10..7e9ab23 100644
--- a/client/tests/kvm/unattended/win2008-r2-autounattend.xml
+++ b/client/tests/kvm/unattended/win2008-r2-autounattend.xml
@@ -70,6 +70,20 @@
<UserLocale>en-us</UserLocale>
<UILanguageFallback>en-us</UILanguageFallback>
</component>
+ <component name="Microsoft-Windows-PnpCustomizationsWinPE"
+ processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35"
+ language="neutral" versionScope="nonSxS"
+ xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+ <DriverPaths>
+ <PathAndCredentials wcm:keyValue="1" wcm:action="add">
+ <Path>KVM_TEST_STORAGE_DRIVER_PATH</Path>
+ </PathAndCredentials>
+ <PathAndCredentials wcm:keyValue="2" wcm:action="add">
+ <Path>KVM_TEST_NETWORK_DRIVER_PATH</Path>
+ </PathAndCredentials>
+ </DriverPaths>
+ </component>
</settings>
<settings pass="specialize">
<component name="Microsoft-Windows-Deployment"
@@ -158,17 +172,21 @@
<Order>5</Order>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
- <CommandLine>%WINDIR%\System32\cmd /c E:\setuprss.bat</CommandLine>
+ <CommandLine>%WINDIR%\System32\cmd /c bcdedit /set {current} bootstatuspolicy ignoreallfailures</CommandLine>
<Order>6</Order>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
- <CommandLine>%WINDIR%\System32\cmd /c netsh interface ip set address "Local Area Connection" dhcp</CommandLine>
+ <CommandLine>%WINDIR%\System32\cmd /c E:\setuprss.bat</CommandLine>
<Order>7</Order>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
- <CommandLine>%WINDIR%\System32\cmd /c ping 10.0.2.2 -n 20 && A:\finish.exe</CommandLine>
+ <CommandLine>%WINDIR%\System32\cmd /c netsh interface ip set address "Local Area Connection" dhcp</CommandLine>
<Order>8</Order>
</SynchronousCommand>
+ <SynchronousCommand wcm:action="add">
+ <CommandLine>%WINDIR%\System32\cmd /c ping 10.0.2.2 -n 20 && A:\finish.exe</CommandLine>
+ <Order>9</Order>
+ </SynchronousCommand>
</FirstLogonCommands>
<OOBE>
<HideEULAPage>true</HideEULAPage>
diff --git a/client/tests/kvm/unattended/win7-32-autounattend.xml b/client/tests/kvm/unattended/win7-32-autounattend.xml
index a16cdd7..6904db1 100644
--- a/client/tests/kvm/unattended/win7-32-autounattend.xml
+++ b/client/tests/kvm/unattended/win7-32-autounattend.xml
@@ -16,7 +16,7 @@
<UserLocale>en-us</UserLocale>
</component>
<component name="Microsoft-Windows-PnpCustomizationsWinPE"
- processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35"
+ processorArchitecture="x86" publicKeyToken="31bf3856ad364e35"
language="neutral" versionScope="nonSxS"
xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
@@ -156,20 +156,24 @@
<Order>5</Order>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
- <CommandLine>%WINDIR%\System32\cmd /c E:\setuprss.bat</CommandLine>
+ <CommandLine>%WINDIR%\System32\cmd /c bcdedit /set {current} bootstatuspolicy ignoreallfailures</CommandLine>
<Order>6</Order>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
- <CommandLine>%WINDIR%\System32\cmd /c netsh interface ip set address "Local Area Connection" dhcp</CommandLine>
+ <CommandLine>%WINDIR%\System32\cmd /c E:\setuprss.bat</CommandLine>
<Order>7</Order>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
- <CommandLine>%WINDIR%\System32\cmd /c ping 10.0.2.2 -n 20 && A:\finish.exe</CommandLine>
+ <CommandLine>%WINDIR%\System32\cmd /c netsh interface ip set address "Local Area Connection" dhcp</CommandLine>
<Order>8</Order>
</SynchronousCommand>
+ <SynchronousCommand wcm:action="add">
+ <CommandLine>%WINDIR%\System32\cmd /c ping 10.0.2.2 -n 20 && A:\finish.exe</CommandLine>
+ <Order>9</Order>
+ </SynchronousCommand>
</FirstLogonCommands>
</component>
</settings>
<cpi:offlineImage cpi:source="wim:c:/install.wim#Windows Longhorn SERVERSTANDARD"
xmlns:cpi="urn:schemas-microsoft-com:cpi" />
-</unattend>
\ No newline at end of file
+</unattend>
diff --git a/client/tests/kvm/unattended/win7-64-autounattend.xml b/client/tests/kvm/unattended/win7-64-autounattend.xml
index 65873f6..e30e2c7 100644
--- a/client/tests/kvm/unattended/win7-64-autounattend.xml
+++ b/client/tests/kvm/unattended/win7-64-autounattend.xml
@@ -156,20 +156,24 @@
<Order>5</Order>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
- <CommandLine>%WINDIR%\System32\cmd /c E:\setuprss.bat</CommandLine>
+ <CommandLine>%WINDIR%\System32\cmd /c bcdedit /set {current} bootstatuspolicy ignoreallfailures</CommandLine>
<Order>6</Order>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
- <CommandLine>%WINDIR%\System32\cmd /c netsh interface ip set address "Local Area Connection" dhcp</CommandLine>
+ <CommandLine>%WINDIR%\System32\cmd /c E:\setuprss.bat</CommandLine>
<Order>7</Order>
</SynchronousCommand>
<SynchronousCommand wcm:action="add">
- <CommandLine>%WINDIR%\System32\cmd /c ping 10.0.2.2 -n 20 && A:\finish.exe</CommandLine>
+ <CommandLine>%WINDIR%\System32\cmd /c netsh interface ip set address "Local Area Connection" dhcp</CommandLine>
<Order>8</Order>
</SynchronousCommand>
+ <SynchronousCommand wcm:action="add">
+ <CommandLine>%WINDIR%\System32\cmd /c ping 10.0.2.2 -n 20 && A:\finish.exe</CommandLine>
+ <Order>9</Order>
+ </SynchronousCommand>
</FirstLogonCommands>
</component>
</settings>
<cpi:offlineImage cpi:source="wim:c:/install.wim#Windows Longhorn SERVERSTANDARD"
xmlns:cpi="urn:schemas-microsoft-com:cpi" />
-</unattend>
\ No newline at end of file
+</unattend>
diff --git a/client/tests/tiobench/control b/client/tests/tiobench/control
index ee806cf..dd40a26 100644
--- a/client/tests/tiobench/control
+++ b/client/tests/tiobench/control
@@ -9,4 +9,7 @@
Performs threaded I/O benchmarks.
"""
-job.run_test('tiobench', dir='/mnt')
+job.run_test('tiobench',
+ args='--block=4096 --block=8192 --threads=10 --size=1024',
+ iterations=2,
+ dir='/mnt')
diff --git a/client/tests/tiobench/tiobench.py b/client/tests/tiobench/tiobench.py
index e0693aa..747fc26 100644
--- a/client/tests/tiobench/tiobench.py
+++ b/client/tests/tiobench/tiobench.py
@@ -1,4 +1,4 @@
-import os
+import os, logging
from autotest_lib.client.bin import test, utils
@@ -29,4 +29,11 @@
self.args = args
os.chdir(self.srcdir)
- utils.system('./tiobench.pl --dir %s %s' %(self.dir, self.args))
+ results = utils.system_output('./tiobench.pl --dir %s %s' %
+ (self.dir, self.args))
+
+ logging.info(results)
+ results_path = os.path.join(self.resultsdir,
+ 'raw_output_%s' % self.iteration)
+
+ utils.open_write_close(results_path, results)
diff --git a/client/tests/tracing_microbenchmark/base_tracer.py b/client/tests/tracing_microbenchmark/base_tracer.py
new file mode 100644
index 0000000..e36eca4
--- /dev/null
+++ b/client/tests/tracing_microbenchmark/base_tracer.py
@@ -0,0 +1,30 @@
+import os
+from autotest_lib.client.bin import utils
+
+
+class Tracer(object):
+ """
+ Common interface for tracing.
+ """
+
+ tracing_dir = None
+
+ def trace_config(self, path, value):
+ """
+ Write value to a tracing config file under self.tracing_dir.
+ """
+ path = os.path.join(self.tracing_dir, path)
+ utils.open_write_close(path, value)
+
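+ # Subclasses are expected to point tracing_dir at a real tracing
+ # filesystem and drive it via trace_config(); e.g. an ftrace-style
+ # tracer might use (illustrative):
+ # tracing_dir = '/sys/kernel/debug/tracing'
+ # self.trace_config('current_tracer', 'nop')
+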
+ def warmup(self, buffer_size_kb):
+ pass
+ def cleanup(self):
+ pass
+ def start_tracing(self):
+ pass
+ def stop_tracing(self):
+ pass
+ def gather_stats(self, results):
+ pass
+ def reset_tracing(self):
+ pass
diff --git a/client/tests/tracing_microbenchmark/control b/client/tests/tracing_microbenchmark/control
new file mode 100644
index 0000000..ec7017b
--- /dev/null
+++ b/client/tests/tracing_microbenchmark/control
@@ -0,0 +1,28 @@
+AUTHOR = "David Sharp <dhsharp@google.com>"
+NAME = "Tracing microbenchmark"
+TIME = "SHORT"
+TEST_CATEGORY = "Benchmark"
+TEST_CLASS = "Kernel"
+TEST_TYPE = "client"
+
+DOC = """
+A simple benchmark of kernel tracers such as ftrace. Enables tracepoints in
+sys_getuid and makes 100,000 calls to getuid with tracing on and off to measure
+the overhead of enabling tracing. The intent of this benchmark is to avoid
+overflowing the ring buffer, so the buffer is generously sized.
+
+
+tracer: tracepoint enabled
+------
+off: n/a
+ftrace: syscalls:sys_enter_getuid
+
+Args:
+ tracer: see table above.
+ buffer_size_kb: Set the tracing ring buffer to this size (per-cpu).
+ calls: Set the number of calls to make to getuid.
+"""
+
+
+job.run_test('tracing_microbenchmark', tracer='off', tag='off', iterations=10)
+job.run_test('tracing_microbenchmark', tracer='ftrace', tag='ftrace', iterations=10)
diff --git a/client/tests/tracing_microbenchmark/src/Makefile b/client/tests/tracing_microbenchmark/src/Makefile
new file mode 100644
index 0000000..ac2af8a
--- /dev/null
+++ b/client/tests/tracing_microbenchmark/src/Makefile
@@ -0,0 +1,8 @@
+CC = $(CROSS_COMPILE)gcc
+LDLIBS = -lrt
+
+getuid_microbench: getuid_microbench.o
+
+.PHONY: clean
+clean:
+ rm -f *.o getuid_microbench
diff --git a/client/tests/tracing_microbenchmark/src/getuid_microbench.c b/client/tests/tracing_microbenchmark/src/getuid_microbench.c
new file mode 100644
index 0000000..fd540cb
--- /dev/null
+++ b/client/tests/tracing_microbenchmark/src/getuid_microbench.c
@@ -0,0 +1,63 @@
+#define _GNU_SOURCE
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+#include <time.h>
+
+void ts_subtract(struct timespec *result,
+ const struct timespec *time1, const struct timespec *time2) {
+ *result = *time1;
+ result->tv_sec -= time2->tv_sec;
+ if (result->tv_nsec < time2->tv_nsec) {
+ /* borrow a second */
+ result->tv_nsec += 1000000000L;
+ result->tv_sec--;
+ }
+ result->tv_nsec -= time2->tv_nsec;
+}
+
+void usage(const char *cmd) {
+ fprintf(stderr, "usage: %s <iterations>\n", cmd);
+}
+
+int main (int argc, char *argv[]) {
+ struct timespec start_time, end_time, elapsed_time;
+ uid_t uid;
+ long iterations, i;
+ double per_call;
+
+ if (argc != 2) {
+ usage(argv[0]);
+ return 1;
+ }
+
+ iterations = atol(argv[1]);
+ if (iterations < 0) {
+ usage(argv[0]);
+ return 1;
+ }
+
+ if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &start_time)) {
+ perror("clock_gettime");
+ return errno;
+ }
+
+ for (i = iterations; i; i--)
+ uid = syscall(SYS_getuid);
+
+ if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &end_time)) {
+ perror("clock_gettime");
+ return errno;
+ }
+
+ ts_subtract(&elapsed_time, &end_time, &start_time);
+ per_call = (elapsed_time.tv_sec * 1000000000.0L + elapsed_time.tv_nsec) /
+ (double)iterations;
+ printf("%ld calls in %ld.%09ld s (%lf ns/call)\n", iterations,
+ elapsed_time.tv_sec, elapsed_time.tv_nsec, per_call);
+
+ return 0;
+}
diff --git a/client/tests/tracing_microbenchmark/tracers.py b/client/tests/tracing_microbenchmark/tracers.py
new file mode 100644
index 0000000..bdd5194
--- /dev/null
+++ b/client/tests/tracing_microbenchmark/tracers.py
@@ -0,0 +1,60 @@
+import os
+from autotest_lib.client.bin import utils
+
+import base_tracer
+try:
+ from site_tracers import *
+except ImportError:
+ pass
+
+
+off = base_tracer.Tracer
+
+
+class ftrace(base_tracer.Tracer):
+
+ mountpoint = '/sys/kernel/debug'
+ tracing_dir = os.path.join(mountpoint, 'tracing')
+
+ def warmup(self, buffer_size_kb):
+ if not os.path.exists(self.tracing_dir):
+ utils.system('mount -t debugfs debugfs %s' % self.mountpoint)
+
+ # ensure clean state:
+ self.trace_config('tracing_enabled', '0')
+ self.trace_config('current_tracer', 'nop')
+ self.trace_config('events/enable', '0')
+ self.trace_config('trace', '')
+ # set ring buffer size:
+ self.trace_config('buffer_size_kb', str(buffer_size_kb))
+ # enable tracepoints:
+ self.trace_config('events/syscalls/sys_enter_getuid/enable', '1')
+
+ def cleanup(self):
+ # reset ring buffer size:
+ self.trace_config('buffer_size_kb', '1408')
+ # disable tracepoints:
+ self.trace_config('events/enable', '0')
+
+ def start_tracing(self):
+ self.trace_config('tracing_enabled', '1')
+
+ def stop_tracing(self):
+ self.trace_config('tracing_enabled', '0')
+
+ def reset_tracing(self):
+ self.trace_config('trace', '')
+
+ def gather_stats(self, results):
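+ # Each per_cpu/cpuN/stats file holds "key: value" lines, e.g.
+ # (illustrative): "entries: 0", "overrun: 0", "commit overrun: 0".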
+ per_cpu = os.path.join(self.tracing_dir, 'per_cpu')
+ for cpu in os.listdir(per_cpu):
+ cpu_stats = os.path.join(per_cpu, cpu, 'stats')
+ for line in utils.read_file(cpu_stats).splitlines():
+ key, val = line.split(': ')
+ key = key.replace(' ', '_')
+ val = int(val)
+ cpu_key = '%s_%s' % (cpu, key)
+ total_key = 'total_' + key
+ results[cpu_key] = val
+ results[total_key] = (results.get(total_key, 0) +
+ results[cpu_key])
diff --git a/client/tests/tracing_microbenchmark/tracing_microbenchmark.py b/client/tests/tracing_microbenchmark/tracing_microbenchmark.py
new file mode 100644
index 0000000..2d7af6d
--- /dev/null
+++ b/client/tests/tracing_microbenchmark/tracing_microbenchmark.py
@@ -0,0 +1,50 @@
+import os
+import re
+from autotest_lib.client.bin import test
+from autotest_lib.client.bin import utils
+
+import tracers
+import base_tracer
+
+class tracing_microbenchmark(test.test):
+ version = 1
+ preserve_srcdir = True
+
+ def setup(self):
+ os.chdir(self.srcdir)
+ utils.system('make CROSS_COMPILE=""')
+
+ def initialize(self, tracer='ftrace', calls=100000, **kwargs):
+ self.job.require_gcc()
+ tracer_class = getattr(tracers, tracer)
+ if not issubclass(tracer_class, base_tracer.Tracer):
+ raise TypeError
+ self.tracer = tracer_class()
+
+ getuid_microbench = os.path.join(self.srcdir, 'getuid_microbench')
+ self.cmd = '%s %d' % (getuid_microbench, calls)
+
+ def warmup(self, buffer_size_kb=8000, **kwargs):
+ self.tracer.warmup(buffer_size_kb)
+
+ def cleanup(self):
+ self.tracer.cleanup()
+
+ def run_once(self, **kwargs):
+ self.results = {}
+
+ self.tracer.start_tracing()
+ self.cmd_result = utils.run(self.cmd)
+ self.tracer.stop_tracing()
+
+ self.tracer.gather_stats(self.results)
+ self.tracer.reset_tracing()
+
+ def postprocess_iteration(self):
+ result_re = re.compile(r'(?P<calls>\d+) calls '
+ r'in (?P<time>\d+\.\d+) s '
+ r'\((?P<ns_per_call>\d+\.\d+) ns/call\)')
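+ # Matches getuid_microbench output such as (illustrative):
+ # 100000 calls in 0.012345678 s (123.456789 ns/call)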
+ match = result_re.match(self.cmd_result.stdout)
+ self.results.update(match.groupdict())
+
+ self.write_perf_keyval(self.results)
diff --git a/client/tools/boottool b/client/tools/boottool
index 4dafbab..728e4ef 100755
--- a/client/tools/boottool
+++ b/client/tools/boottool
@@ -791,6 +791,38 @@
return $arch;
}
+=head3 detect_os_vendor()
+
+Input:
+Output: string
+
+This function determines the OS vendor (Linux distribution).
+
+Return values: "Red Hat", "Fedora", "SUSE", "Ubuntu", "Debian", or
+"Unknown" if none of the predefined patterns could be found in the
+issue file.
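+
+Example usage (illustrative):
+
+  my $vendor = Linux::Bootloader::Detect::detect_os_vendor();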
+
+=cut
+
+sub detect_os_vendor {
+ my $vendor = "";
+ my $issue_file = '/etc/issue';
+ if ( not system("egrep 'Red Hat' $issue_file") ){
+ $vendor = 'Red Hat';
+ } elsif ( not system("egrep 'Fedora' $issue_file") ){
+ $vendor = 'Fedora';
+ } elsif ( not system("egrep 'SUSE' $issue_file") ){
+ $vendor = 'SUSE';
+ } elsif ( not system("egrep 'Ubuntu' $issue_file") ){
+ $vendor = 'Ubuntu';
+ } elsif ( not system("egrep 'Debian' $issue_file") ){
+ $vendor = 'Debian';
+ } else {
+ $vendor = 'Unknown';
+ }
+ return $vendor;
+}
+
=head3 detect_bootloader(['device1', 'device2', ...])
Input: devices to detect against (optional)
@@ -1595,20 +1627,31 @@
sub boot_once {
my $self=shift;
my $entry_to_boot_once = shift;
+ my $detected_os_vendor = Linux::Bootloader::Detect::detect_os_vendor();
unless ( $entry_to_boot_once ) { print "No kernel\n"; return undef;}
$self->read();
my $default=$self->get_default();
- if ( $default == $self->_lookup($entry_to_boot_once)){
- warn "The default and once-boot kernels are the same. No action taken. \nSet default to something else, then re-try.\n";
- return undef;
- }
if ( $self->_get_bootloader_version() < 0.97 ){
warn "This function works for grub version 0.97 and up. No action taken. \nUpgrade, then re-try.\n";
return undef;
}
+ if ( $detected_os_vendor eq "Red Hat" or $detected_os_vendor eq "Fedora" ) {
+ # if not a number, do title lookup
+ if ( $entry_to_boot_once !~ /^\d+$/ ) {
+ $entry_to_boot_once = $self->_lookup($entry_to_boot_once);
+ return undef unless ( defined $entry_to_boot_once );
+ }
+
+ return `echo "savedefault --default=$entry_to_boot_once" --once | grub --batch`;
+ } else {
+ if ( $default == $self->_lookup($entry_to_boot_once)){
+ warn "The default and once-boot kernels are the same. No action taken. \nSet default to something else, then re-try.\n";
+ return undef;
+ }
+
$self->set_default('saved');
if ( ! -f '/boot/grub/default' ){
open FH, '>/boot/grub/default';
@@ -1635,7 +1678,7 @@
$self->update( 'update-kernel'=>"$entry_to_boot_once",'option'=>'','savedefault' => 'fallback' );
$self->update( 'update-kernel'=>"$default",'option'=>'', 'savedefault' => '' );
$self->write();
-
+ }
}
sub _get_bootloader_version {
diff --git a/conmux/drivers/reboot-apc b/conmux/drivers/reboot-apc
index e66dc39..7cdf296 100755
--- a/conmux/drivers/reboot-apc
+++ b/conmux/drivers/reboot-apc
@@ -24,6 +24,13 @@
return [shift list]
}
+proc enter_outlet {outlet} {
+ send "\r"
+ expect "> "
+ send $outlet
+ send "\r"
+}
+
set timeout 10
set user {apc}
set pass {apc}
@@ -68,23 +75,29 @@
expect {
"3- Outlet Control/Configuration" {
send "3\r"
+ enter_outlet $outlet
exp_continue
}
"2- Outlet Control" {
send "2\r"
+ enter_outlet $outlet
+ exp_continue
+ }
+ "2- Outlet Management" {
+ send "2\r"
exp_continue
}
}
-send "\r"
-expect "> "
-send $outlet
-send "\r"
# Here too, if we're just an outlet controller we don't get the option
# to modify configuration
expect {
"1- Control Outlet" {
send "1\r"
}
+ "1- Outlet Control/Configuration" {
+ send "1\r"
+ enter_outlet $outlet
+ }
}
expect "3- Immediate Reboot"
expect "> "
diff --git a/frontend/afe/rpc_utils.py b/frontend/afe/rpc_utils.py
index 452aa8f..7250d92 100644
--- a/frontend/afe/rpc_utils.py
+++ b/frontend/afe/rpc_utils.py
@@ -233,8 +233,10 @@
set(host.hostname for host in ok_hosts))
if failing_hosts:
raise model_logic.ValidationError(
- {'hosts' : 'Host(s) failed to meet job dependencies: ' +
- ', '.join(failing_hosts)})
+ {'hosts' : 'Host(s) failed to meet job dependencies (' +
+ (', '.join(job_dependencies)) + '): ' +
+ (', '.join(failing_hosts))})
+
def _execution_key_for(host_queue_entry):
diff --git a/frontend/migrations/064_add_jobs_and_tests_time_indices.py b/frontend/migrations/064_add_jobs_and_tests_time_indices.py
new file mode 100644
index 0000000..3508bba
--- /dev/null
+++ b/frontend/migrations/064_add_jobs_and_tests_time_indices.py
@@ -0,0 +1,10 @@
+# These indices speed up date-range queries often used in making dashboards.
+UP_SQL = """
+alter table tko_tests add index started_time (started_time);
+alter table afe_jobs add index created_on (created_on);
+"""
+
+DOWN_SQL = """
+drop index started_time on tko_tests;
+drop index created_on on afe_jobs;
+"""
diff --git a/global_config.ini b/global_config.ini
index aa13701..8a603b9 100644
--- a/global_config.ini
+++ b/global_config.ini
@@ -77,6 +77,7 @@
gc_stats_interval_mins: 360
# set nonzero to enable periodic reverification of all dead hosts
reverify_period_minutes: 0
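+# cap on how many dead hosts are reverified in one pass; 0 means no limit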
+reverify_max_hosts_at_once: 0
drone_sets_enabled: False
# default_drone_set_name: This is required if drone sets are enabled.
default_drone_set_name:
diff --git a/scheduler/drone_manager.py b/scheduler/drone_manager.py
index 18361d4..75724f3 100644
--- a/scheduler/drone_manager.py
+++ b/scheduler/drone_manager.py
@@ -452,12 +452,14 @@
while self._drone_queue:
drone = heapq.heappop(self._drone_queue).drone
checked_drones.append(drone)
+ logging.info('Checking drone %s', drone.hostname)
if not drone.usable_by(username):
continue
drone_allowed = (drone_hostnames_allowed is None
or drone.hostname in drone_hostnames_allowed)
if not drone_allowed:
+ logging.debug('Drone %s not allowed', drone.hostname)
continue
usable_drones.append(drone)
@@ -465,6 +467,9 @@
if drone.active_processes + num_processes <= drone.max_processes:
drone_to_use = drone
break
+ logging.info('Drone %s has %d active + %d requested > %d max',
+ drone.hostname, drone.active_processes, num_processes,
+ drone.max_processes)
if not drone_to_use and usable_drones:
drone_summary = ','.join('%s %s/%s' % (drone.hostname,
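
The added logging instruments the drone-picking loop: drones pop off a
min-heap least-loaded-first, and the first one with enough spare process
capacity wins. Reduced to its core (simplified names; the user and
allowed-list filtering are omitted), the selection is roughly:

    import heapq
    from collections import namedtuple

    Drone = namedtuple('Drone', 'hostname active_processes max_processes')

    def pick_drone(drone_queue, num_processes):
        """Pop least-loaded drones until one has spare capacity."""
        while drone_queue:
            _, _, drone = heapq.heappop(drone_queue)
            if drone.active_processes + num_processes <= drone.max_processes:
                return drone
        return None

    drones = [Drone('drone1', 9, 10), Drone('drone2', 2, 10)]
    queue = [(d.active_processes, i, d) for i, d in enumerate(drones)]
    heapq.heapify(queue)
    print(pick_drone(queue, 5).hostname)  # drone2: 2 + 5 <= 10
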
diff --git a/scheduler/drone_utility.py b/scheduler/drone_utility.py
index 55b0a6b..c84a033 100755
--- a/scheduler/drone_utility.py
+++ b/scheduler/drone_utility.py
@@ -103,13 +103,15 @@
for line_components in split_lines)
- def _refresh_processes(self, command_name, open=open):
+ def _refresh_processes(self, command_name, open=open,
+ site_check_parse=None):
# The open argument is used for test injection.
check_mark = global_config.global_config.get_config_value(
'SCHEDULER', 'check_processes_for_dark_mark', bool, False)
processes = []
for info in self._get_process_info():
- if info['comm'] == command_name:
+ is_parse = (site_check_parse and site_check_parse(info))
+ if info['comm'] == command_name or is_parse:
if (check_mark and not
self._check_pid_for_dark_mark(info['pid'], open=open)):
self._warn('%(comm)s process pid %(pid)s has no '
@@ -148,10 +150,14 @@
* pidfiles_second_read: same info as pidfiles, but gathered after the
processes are scanned.
"""
+ site_check_parse = utils.import_site_function(
+ __file__, 'autotest_lib.scheduler.site_drone_utility',
+ 'check_parse', lambda x: False)
results = {
'pidfiles' : self._read_pidfiles(pidfile_paths),
'autoserv_processes' : self._refresh_processes('autoserv'),
- 'parse_processes' : self._refresh_processes('parse'),
+ 'parse_processes' : self._refresh_processes(
+ 'parse', site_check_parse=site_check_parse),
'pidfiles_second_read' : self._read_pidfiles(pidfile_paths),
}
return results
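
check_parse arrives through autotest's usual site-extension hook:
import_site_function returns the site module's implementation when one is
installed, and otherwise the supplied default, here `lambda x: False`
("never a parse process"). A simplified sketch of the hook's behavior (the
real implementation in common_lib is more thorough):

    def import_site_function(path, module, funcname, default):
        # Fall back to `default` when no site module is installed.
        try:
            mod = __import__(module, {}, {}, [funcname])
            return getattr(mod, funcname)
        except ImportError:
            return default
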
diff --git a/scheduler/monitor_db.py b/scheduler/monitor_db.py
index 2d878bb..c5abed3 100755
--- a/scheduler/monitor_db.py
+++ b/scheduler/monitor_db.py
@@ -53,10 +53,16 @@
_db = None
_shutdown = False
_autoserv_path = os.path.join(drones.AUTOTEST_INSTALL_DIR, 'server', 'autoserv')
-_parser_path = os.path.join(drones.AUTOTEST_INSTALL_DIR, 'tko', 'parse')
_testing_mode = False
_drone_manager = None
+def _parser_path_default(install_dir):
+ return os.path.join(install_dir, 'tko', 'parse')
+_parser_path_func = utils.import_site_function(
+ __file__, 'autotest_lib.scheduler.site_monitor_db',
+ 'parser_path', _parser_path_default)
+_parser_path = _parser_path_func(drones.AUTOTEST_INSTALL_DIR)
+
def _get_pidfile_timeout_secs():
"""@returns How long to wait for autoserv to write pidfile."""
diff --git a/scheduler/monitor_db_cleanup.py b/scheduler/monitor_db_cleanup.py
index 60b02f7..d8bab11 100644
--- a/scheduler/monitor_db_cleanup.py
+++ b/scheduler/monitor_db_cleanup.py
@@ -3,7 +3,7 @@
"""
-import datetime, time, logging
+import datetime, time, logging, random
from autotest_lib.database import database_connection
from autotest_lib.frontend.afe import models
from autotest_lib.scheduler import email_manager, scheduler_config
@@ -162,6 +162,14 @@
return (self._last_reverify_time + reverify_period_sec) <= time.time()
+ def _choose_subset_of_hosts_to_reverify(self, hosts):
+ """Given hosts needing verification, return a subset to reverify."""
+ max_at_once = scheduler_config.config.reverify_max_hosts_at_once
+ if max_at_once > 0 and len(hosts) > max_at_once:
+ return random.sample(hosts, max_at_once)
+ return sorted(hosts)
+
+
def _reverify_dead_hosts(self):
if not self._should_reverify_hosts_now():
return
@@ -177,8 +185,11 @@
if not hosts:
return
- logging.info('Reverifying dead hosts %s'
- % ', '.join(host.hostname for host in hosts))
+ hosts = list(hosts)
+ total_hosts = len(hosts)
+ hosts = self._choose_subset_of_hosts_to_reverify(hosts)
+ logging.info('Reverifying dead hosts (%d of %d) %s', len(hosts),
+ total_hosts, ', '.join(host.hostname for host in hosts))
for host in hosts:
models.SpecialTask.schedule_special_task(
host=host, task=models.SpecialTask.Task.VERIFY)
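
When the host list exceeds the limit, the subset is sampled at random so the
same hosts don't monopolize every reverify cycle; at or under the limit the
full set comes back sorted. The behavior in isolation:

    import random

    hosts = ['host1', 'host2', 'host3', 'host4']
    max_at_once = 2
    if max_at_once > 0 and len(hosts) > max_at_once:
        chosen = random.sample(hosts, max_at_once)  # e.g. ['host3', 'host1']
    else:
        chosen = sorted(hosts)
    print(chosen)
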
diff --git a/scheduler/monitor_db_cleanup_test.py b/scheduler/monitor_db_cleanup_test.py
index 0757313..6bf35c4 100755
--- a/scheduler/monitor_db_cleanup_test.py
+++ b/scheduler/monitor_db_cleanup_test.py
@@ -5,7 +5,7 @@
from autotest_lib.frontend import setup_django_environment
from autotest_lib.database import database_connection
from autotest_lib.frontend.afe import frontend_test_utils, models
-from autotest_lib.scheduler import monitor_db_cleanup
+from autotest_lib.scheduler import monitor_db_cleanup, scheduler_config
from autotest_lib.client.common_lib import host_protections
class UserCleanupTest(unittest.TestCase, frontend_test_utils.FrontendTestMixin):
@@ -23,6 +23,9 @@
def test_reverify_dead_hosts(self):
+ # unlimited reverifies
+ self.god.stub_with(scheduler_config.config,
+ 'reverify_max_hosts_at_once', 0)
for i in (0, 1, 2):
self.hosts[i].status = models.Host.Status.REPAIR_FAILED
self.hosts[i].save()
@@ -43,5 +46,34 @@
self.assertEquals(tasks[0].task, models.SpecialTask.Task.VERIFY)
+ def test_reverify_dead_hosts_limits(self):
+ # limit the number of reverifies
+ self.assertTrue(hasattr(scheduler_config.config,
+ 'reverify_max_hosts_at_once'))
+ self.god.stub_with(scheduler_config.config,
+ 'reverify_max_hosts_at_once', 2)
+ for i in (0, 1, 2, 3, 4, 5):
+ self.hosts[i].status = models.Host.Status.REPAIR_FAILED
+ self.hosts[i].save()
+
+ self.hosts[1].locked = True
+ self.hosts[1].save()
+
+ self.hosts[2].protection = host_protections.Protection.DO_NOT_VERIFY
+ self.hosts[2].save()
+
+ self.god.stub_with(self.cleanup, '_should_reverify_hosts_now',
+ lambda: True)
+ self.cleanup._reverify_dead_hosts()
+
+ tasks = models.SpecialTask.objects.all()
+ # four hosts need reverifying but our max limit was set to 2
+ self.assertEquals(len(tasks), 2)
+ self.assertTrue(tasks[0].host.id in (1, 4, 5, 6))
+ self.assertTrue(tasks[1].host.id in (1, 4, 5, 6))
+ self.assertEquals(tasks[0].task, models.SpecialTask.Task.VERIFY)
+ self.assertEquals(tasks[1].task, models.SpecialTask.Task.VERIFY)
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/scheduler/scheduler_config.py b/scheduler/scheduler_config.py
index 23fb0ea..fdc1001 100644
--- a/scheduler/scheduler_config.py
+++ b/scheduler/scheduler_config.py
@@ -16,6 +16,7 @@
'secs_to_wait_for_atomic_group_hosts':
'secs_to_wait_for_atomic_group_hosts',
'reverify_period_minutes': 'reverify_period_minutes',
+ 'reverify_max_hosts_at_once': 'reverify_max_hosts_at_once',
}
diff --git a/server/frontend.py b/server/frontend.py
index 07c71b1..353ccab 100644
--- a/server/frontend.py
+++ b/server/frontend.py
@@ -263,18 +263,27 @@
def create_job_by_test(self, tests, kernel=None, use_container=False,
- **dargs):
+ kernel_cmdline=None, **dargs):
"""
Given a test name, fetch the appropriate control file from the server
and submit it.
+ @param kernel: A comma-separated list of kernel versions to boot.
+ @param kernel_cmdline: The command line used to boot all kernels listed
+ in the kernel parameter.
+
Returns a list of job objects
"""
assert ('hosts' in dargs or
'atomic_group_name' in dargs and 'synch_count' in dargs)
if kernel:
kernel_list = re.split('[\s,]+', kernel.strip())
- kernel_info = [{'version': version} for version in kernel_list]
+ kernel_info = []
+ for version in kernel_list:
+ kernel_dict = {'version': version}
+ if kernel_cmdline is not None:
+ kernel_dict['cmdline'] = kernel_cmdline
+ kernel_info.append(kernel_dict)
else:
kernel_info = None
control_file = self.generate_control_file(
@@ -305,7 +314,7 @@
def run_test_suites(self, pairings, kernel, kernel_label=None,
priority='Medium', wait=True, poll_interval=10,
email_from=None, email_to=None, timeout=168,
- max_runtime_hrs=168):
+ max_runtime_hrs=168, kernel_cmdline=None):
"""
Run a list of test suites on a particular kernel.
@@ -317,6 +326,7 @@
'<kernel-version> : <config> : <date>'
If any pairing object has its job_label attribute set it
will override this value for that particular job.
+ @param kernel_cmdline: The command line to boot the kernel(s) with.
@param wait: boolean - Wait for the results to come back?
@param poll_interval: Interval between polling for job results (in mins)
@param email_from: Send notification email upon completion from here.
@@ -327,6 +337,7 @@
try:
new_job = self.invoke_test(pairing, kernel, kernel_label,
priority, timeout=timeout,
+ kernel_cmdline=kernel_cmdline,
max_runtime_hrs=max_runtime_hrs)
if not new_job:
continue
@@ -454,7 +465,7 @@
def invoke_test(self, pairing, kernel, kernel_label, priority='Medium',
- **dargs):
+ kernel_cmdline=None, **dargs):
"""
Given a pairing of a control file to a machine label, find all machines
with that label, and submit that control file to them.
@@ -486,6 +497,7 @@
tests=[pairing.control_file],
priority=priority,
kernel=kernel,
+ kernel_cmdline=kernel_cmdline,
use_container=pairing.container,
**dargs)
if new_job:
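
Each version in the comma- or space-separated kernel string receives the same
cmdline; the list of dicts below is what ends up in the control-file request.
Worked through with illustrative values:

    import re

    kernel = '2.6.35, 2.6.36'          # illustrative versions
    kernel_cmdline = 'console=ttyS0'   # illustrative cmdline
    kernel_list = re.split(r'[\s,]+', kernel.strip())
    kernel_info = [{'version': v, 'cmdline': kernel_cmdline}
                   for v in kernel_list]
    # [{'version': '2.6.35', 'cmdline': 'console=ttyS0'},
    #  {'version': '2.6.36', 'cmdline': 'console=ttyS0'}]
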
diff --git a/server/git_kernel.py b/server/git_kernel.py
index 9bd7ae6..0581ca0 100644
--- a/server/git_kernel.py
+++ b/server/git_kernel.py
@@ -16,7 +16,7 @@
It is used to pull down a local copy of a git repo, check if the local repo
is up-to-date, if not update and then build the kernel from the git repo.
"""
- def __init__(self, repodir, giturl, weburl):
+ def __init__(self, repodir, giturl, weburl=None):
super(GitKernel, self).__init__(repodir, giturl, weburl)
self._patches = []
self._config = None
@@ -96,13 +96,17 @@
self._build = os.path.join(host.get_tmp_dir(), "build")
logging.warning('Builddir %s is not persistent (it will be erased '
'in future jobs)', self._build)
+ else:
+ self._build = builddir
# push source to host for install
logging.info('Pushing %s to host', self.source_material)
host.send_file(self.source_material, self._build)
+ remote_source_material = os.path.join(self._build,
+ os.path.basename(self.source_material))
# use a source_kernel to configure, patch, build and install.
- sk = source_kernel.SourceKernel(self._build)
+ sk = source_kernel.SourceKernel(remote_source_material)
if build:
# apply patches
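
Two fixes land here: a caller-supplied builddir appears to have been ignored
before (self._build was only set in the temporary-directory branch), and
SourceKernel was handed the remote build directory rather than the path where
host.send_file actually deposits the source, i.e. builddir plus the source's
basename. The path arithmetic, with illustrative values:

    import os

    build = '/usr/local/autotest/tmp/build'       # illustrative remote builddir
    source_material = '/var/cache/kernels/linux'  # illustrative local checkout
    remote_source_material = os.path.join(build,
                                          os.path.basename(source_material))
    print(remote_source_material)  # /usr/local/autotest/tmp/build/linux
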
diff --git a/server/hosts/abstract_ssh.py b/server/hosts/abstract_ssh.py
index ef179f5..3d8d9e9 100644
--- a/server/hosts/abstract_ssh.py
+++ b/server/hosts/abstract_ssh.py
@@ -69,6 +69,7 @@
if not self._use_rsync:
logging.warn("rsync not available on remote host %s -- disabled",
self.hostname)
+ return self._use_rsync
def _check_rsync(self):