This is the result of a batch reindent.py run across our tree.
As Martin pointed out, we ought to be more careful and create
an SVN pre-commit script to avoid inserting trash into the
tree. In the meantime, this is a good start at cleaning things
up.
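
For the batch pass itself, reindent.py (the script shipped with
Python under Tools/scripts) was run; it rewrites .py files in place
to 4-space indentation, converts tabs, trims trailing whitespace
and drops blank lines at the end of files, and the usual invocation
is along the lines of "reindent.py -r ." from the top of the tree.
As for the pre-commit script, the sketch below shows the kind of
check that could be wired into SVN; the file name and wiring are
illustrative only (nothing like it exists in the tree yet). It
assumes svnlook is on the hook's PATH and simply rejects any .py
file in the transaction that still carries trailing whitespace or
tab indentation:

#!/usr/bin/python
# Illustrative SVN pre-commit hook sketch: Subversion invokes hooks as
#   pre-commit REPOS-PATH TXN-NAME
# and rejects the commit if the hook exits non-zero.
import subprocess
import sys


def svnlook(subcmd, repos, txn, *args):
    # Run "svnlook <subcmd> -t <txn> <repos> [args...]" and return stdout.
    cmd = ['svnlook', subcmd, '-t', txn, repos] + list(args)
    return subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]


def main(repos, txn):
    errors = []
    for line in svnlook('changed', repos, txn).splitlines():
        if not line.strip():
            continue
        # "svnlook changed" prints a status code followed by the path.
        status, path = line.split(None, 1)
        if status.startswith('D') or not path.endswith('.py'):
            continue
        contents = svnlook('cat', repos, txn, path)
        for i, text in enumerate(contents.splitlines()):
            if text != text.rstrip():
                errors.append('%s:%d: trailing whitespace' % (path, i + 1))
            if text.startswith('\t'):
                errors.append('%s:%d: tab indentation' % (path, i + 1))
    if errors:
        # Anything written to stderr is reported back to the committer.
        sys.stderr.write('\n'.join(errors) + '\n')
        return 1
    return 0


if __name__ == '__main__':
    sys.exit(main(sys.argv[1], sys.argv[2]))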

Signed-off-by: Lucas Meneghel Rodrigues <lmr@redhat.com>


git-svn-id: http://test.kernel.org/svn/autotest/trunk@3487 592f7852-d20e-0410-864c-8624ca9c26a4
diff --git a/client/tests/cerberus/cerberus.py b/client/tests/cerberus/cerberus.py
index b1db5d2..828bcea 100644
--- a/client/tests/cerberus/cerberus.py
+++ b/client/tests/cerberus/cerberus.py
@@ -5,9 +5,9 @@
 
 class cerberus(test.test):
     """
-    This autotest module runs CTCS (Cerberus Test Control System). This test 
-    suite was developed for the now extinct VA Linux's manufacturing system 
-    it has several hardware and software stress tests that can be run in 
+    This autotest module runs CTCS (Cerberus Test Control System). This test
+    suite was developed for the now extinct VA Linux's manufacturing system;
+    it has several hardware and software stress tests that can be run in
     parallel. It does have a control file system that allows testers to specify
     the sorts of tests that they want to see executed. It's an excellent stress
     test for hardware and the kernel.
@@ -24,7 +24,7 @@
         self.nfail = 0
 
 
-    def setup(self, tarball='ctcs-1.3.1pre1.tar.bz2', length = '4h', 
+    def setup(self, tarball='ctcs-1.3.1pre1.tar.bz2', length = '4h',
               tcf_contents=None):
         """
         Builds the test suite, and sets up the control file that is going to
@@ -39,7 +39,7 @@
         utils.extract_tarball_to_dir(cerberus_tarball, self.srcdir)
 
         os.chdir(self.srcdir)
-        # Apply patch to fix build problems on newer distros (absence of 
+        # Apply patch to fix build problems on newer distros (absence of
         # asm/page.h include).
         utils.system('patch -p1 < ../fix-ctcs-build.patch')
         utils.system('make')
@@ -80,7 +80,7 @@
         # After we are done with this iterations, we move the log files to
         # the results dir
         log_base_path = os.path.join(self.srcdir, 'log')
-        log_dir = glob.glob(os.path.join(log_base_path, 
+        log_dir = glob.glob(os.path.join(log_base_path,
                                          'autotest.tcf.log.*'))[0]
         logging.debug('Copying %s log directory to results dir', log_dir)
         dst = os.path.join(self.resultsdir, os.path.basename(log_dir))
diff --git a/client/tests/dma_memtest/dma_memtest.py b/client/tests/dma_memtest/dma_memtest.py
index c35b545..189bde4 100644
--- a/client/tests/dma_memtest/dma_memtest.py
+++ b/client/tests/dma_memtest/dma_memtest.py
@@ -5,7 +5,7 @@
 
 class dma_memtest(test.test):
     """
-    A test for the memory subsystem against heavy IO and DMA operations, 
+    A test for the memory subsystem against heavy IO and DMA operations,
     implemented based on the work of Doug Ledford
     (http://people.redhat.com/dledford/memtest.shtml)
 
@@ -23,8 +23,8 @@
         Downloads a copy of the linux kernel, calculate an estimated size of
         the uncompressed tarball, use this value to calculate the number of
         copies of the linux kernel that will be uncompressed.
-        
-            @param tarball_base: Name of the kernel tarball location that will 
+
+            @param tarball_base: Name of the kernel tarball location that will
             be looked up on the kernel.org mirrors.
             @param parallel: If we are going to uncompress the copies of the
             kernel in parallel or not
@@ -37,7 +37,7 @@
         tarball_url = os.path.join(kernel_repo, tarball_base)
         tarball_md5 = '296a6d150d260144639c3664d127d174'
         logging.info('Downloading linux kernel tarball')
-        self.tarball = utils.unmap_url_cache(self.cachedir, tarball_url, 
+        self.tarball = utils.unmap_url_cache(self.cachedir, tarball_url,
                                              tarball_md5)
         size_tarball = os.path.getsize(self.tarball) / 1024 / 1024
         # Estimation of the tarball size after uncompression
@@ -119,7 +119,7 @@
 
         logging.info('Comparing test copies with base copy')
         for j in range(self.sim_cps):
-            tmp_dir = 'linux.%s/%s' % (j, 
+            tmp_dir = 'linux.%s/%s' % (j,
                             os.path.basename(self.tarball).strip('.tar.bz2'))
             if self.parallel:
                 diff_cmd = ['diff', '-U3', '-rN', 'linux.orig', tmp_dir]
diff --git a/client/tests/interbench/interbench.py b/client/tests/interbench/interbench.py
index c36d303..e988882 100644
--- a/client/tests/interbench/interbench.py
+++ b/client/tests/interbench/interbench.py
@@ -20,5 +20,5 @@
     def run_once(self, args = ''):
         os.chdir(self.tmpdir)
         args += " -c"
-        utils.system("%s/interbench -m 'run #%s' %s" % (self.srcdir, 
+        utils.system("%s/interbench -m 'run #%s' %s" % (self.srcdir,
                                                         self.iteration, args))
diff --git a/client/tests/iozone/iozone.py b/client/tests/iozone/iozone.py
index ea0a9a6..02f3413 100644
--- a/client/tests/iozone/iozone.py
+++ b/client/tests/iozone/iozone.py
@@ -120,4 +120,3 @@
                             keylist[key_name] = result
 
         self.write_perf_keyval(keylist)
-
diff --git a/client/tests/ipv6connect/ipv6connect.py b/client/tests/ipv6connect/ipv6connect.py
index 1d9a63a..5260ba2 100644
--- a/client/tests/ipv6connect/ipv6connect.py
+++ b/client/tests/ipv6connect/ipv6connect.py
@@ -32,6 +32,6 @@
 
 
     def postprocess(self):
-       pattern = re.compile(r'\nTotal time = ([0-9.]+)s\n')
-       for duration in pattern.findall('\n'.join(self.results)):
-           self.write_perf_keyval({'time': duration})
+        pattern = re.compile(r'\nTotal time = ([0-9.]+)s\n')
+        for duration in pattern.findall('\n'.join(self.results)):
+            self.write_perf_keyval({'time': duration})
diff --git a/client/tests/kvm/kvm_install.py b/client/tests/kvm/kvm_install.py
index 7118357..154b83c 100755
--- a/client/tests/kvm/kvm_install.py
+++ b/client/tests/kvm/kvm_install.py
@@ -10,7 +10,7 @@
     sub directory of module_dir. Function will walk through module_dir until
     it finds the modules.
 
-    @param module_dir: Directory where the KVM modules are located. 
+    @param module_dir: Directory where the KVM modules are located.
     """
     vendor = "intel"
     if os.system("grep vmx /proc/cpuinfo 1>/dev/null") != 0:
@@ -112,7 +112,7 @@
         elif load_modules == 'no':
             self.load_modules = False
 
-        if install_mode == 'localsrc': 
+        if install_mode == 'localsrc':
             if not srcdir:
                 raise error.TestError("Install from source directory specified"
                                       "but no source directory provided on the"
@@ -144,7 +144,7 @@
             snapshot_date = params.get("snapshot_date")
             if not snapshot_date:
                 # Take yesterday's snapshot
-                d = (datetime.date.today() - 
+                d = (datetime.date.today() -
                      datetime.timedelta(1)).strftime("%Y%m%d")
             else:
                 d = snapshot_date
diff --git a/client/tests/kvm/kvm_vm.py b/client/tests/kvm/kvm_vm.py
index eba9b84..2ea6681 100644
--- a/client/tests/kvm/kvm_vm.py
+++ b/client/tests/kvm/kvm_vm.py
@@ -751,7 +751,7 @@
             else:
                 self.send_key(char)
 
-    
+
     def get_uuid(self):
         """
         Catch UUID of the VM.
diff --git a/client/tests/lmbench/lmbench.py b/client/tests/lmbench/lmbench.py
index c375757..6bafc84 100755
--- a/client/tests/lmbench/lmbench.py
+++ b/client/tests/lmbench/lmbench.py
@@ -21,7 +21,7 @@
         utils.system('make')
 
 
-    def run_once(self, mem='', fastmem='NO', slowfs='NO', disks='', 
+    def run_once(self, mem='', fastmem='NO', slowfs='NO', disks='',
                 disks_desc='', mhz='', remote='', enough='5000',
                 sync_max='1', fsdir=None, file=None):
         if not fsdir:
diff --git a/client/tests/lsb_dtk/lsb_dtk.py b/client/tests/lsb_dtk/lsb_dtk.py
index 19f2bde..7d3abed 100644
--- a/client/tests/lsb_dtk/lsb_dtk.py
+++ b/client/tests/lsb_dtk/lsb_dtk.py
@@ -39,12 +39,12 @@
             # First, we download the LSB DTK manager package, worry about
             # installing it later
             dtk_manager_arch = self.config.get('dtk-manager', 'arch-%s' % self.arch)
-            dtk_manager_url = self.config.get('dtk-manager', 
+            dtk_manager_url = self.config.get('dtk-manager',
                                          'tarball_url') % dtk_manager_arch
             if not dtk_manager_url:
                 raise error.TestError('Could not get DTK manager URL from'
                                       ' configuration file')
-    
+
             dtk_md5 = self.config.get('dtk-manager', 'md5-%s' % self.arch)
             if dtk_md5:
                 logging.info('Caching LSB DTK manager RPM')
@@ -54,7 +54,7 @@
             else:
                 raise error.TestError('Could not find DTK manager package md5,'
                                       ' cannot cache DTK manager tarball')
-    
+
             # Get LSB tarball, cache it and uncompress under autotest srcdir
             if self.config.get('lsb', 'override_default_url') == 'no':
                 lsb_url = self.config.get('lsb', 'tarball_url') % self.arch
@@ -71,9 +71,9 @@
             else:
                 raise error.TestError('Could not find LSB package md5, cannot'
                                       ' cache LSB tarball')
-    
+
             utils.extract_tarball_to_dir(lsb_pkg, self.srcdir)
-    
+
             # Lets load a file that contains the list of RPMs
             os.chdir(self.srcdir)
             if not os.path.isfile('inst-config'):
@@ -92,7 +92,7 @@
                 except:
                     # If we don't get a match, no problem
                     pass
-    
+
             # Lets figure out the host distro
             distro_pkg_support = package.os_support()
             if os.path.isfile('/etc/debian_version') and \
@@ -111,7 +111,7 @@
             else:
                 logging.error('OS does not seem to be red hat or debian based')
                 raise EnvironmentError('Cannot handle LSB package installation')
-    
+
             # According to the host distro detection, we can install the packages
             # using the list previously assembled
             if distro_type == 'redhat-based':
@@ -130,7 +130,7 @@
                 for lsb_rpm in lsb_pkg_list:
                     lsb_dpkg = package.convert(lsb_rpm, 'dpkg')
                     package.install(lsb_dpkg, nodeps=True)
-    
+
             self.packages_installed = True
 
 
@@ -179,4 +179,3 @@
 
         logging.info('Executing LSB main test script')
         utils.system(cmd)
-
diff --git a/client/tests/memory_api/memory_api.py b/client/tests/memory_api/memory_api.py
index b1b5d24..71ad542 100755
--- a/client/tests/memory_api/memory_api.py
+++ b/client/tests/memory_api/memory_api.py
@@ -3,70 +3,70 @@
 from autotest_lib.client.common_lib import error
 
 class memory_api(test.test):
-   version = 1
+    version = 1
 
-   def setup(self):
-      utils.system("gcc %s -o %s" %
-                    (os.path.join(self.bindir, "memory_api.c"),
-                     os.path.join(self.tmpdir, "memory_api")))
-      utils.system("gcc %s -o %s" %
-                    (os.path.join(self.bindir, "mremaps.c"),
-                     os.path.join(self.tmpdir, "mremaps")))
+    def setup(self):
+        utils.system("gcc %s -o %s" %
+                      (os.path.join(self.bindir, "memory_api.c"),
+                       os.path.join(self.tmpdir, "memory_api")))
+        utils.system("gcc %s -o %s" %
+                      (os.path.join(self.bindir, "mremaps.c"),
+                       os.path.join(self.tmpdir, "mremaps")))
 
 
-   def initialize(self):
-      self.job.require_gcc()
+    def initialize(self):
+        self.job.require_gcc()
 
 
-   def run_once(self, memsize = "1000000000", args=''):
+    def run_once(self, memsize = "1000000000", args=''):
 
-      vma_re = re.compile("([0-9,a-f]+)-([0-9,a-f]+)")
-      memory_re = re.compile("(\d+) bytes @(0x[0-9,a-f]+)")
+        vma_re = re.compile("([0-9,a-f]+)-([0-9,a-f]+)")
+        memory_re = re.compile("(\d+) bytes @(0x[0-9,a-f]+)")
 
-      vma_max_shift = 0
-      if os.access("/proc/sys/vm/vma_max_shift", os.R_OK):
-          vma_max_shift = int(
-                    open("/proc/sys/vm/vma_max_shift").read().rstrip())
-      p1 = subprocess.Popen('%s/memory_api ' % self.tmpdir  + memsize,
-                            shell=True, stdin=subprocess.PIPE,
-                            stdout=subprocess.PIPE)
-      while p1.poll() is None:
-         output = p1.stdout.readline().rstrip()
-         m = memory_re.search(output)
-         mem_start = 0
-         mem_len = 0
-         if m:
-            mem_start = int(m.group(2), 16)
-            mem_len = int(m.group(1))
-         else:
-            continue
-         map_output = open("/proc/%s/maps_backing" % p1.pid).readlines()
-         vma_count = 0
-         vma_start = 0
-         vma_len = 0
-         expected_vma_count = 1
-         for line in map_output:
-            m = vma_re.search(line)
+        vma_max_shift = 0
+        if os.access("/proc/sys/vm/vma_max_shift", os.R_OK):
+            vma_max_shift = int(
+                      open("/proc/sys/vm/vma_max_shift").read().rstrip())
+        p1 = subprocess.Popen('%s/memory_api ' % self.tmpdir  + memsize,
+                              shell=True, stdin=subprocess.PIPE,
+                              stdout=subprocess.PIPE)
+        while p1.poll() is None:
+            output = p1.stdout.readline().rstrip()
+            m = memory_re.search(output)
+            mem_start = 0
+            mem_len = 0
             if m:
-               vma_start = int("0x%s" % m.group(1),16)
-               vma_end = int("0x%s" % m.group(2),16)
-               if ((vma_start >= mem_start) and
-                   (vma_start < (mem_start + mem_len))):
-                  vma_count+=1
+                mem_start = int(m.group(2), 16)
+                mem_len = int(m.group(1))
+            else:
+                continue
+            map_output = open("/proc/%s/maps_backing" % p1.pid).readlines()
+            vma_count = 0
+            vma_start = 0
+            vma_len = 0
+            expected_vma_count = 1
+            for line in map_output:
+                m = vma_re.search(line)
+                if m:
+                    vma_start = int("0x%s" % m.group(1),16)
+                    vma_end = int("0x%s" % m.group(2),16)
+                    if ((vma_start >= mem_start) and
+                        (vma_start < (mem_start + mem_len))):
+                        vma_count+=1
 
-         if (('file' not in output) and (vma_max_shift != 0)):
-            expected_vma_count = mem_len >> vma_max_shift
-            if (mem_len % (1 << vma_max_shift)):
-               expected_vma_count += 1
-         if expected_vma_count != vma_count:
-            raise error.TestFail("VmaCountMismatch")
-         logging.info("%s %s %d %d", hex(mem_start), hex(mem_len), vma_count,
-                      expected_vma_count)
-         if p1.poll() is None:
-            p1.stdin.write("\n")
-            p1.stdin.flush()
+            if (('file' not in output) and (vma_max_shift != 0)):
+                expected_vma_count = mem_len >> vma_max_shift
+                if (mem_len % (1 << vma_max_shift)):
+                    expected_vma_count += 1
+            if expected_vma_count != vma_count:
+                raise error.TestFail("VmaCountMismatch")
+            logging.info("%s %s %d %d", hex(mem_start), hex(mem_len), vma_count,
+                         expected_vma_count)
+            if p1.poll() is None:
+                p1.stdin.write("\n")
+                p1.stdin.flush()
 
-      if p1.poll() != 0:
-         raise error.TestFail("Unexpected application abort")
+        if p1.poll() != 0:
+            raise error.TestFail("Unexpected application abort")
 
-      utils.system('%s/mremaps ' % self.tmpdir  + '100000000')
+        utils.system('%s/mremaps ' % self.tmpdir  + '100000000')
diff --git a/client/tests/parallel_dd/parallel_dd.py b/client/tests/parallel_dd/parallel_dd.py
index ecdb752..02774f7 100755
--- a/client/tests/parallel_dd/parallel_dd.py
+++ b/client/tests/parallel_dd/parallel_dd.py
@@ -123,7 +123,7 @@
         start = time.time()
         self.fs_read()
         self.fs_read_rate = self.megabytes / (time.time() - start)
-        
+
         self.write_perf_keyval({
             'raw_write' : self.raw_write_rate,
             'raw_read'  : self.raw_read_rate,
diff --git a/client/tests/perfmon/perfmon.py b/client/tests/perfmon/perfmon.py
index 7ccd343..ec1145f 100755
--- a/client/tests/perfmon/perfmon.py
+++ b/client/tests/perfmon/perfmon.py
@@ -22,4 +22,4 @@
         cmd = self.srcdir + '/tests/pfm_tests' + args
         # self.results.append(utils.system_output(cmd, retain_output=True))
         if 'FAIL' in utils.system_output(cmd, retain_output=True):
-                raise error.TestError('some perfmon tests failed')
+            raise error.TestError('some perfmon tests failed')
diff --git a/client/tests/tsc/tsc.py b/client/tests/tsc/tsc.py
index 7077784..5d69edd 100755
--- a/client/tests/tsc/tsc.py
+++ b/client/tests/tsc/tsc.py
@@ -26,7 +26,7 @@
                               (result.exit_status, result.command))
             ## Analyze result.stdout to see if it is possible to form qualified
             ## reason of failure and to raise an appropriate exception.
-            ## For this test we qualify the reason of failure if the 
+            ## For this test we qualify the reason of failure if the
             ## following conditions are met:
             ## (i) result.exit_status = 1
             ## (ii) result.stdout ends with 'FAIL'
@@ -57,5 +57,3 @@
             ## If we are here, we failed to qualify the reason of test failure
             ## Consider it as a test error
             raise error.TestError(default_reason)
-
-