Rename the tables in the databases by prefixing the app name. This is
in preparation for merging the two databases and the two Django
projects into one.

Note that this renames *all* standard Autotest DB tables in both the
autotest_web and tko databases.  If you have scripts written directly
against these databases, *they will break*.  If your scripts access
the RPC interfaces, they should continue to work.

Another patch will be along within the next few weeks to actually move
the TKO tables into the autotest_web database.

From: James Ren <jamesren@google.com>
Signed-off-by: Steve Howard <showard@google.com>

Rename the tables in the databases by prefixing the app name. This is
in preparation for merging the two databases and the two Django projects
into one.

Note that this renames *all* standard Autotest DB tables in both the autotest_web and tko databases.  If you have scripts written directly against these databases, *they will break*.  If your scripts access the RPC interfaces, they should continue to work.

From: James Ren <jamesren@google.com>
Signed-off-by: Steve Howard <showard@google.com>


git-svn-id: http://test.kernel.org/svn/autotest/trunk@4040 592f7852-d20e-0410-864c-8624ca9c26a4
diff --git a/cli/query_results b/cli/query_results
index aa2c3f2..d872778 100755
--- a/cli/query_results
+++ b/cli/query_results
@@ -15,7 +15,7 @@
 parser = optparse.OptionParser()
 parser.add_option('-C', '--columns', action='store', dest='columns',
             default='*', help="""\
-By default or when using the -c flag: 
+By default or when using the -c flag:
 kernel hostname test label machine_group reason tag user status
 
 OR
@@ -50,11 +50,11 @@
 elif options.old_condition:
     where = query_lib.parse_scrub_and_gen_condition(
                 options.old_condition, frontend.test_view_field_dict)
-    view = 'test_view'
+    view = 'tko_test_view'
     tag = 'tag'
 elif options.new_condition:
     where = options.new_condition.replace('%', '%%')
-    view = 'test_view_2'
+    view = 'tko_test_view_2'
     tag = 'job_tag'
 else:
     parser.error('You must specify at least one condition.')
@@ -74,9 +74,9 @@
 if options.host_label:
     database = database_connection.DatabaseConnection("AUTOTEST_WEB")
     database.connect()
-    sql = ("SELECT hostname FROM labels JOIN hosts_labels "
-           "ON labels.id=hosts_labels.label_id JOIN hosts "
-           "ON hosts_labels.host_id=hosts.id WHERE name=%s")
+    sql = ("SELECT hostname FROM afe_labels JOIN afe_hosts_labels "
+           "ON afe_labels.id=afe_hosts_labels.label_id JOIN afe_hosts "
+           "ON afe_hosts_labels.host_id=afe_hosts.id WHERE name=%s")
     results = database.execute(sql, options.host_label)
     hosts = [row[0] for row in results]
     where += " AND hostname IN ('" + "','".join(hosts) + "')"
diff --git a/database/db_utils.py b/database/db_utils.py
new file mode 100644
index 0000000..9b6dfb3
--- /dev/null
+++ b/database/db_utils.py
@@ -0,0 +1,59 @@
+import migrate
+
+
+def drop_views(manager, views):
+    """
+    Drops the specified views from the database
+
+    If a specified view does not exist in the database, this method fails
+    without modification
+
+    @param manager the migration manager
+    @param views the views to drop
+    """
+    _check_exists(manager, views, 'VIEW')
+    for view in views:
+        manager.execute('DROP VIEW `%s`' % view)
+
+
+def rename(manager, mapping):
+    """
+    Renames specified tables in the database
+
+    Use this to rename a specified set of tables in a database. If a source in
+    the mapping does not exist, this method fails without modification.
+
+    @param manager the migration manager
+    @param mapping a dictionary of orig_name => new_name. Any table not matching
+                   an entry in this dictionary will not be renamed
+    """
+    _check_exists(manager, (table for table, _ in mapping.iteritems()), 'TABLE')
+    for orig_name, new_name in mapping.iteritems():
+        manager.execute('RENAME TABLE `%s` TO `%s`' % (orig_name, new_name))
+
+
+def _check_exists(manager, names, type):
+    """
+    Checks if the tables or views exist.
+
+    Raise an Exception if any of the names do not exist
+
+    @param manager the migration manager
+    @param names the table/view names
+    @param type one of 'TABLE' or 'VIEW'
+    """
+    if type == 'TABLE':
+        info_table = 'TABLES'
+    elif type == 'VIEW':
+        info_table = 'VIEWS'
+    else:
+        raise Exception("type parameter must be either 'TABLE' or 'VIEW'")
+
+    query = ('SELECT table_name FROM information_schema.%s '
+             'WHERE table_schema = %%s' % info_table)
+    rows = manager.execute(query, manager.get_db_name())
+    existing_names = [row[0] for row in rows]
+
+    for name in names:
+        if name not in existing_names:
+            raise Exception('%s missing from database, stopping' % name)
diff --git a/database/db_utils_unittest.py b/database/db_utils_unittest.py
new file mode 100644
index 0000000..f0c622c
--- /dev/null
+++ b/database/db_utils_unittest.py
@@ -0,0 +1,81 @@
+#!/usr/bin/python
+
+import unittest
+import common
+from autotest_lib.client.common_lib.test_utils import mock
+from autotest_lib.database import migrate, db_utils
+
+class UtilsTest(unittest.TestCase):
+
+    EXISTS_QUERY_BASE = ('SELECT table_name FROM information_schema.%s '
+                         'WHERE table_schema = %%s')
+    DB_NAME = 'test_db'
+
+
+    def setUp(self):
+        self.god = mock.mock_god()
+        self.manager = self.god.create_mock_class(migrate.MigrationManager,
+                                                  'manager')
+
+        self.god.stub_function(self.manager, 'execute')
+        self.god.stub_function(self.manager, 'get_db_name')
+
+
+    def tearDown(self):
+        self.god.unstub_all()
+
+
+    def test_check_exists(self):
+        views = ('view1', 'view2')
+        def _call_check_exists():
+            db_utils._check_exists(self.manager, views, 'VIEW')
+
+        self._setup_exists_expects(views, 'VIEWS')
+        _call_check_exists()
+        self.god.check_playback()
+
+        self._setup_exists_expects(('view1',), 'VIEWS')
+        self.assertRaises(Exception, _call_check_exists)
+        self.god.check_playback()
+
+
+    def test_drop_views(self):
+        views = ('view1', 'view2')
+        self._setup_exists_expects(views, 'VIEWS')
+
+        for view in views:
+            self.manager.execute.expect_call('DROP VIEW `%s`' % view)
+
+        db_utils.drop_views(self.manager, views)
+        self.god.check_playback()
+
+
+    def test_rename(self):
+        mapping = {
+                'table1' : 'new_table1',
+                'table2' : 'new_table2',
+                }
+        self._setup_exists_expects((name for name, _ in mapping.iteritems()),
+                                   'TABLES')
+
+        for name, new_name in mapping.iteritems():
+            self.manager.execute.expect_call(
+                    'RENAME TABLE `%s` TO `%s`' % (name, new_name))
+
+        db_utils.rename(self.manager, mapping)
+        self.god.check_playback()
+
+
+    def _setup_exists_expects(self, names, table):
+        self.manager.get_db_name.expect_call().and_return(self.DB_NAME)
+        self.manager.execute.expect_call(
+                self.EXISTS_QUERY_BASE % table, self.DB_NAME).and_return(
+                self._create_exists_query_result(names))
+
+
+    def _create_exists_query_result(self, names):
+        return ((name, None) for name in names)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/database/migrate.py b/database/migrate.py
index 9a95d7c..8c4ede7 100755
--- a/database/migrate.py
+++ b/database/migrate.py
@@ -83,7 +83,7 @@
         assert os.path.exists(migrations_dir), migrations_dir + " doesn't exist"
 
 
-    def _get_db_name(self):
+    def get_db_name(self):
         return self._database.get_database_info()['db_name']
 
 
@@ -197,7 +197,7 @@
 
 
     def initialize_test_db(self):
-        db_name = self._get_db_name()
+        db_name = self.get_db_name()
         test_db_name = 'test_' + db_name
         # first, connect to no DB so we can create a test DB
         self._database.connect(db_name='')
@@ -210,7 +210,7 @@
 
     def remove_test_db(self):
         print 'Removing test DB'
-        self.execute('DROP DATABASE ' + self._get_db_name())
+        self.execute('DROP DATABASE ' + self.get_db_name())
         # reset connection back to real DB
         self._database.disconnect()
         self._database.connect()
@@ -229,7 +229,7 @@
 
 
     def do_sync_db(self, version=None):
-        print 'Migration starting for database', self._get_db_name()
+        print 'Migration starting for database', self.get_db_name()
         self.migrate_to_version_or_latest(version)
         print 'Migration complete'
 
@@ -240,7 +240,7 @@
         """
         self.initialize_test_db()
         try:
-            print 'Starting migration test on DB', self._get_db_name()
+            print 'Starting migration test on DB', self.get_db_name()
             self.migrate_to_version_or_latest(version)
             # show schema to the user
             os.system('mysqldump %s --no-data=true '
@@ -273,7 +273,7 @@
         os.close(dump_fd)
         os.remove(dump_file)
         try:
-            print 'Starting migration test on DB', self._get_db_name()
+            print 'Starting migration test on DB', self.get_db_name()
             self.migrate_to_version_or_latest(version)
         finally:
             self.remove_test_db()
diff --git a/frontend/afe/models.py b/frontend/afe/models.py
index 6486300..9af822d 100644
--- a/frontend/afe/models.py
+++ b/frontend/afe/models.py
@@ -64,7 +64,7 @@
 
 
     class Meta:
-        db_table = 'atomic_groups'
+        db_table = 'afe_atomic_groups'
 
 
     def __unicode__(self):
@@ -111,7 +111,7 @@
 
 
     class Meta:
-        db_table = 'labels'
+        db_table = 'afe_labels'
 
     def __unicode__(self):
         return unicode(self.name)
@@ -162,7 +162,7 @@
 
 
     class Meta:
-        db_table = 'users'
+        db_table = 'afe_users'
 
     def __unicode__(self):
         return unicode(self.login)
@@ -191,7 +191,8 @@
                        string_values=True)
 
     hostname = dbmodels.CharField(max_length=255, unique=True)
-    labels = dbmodels.ManyToManyField(Label, blank=True)
+    labels = dbmodels.ManyToManyField(Label, blank=True,
+                                      db_table='afe_hosts_labels')
     locked = dbmodels.BooleanField(default=False)
     synch_id = dbmodels.IntegerField(blank=True, null=True,
                                      editable=settings.FULL_ADMIN)
@@ -342,7 +343,7 @@
 
 
     class Meta:
-        db_table = 'hosts'
+        db_table = 'afe_hosts'
 
     def __unicode__(self):
         return unicode(self.hostname)
@@ -357,7 +358,7 @@
     objects = model_logic.ExtendedManager()
 
     class Meta:
-        db_table = 'host_attributes'
+        db_table = 'afe_host_attributes'
 
 
 class Test(dbmodels.Model, model_logic.ModelExtensions):
@@ -400,14 +401,16 @@
     test_type = dbmodels.SmallIntegerField(choices=Types.choices())
     sync_count = dbmodels.IntegerField(default=1)
     path = dbmodels.CharField(max_length=255, unique=True)
-    dependency_labels = dbmodels.ManyToManyField(Label, blank=True)
 
+    dependency_labels = (
+        dbmodels.ManyToManyField(Label, blank=True,
+                                 db_table='afe_autotests_dependency_labels'))
     name_field = 'name'
     objects = model_logic.ExtendedManager()
 
 
     class Meta:
-        db_table = 'autotests'
+        db_table = 'afe_autotests'
 
     def __unicode__(self):
         return unicode(self.name)
@@ -430,7 +433,7 @@
 
 
     class Meta:
-        db_table = 'profilers'
+        db_table = 'afe_profilers'
 
     def __unicode__(self):
         return unicode(self.name)
@@ -446,8 +449,10 @@
     """
     name = dbmodels.CharField(max_length=255, unique=True)
     description = dbmodels.CharField(max_length=255, blank=True)
-    users = dbmodels.ManyToManyField(User, blank=False)
-    hosts = dbmodels.ManyToManyField(Host, blank=True)
+    users = dbmodels.ManyToManyField(User, blank=False,
+                                     db_table='afe_acl_groups_users')
+    hosts = dbmodels.ManyToManyField(Host, blank=True,
+                                     db_table='afe_acl_groups_hosts')
 
     name_field = 'name'
     objects = model_logic.ExtendedManager()
@@ -566,7 +571,7 @@
 
 
     class Meta:
-        db_table = 'acl_groups'
+        db_table = 'afe_acl_groups'
 
     def __unicode__(self):
         return unicode(self.name)
@@ -585,7 +590,7 @@
         cursor = connection.cursor()
         cursor.execute("""
             SELECT job_id, status, aborted, complete, COUNT(*)
-            FROM host_queue_entries
+            FROM afe_host_queue_entries
             WHERE job_id IN %s
             GROUP BY job_id, status, aborted, complete
             """ % id_list)
@@ -646,7 +651,9 @@
     timeout = dbmodels.IntegerField(default=DEFAULT_TIMEOUT)
     run_verify = dbmodels.BooleanField(default=True)
     email_list = dbmodels.CharField(max_length=250, blank=True)
-    dependency_labels = dbmodels.ManyToManyField(Label, blank=True)
+    dependency_labels = (
+            dbmodels.ManyToManyField(Label, blank=True,
+                                     db_table='afe_jobs_dependency_labels'))
     reboot_before = dbmodels.SmallIntegerField(choices=RebootBefore.choices(),
                                                blank=True,
                                                default=DEFAULT_REBOOT_BEFORE)
@@ -737,7 +744,7 @@
 
 
     class Meta:
-        db_table = 'jobs'
+        db_table = 'afe_jobs'
 
     def __unicode__(self):
         return u'%s (%s-%s)' % (self.name, self.id, self.owner)
@@ -750,13 +757,13 @@
     objects = model_logic.ExtendedManager()
 
     class Meta:
-        db_table = 'ineligible_host_queues'
+        db_table = 'afe_ineligible_host_queues'
 
 
 class HostQueueEntry(dbmodels.Model, model_logic.ModelExtensions):
     Status = host_queue_entry_states.Status
     ACTIVE_STATUSES = host_queue_entry_states.ACTIVE_STATUSES
-    COMPLETE_STATUSES = host_queue_entry_states.COMPLETE_STATUSES 
+    COMPLETE_STATUSES = host_queue_entry_states.COMPLETE_STATUSES
 
     job = dbmodels.ForeignKey(Job)
     host = dbmodels.ForeignKey(Host, blank=True, null=True)
@@ -871,7 +878,7 @@
 
 
     class Meta:
-        db_table = 'host_queue_entries'
+        db_table = 'afe_host_queue_entries'
 
 
 
@@ -895,7 +902,7 @@
         super(AbortedHostQueueEntry, self).save(*args, **kwargs)
 
     class Meta:
-        db_table = 'aborted_host_queue_entries'
+        db_table = 'afe_aborted_host_queue_entries'
 
 
 class RecurringRun(dbmodels.Model, model_logic.ModelExtensions):
@@ -917,7 +924,7 @@
     objects = model_logic.ExtendedManager()
 
     class Meta:
-        db_table = 'recurring_run'
+        db_table = 'afe_recurring_run'
 
     def __unicode__(self):
         return u'RecurringRun(job %s, start %s, period %s, count %s)' % (
@@ -1027,7 +1034,7 @@
 
 
     class Meta:
-        db_table = 'special_tasks'
+        db_table = 'afe_special_tasks'
 
 
     def __unicode__(self):
diff --git a/frontend/afe/rpc_utils.py b/frontend/afe/rpc_utils.py
index 26555d9..8b993a8 100644
--- a/frontend/afe/rpc_utils.py
+++ b/frontend/afe/rpc_utils.py
@@ -100,9 +100,11 @@
                 (running and finished)), ('Cannot specify more than one '
                                           'filter to this function')
 
-    not_queued = ('(SELECT job_id FROM host_queue_entries WHERE status != "%s")'
+    not_queued = ('(SELECT job_id FROM afe_host_queue_entries '
+                  'WHERE status != "%s")'
                   % models.HostQueueEntry.Status.QUEUED)
-    not_finished = '(SELECT job_id FROM host_queue_entries WHERE not complete)'
+    not_finished = ('(SELECT job_id FROM afe_host_queue_entries '
+                    'WHERE not complete)')
 
     if not_yet_run:
         where = ['id NOT IN ' + not_queued]
@@ -121,7 +123,7 @@
     labels.
     """
     extra_args = {}
-    where_str = ('hosts.id in (select host_id from hosts_labels '
+    where_str = ('afe_hosts.id in (select host_id from afe_hosts_labels '
                  'where label_id=%s)')
     extra_args['where'] = [where_str] * len(multiple_labels)
     extra_args['params'] = [models.Label.smart_get(label).id
@@ -144,8 +146,8 @@
                     str(label['id'])
                     for label in only_if_needed_labels.values('id'))
             query = models.Host.objects.add_join(
-                query, 'hosts_labels', join_key='host_id',
-                join_condition=('hosts_labels_exclude_OIN.label_id IN (%s)'
+                query, 'afe_hosts_labels', join_key='host_id',
+                join_condition=('afe_hosts_labels_exclude_OIN.label_id IN (%s)'
                                 % only_if_needed_ids),
                 suffix='_exclude_OIN', exclude=True)
 
@@ -157,9 +159,10 @@
                     str(atomic_group['id'])
                     for atomic_group in atomic_group_labels.values('id'))
             query = models.Host.objects.add_join(
-                    query, 'hosts_labels', join_key='host_id',
-                    join_condition=('hosts_labels_exclude_AG.label_id IN (%s)'
-                                    % atomic_group_label_ids),
+                    query, 'afe_hosts_labels', join_key='host_id',
+                    join_condition=(
+                            'afe_hosts_labels_exclude_AG.label_id IN (%s)'
+                            % atomic_group_label_ids),
                     suffix='_exclude_AG', exclude=True)
 
     assert 'extra_args' not in filter_data
diff --git a/frontend/client/src/autotest/public/EmbeddedTkoClientTest.html b/frontend/client/src/autotest/public/EmbeddedTkoClientTest.html
index 097bd7a..0acea9b 100644
--- a/frontend/client/src/autotest/public/EmbeddedTkoClientTest.html
+++ b/frontend/client/src/autotest/public/EmbeddedTkoClientTest.html
@@ -12,9 +12,9 @@
 
       var plot1 = Autotest.createMetricsPlot(document.getElementById("plot1_canvas"));
       queries = {}
-      queries["__main__"] = "SELECT test_name, AVG(IF(kernel LIKE '2.6.11%', iteration_value, NULL)) '2.6.11', STDDEV(IF(kernel LIKE '2.6.11%', iteration_value, NULL)) 'errors-2.6.11', AVG(IF(kernel LIKE '2.6.18%', iteration_value, NULL)) '2.6.18', STDDEV(IF(kernel LIKE '2.6.18%', iteration_value, NULL)) 'errors-2.6.18' FROM perf_view_2 WHERE test_idx < 1000 AND test_name IN ('dbench', 'tbench') AND iteration_key = 'throughput' AND (kernel LIKE '2.6.11%' OR kernel LIKE '2.6.18%') GROUP BY test_name";
-      queries["__2.6.11__"] = "SELECT test_idx, iteration_value FROM perf_view_2 WHERE test_idx < 1000 AND test_name IN ('dbench', 'tbench') AND iteration_key = 'throughput' AND kernel LIKE '2.6.11%%' AND test_name = %s ORDER BY iteration_value";
-      queries["__2.6.18__"] = "SELECT test_idx, iteration_value FROM perf_view_2 WHERE test_idx < 1000 AND test_name IN ('dbench', 'tbench') AND iteration_key = 'throughput' AND kernel LIKE '2.6.18%%' AND test_name = %s ORDER BY iteration_value";
+      queries["__main__"] = "SELECT test_name, AVG(IF(kernel LIKE '2.6.11%', iteration_value, NULL)) '2.6.11', STDDEV(IF(kernel LIKE '2.6.11%', iteration_value, NULL)) 'errors-2.6.11', AVG(IF(kernel LIKE '2.6.18%', iteration_value, NULL)) '2.6.18', STDDEV(IF(kernel LIKE '2.6.18%', iteration_value, NULL)) 'errors-2.6.18' FROM tko_perf_view_2 WHERE test_idx < 1000 AND test_name IN ('dbench', 'tbench') AND iteration_key = 'throughput' AND (kernel LIKE '2.6.11%' OR kernel LIKE '2.6.18%') GROUP BY test_name";
+      queries["__2.6.11__"] = "SELECT test_idx, iteration_value FROM tko_perf_view_2 WHERE test_idx < 1000 AND test_name IN ('dbench', 'tbench') AND iteration_key = 'throughput' AND kernel LIKE '2.6.11%%' AND test_name = %s ORDER BY iteration_value";
+      queries["__2.6.18__"] = "SELECT test_idx, iteration_value FROM tko_perf_view_2 WHERE test_idx < 1000 AND test_name IN ('dbench', 'tbench') AND iteration_key = 'throughput' AND kernel LIKE '2.6.18%%' AND test_name = %s ORDER BY iteration_value";
       plot1.refresh({
           plot : "Bar",
           invert : [],
@@ -23,8 +23,8 @@
 
       var plot2 = Autotest.createMetricsPlot(document.getElementById("plot2_canvas"));
       queries = {}
-      queries["__main__"] = "SELECT kernel, AVG(iteration_value) 'throughput', STDDEV(iteration_value) 'errors-throughput' FROM perf_view_2 WHERE test_idx < 1000 AND test_name = 'dbench' AND iteration_key ='throughput' GROUP BY kernel";
-      queries["__throughput__"] = "SELECT test_idx, iteration_value FROM perf_view_2 WHERE test_idx < 1000 AND test_name = 'dbench' AND iteration_key ='throughput' AND kernel = %s ORDER BY iteration_value";
+      queries["__main__"] = "SELECT kernel, AVG(iteration_value) 'throughput', STDDEV(iteration_value) 'errors-throughput' FROM tko_perf_view_2 WHERE test_idx < 1000 AND test_name = 'dbench' AND iteration_key ='throughput' GROUP BY kernel";
+      queries["__throughput__"] = "SELECT test_idx, iteration_value FROM tko_perf_view_2 WHERE test_idx < 1000 AND test_name = 'dbench' AND iteration_key ='throughput' AND kernel = %s ORDER BY iteration_value";
       plot2.refresh({
           plot : "Line",
           invert : [],
diff --git a/frontend/client/src/autotest/tko/DBColumnSelector.java b/frontend/client/src/autotest/tko/DBColumnSelector.java
index 58add13..bcfcfc5 100644
--- a/frontend/client/src/autotest/tko/DBColumnSelector.java
+++ b/frontend/client/src/autotest/tko/DBColumnSelector.java
@@ -5,8 +5,8 @@
 
 
 public class DBColumnSelector extends ExtendedListBox {
-    public static final String PERF_VIEW = "perf_view";
-    public static final String TEST_VIEW = "test_view";
+    public static final String PERF_VIEW = "tko_perf_view";
+    public static final String TEST_VIEW = "tko_test_view";
 
     public DBColumnSelector(String view) {
         this(view, false);
diff --git a/frontend/client/src/autotest/tko/MachineQualHistogramFrontend.java b/frontend/client/src/autotest/tko/MachineQualHistogramFrontend.java
index 9ce7d64..5d226cd 100644
--- a/frontend/client/src/autotest/tko/MachineQualHistogramFrontend.java
+++ b/frontend/client/src/autotest/tko/MachineQualHistogramFrontend.java
@@ -60,7 +60,7 @@
             sql.append(" AND ");
         }
         
-        sql.append("status = 'GOOD', test_idx, NULL)) 'good' FROM test_view_outer_joins");
+        sql.append("status = 'GOOD', test_idx, NULL)) 'good' FROM tko_test_view_outer_joins");
         if (hasGFilter) {
             sql.append(" WHERE ");
             sql.append(gFilterString);
diff --git a/frontend/client/src/autotest/tko/MetricsPlotFrontend.java b/frontend/client/src/autotest/tko/MetricsPlotFrontend.java
index 75cad3c..cd4cd51 100644
--- a/frontend/client/src/autotest/tko/MetricsPlotFrontend.java
+++ b/frontend/client/src/autotest/tko/MetricsPlotFrontend.java
@@ -231,7 +231,7 @@
             addSeriesSelects(series, sql);
         }
         
-        sql.append(" FROM perf_view_2");
+        sql.append(" FROM tko_perf_view_2");
             
         String xFilterString = globalFilter.getFilterString();
         if (xFilterString.equals("")) {
@@ -261,7 +261,7 @@
         
         sql.append("SELECT test_idx, ");
         sql.append(valueSelector.getSelectedValue());
-        sql.append(" FROM perf_view_2 WHERE ");
+        sql.append(" FROM tko_perf_view_2 WHERE ");
         
         String seriesFilter = series.getFilterString();
         if (!xFilterString.equals("") || !seriesFilter.equals("")) {
@@ -296,13 +296,13 @@
         
         sql.append(", ");
         sql.append(series.getAggregation());
-        sql.append(ifClause);
+        sql.append(ifClause.toString());
         sql.append(") '");
         sql.append(series.getName());
         sql.append("'");
         if (series.wantErrorBars()) {
             sql.append(", STDDEV(");
-            sql.append(ifClause);
+            sql.append(ifClause.toString());
             sql.append(") 'errors-");
             sql.append(series.getName());
             sql.append("'");
diff --git a/frontend/make_superuser.py b/frontend/make_superuser.py
index d04c7cb..1d20578 100755
--- a/frontend/make_superuser.py
+++ b/frontend/make_superuser.py
@@ -24,20 +24,20 @@
 for username in sys.argv[1:]:
     cur.execute("""
         SELECT access_level
-        FROM users
+        FROM afe_users
         WHERE login = %s""", username)
     row = cur.fetchone()
 
     if row is None:
         print "User %s does not exist. Creating..." % username
         cur.execute("""
-            INSERT INTO users (login, access_level)
+            INSERT INTO afe_users (login, access_level)
             VALUES (%s, 100)""", username)
         print "    Done"
     else:
         print "Updating user %s..." % username
         cur.execute("""
-            UPDATE users
+            UPDATE afe_users
             SET access_level = 100
             WHERE login = %s""", username)
         if (cur.rowcount == 1):
diff --git a/frontend/migrations/044_rename_afe_tables.py b/frontend/migrations/044_rename_afe_tables.py
new file mode 100644
index 0000000..3c01ba4
--- /dev/null
+++ b/frontend/migrations/044_rename_afe_tables.py
@@ -0,0 +1,37 @@
+import common
+from autotest_lib.database import db_utils
+
+
+ORIG_NAMES = (
+        'aborted_host_queue_entries',
+        'acl_groups',
+        'acl_groups_hosts',
+        'acl_groups_users',
+        'atomic_groups',
+        'autotests',
+        'autotests_dependency_labels',
+        'host_attributes',
+        'host_queue_entries',
+        'hosts',
+        'hosts_labels',
+        'ineligible_host_queues',
+        'jobs',
+        'jobs_dependency_labels',
+        'labels',
+        'profilers',
+        'recurring_run',
+        'special_tasks',
+        'users',
+        )
+
+RENAMES_UP = dict((name, 'afe_' + name) for name in ORIG_NAMES)
+
+RENAMES_DOWN = dict((value, key) for key, value in RENAMES_UP.iteritems())
+
+
+def migrate_up(manager):
+    db_utils.rename(manager, RENAMES_UP)
+
+
+def migrate_down(manager):
+    db_utils.rename(manager, RENAMES_DOWN)
diff --git a/frontend/migrations/common.py b/frontend/migrations/common.py
new file mode 100644
index 0000000..4c8760b
--- /dev/null
+++ b/frontend/migrations/common.py
@@ -0,0 +1,8 @@
+import os, sys
+dirname = os.path.dirname(sys.modules[__name__].__file__)
+autotest_dir = os.path.abspath(os.path.join(dirname, "..", '..'))
+client_dir = os.path.join(autotest_dir, "client")
+sys.path.insert(0, client_dir)
+import setup_modules
+sys.path.pop(0)
+setup_modules.setup(base_path=autotest_dir, root_module_name="autotest_lib")
diff --git a/new_tko/tko/models.py b/new_tko/tko/models.py
index 9b4479d..daf1368 100644
--- a/new_tko/tko/models.py
+++ b/new_tko/tko/models.py
@@ -107,7 +107,7 @@
     owner = dbmodels.CharField(blank=True, max_length=240)
 
     class Meta:
-        db_table = 'machines'
+        db_table = 'tko_machines'
 
 
 class Kernel(dbmodels.Model):
@@ -117,7 +117,7 @@
     printable = dbmodels.CharField(max_length=300)
 
     class Meta:
-        db_table = 'kernels'
+        db_table = 'tko_kernels'
 
 
 class Patch(dbmodels.Model):
@@ -127,7 +127,7 @@
     the_hash = dbmodels.CharField(blank=True, max_length=105, db_column='hash')
 
     class Meta:
-        db_table = 'patches'
+        db_table = 'tko_patches'
 
 
 class Status(dbmodels.Model):
@@ -135,7 +135,7 @@
     word = dbmodels.CharField(max_length=30)
 
     class Meta:
-        db_table = 'status'
+        db_table = 'tko_status'
 
 
 class Job(dbmodels.Model):
@@ -150,7 +150,7 @@
     afe_job_id = dbmodels.IntegerField(null=True, default=None)
 
     class Meta:
-        db_table = 'jobs'
+        db_table = 'tko_jobs'
 
 
 class Test(dbmodels.Model, model_logic.ModelExtensions,
@@ -185,7 +185,7 @@
 
 
     class Meta:
-        db_table = 'tests'
+        db_table = 'tko_tests'
 
 
 class TestAttribute(dbmodels.Model, model_logic.ModelExtensions):
@@ -197,7 +197,7 @@
     objects = model_logic.ExtendedManager()
 
     class Meta:
-        db_table = 'test_attributes'
+        db_table = 'tko_test_attributes'
 
 
 class IterationAttribute(dbmodels.Model, model_logic.ModelExtensions):
@@ -211,7 +211,7 @@
     objects = model_logic.ExtendedManager()
 
     class Meta:
-        db_table = 'iteration_attributes'
+        db_table = 'tko_iteration_attributes'
 
 
 class IterationResult(dbmodels.Model, model_logic.ModelExtensions):
@@ -225,19 +225,20 @@
     objects = model_logic.ExtendedManager()
 
     class Meta:
-        db_table = 'iteration_result'
+        db_table = 'tko_iteration_result'
 
 
 class TestLabel(dbmodels.Model, model_logic.ModelExtensions):
     name = dbmodels.CharField(max_length=80, unique=True)
     description = dbmodels.TextField(blank=True)
-    tests = dbmodels.ManyToManyField(Test, blank=True)
+    tests = dbmodels.ManyToManyField(Test, blank=True,
+                                     db_table='tko_test_labels_tests')
 
     name_field = 'name'
     objects = model_logic.ExtendedManager()
 
     class Meta:
-        db_table = 'test_labels'
+        db_table = 'tko_test_labels'
 
 
 class SavedQuery(dbmodels.Model, model_logic.ModelExtensions):
@@ -247,7 +248,7 @@
     url_token = dbmodels.TextField()
 
     class Meta:
-        db_table = 'saved_queries'
+        db_table = 'tko_saved_queries'
 
 
 class EmbeddedGraphingQuery(dbmodels.Model, model_logic.ModelExtensions):
@@ -264,7 +265,7 @@
     cached_png = dbmodels.TextField(editable=False)
 
     class Meta:
-        db_table = 'embedded_graphing_queries'
+        db_table = 'tko_embedded_graphing_queries'
 
 
 # views
@@ -289,14 +290,16 @@
                             suffix=None, exclude=False):
         if suffix is None:
             suffix = self._get_include_exclude_suffix(exclude)
-        return self.add_join(query_set, 'test_attributes', join_key='test_idx',
+        return self.add_join(query_set, 'tko_test_attributes',
+                             join_key='test_idx',
                              join_condition=join_condition,
                              suffix=suffix, exclude=exclude)
 
 
     def _add_label_pivot_table_join(self, query_set, suffix, join_condition='',
                                     exclude=False, force_left_join=False):
-        return self.add_join(query_set, 'test_labels_tests', join_key='test_id',
+        return self.add_join(query_set, 'tko_test_labels_tests',
+                             join_key='test_id',
                              join_condition=join_condition,
                              suffix=suffix, exclude=exclude,
                              force_left_join=force_left_join)
@@ -308,12 +311,12 @@
 
         # since we're not joining from the original table, we can't use
         # self.add_join() again
-        second_join_alias = 'test_labels' + suffix
+        second_join_alias = 'tko_test_labels' + suffix
         second_join_condition = ('%s.id = %s.testlabel_id' %
                                  (second_join_alias,
-                                  'test_labels_tests' + suffix))
+                                  'tko_test_labels_tests' + suffix))
         filter_object = self._CustomSqlQ()
-        filter_object.add_join('test_labels',
+        filter_object.add_join('tko_test_labels',
                                second_join_condition,
                                query_set.query.LOUTER,
                                alias=second_join_alias)
@@ -333,7 +336,7 @@
     def _include_or_exclude_labels(self, query_set, label_names, exclude=False):
         label_ids = self._get_label_ids_from_names(label_names)
         suffix = self._get_include_exclude_suffix(exclude)
-        condition = ('test_labels_tests%s.testlabel_id IN (%s)' %
+        condition = ('tko_test_labels_tests%s.testlabel_id IN (%s)' %
                      (suffix, ','.join(label_ids)))
         return self._add_label_pivot_table_join(query_set,
                                                 join_condition=condition,
@@ -350,7 +353,7 @@
         # TODO: make this feature obsolete in favor of include_labels and
         # exclude_labels
         extra_where = filter_data.get('extra_where', '')
-        if 'test_labels' in extra_where:
+        if 'tko_test_labels' in extra_where:
             query_set = self._add_label_joins(query_set)
             joined = True
 
@@ -380,7 +383,7 @@
                 exclude=True)
             joined = True
 
-        test_attributes = filter_data.pop('test_attributes', [])
+        test_attributes = filter_data.pop('test_attributes', [])
         for attribute in test_attributes:
             query_set = self.join_attribute(query_set, attribute)
             joined = True
@@ -389,10 +392,11 @@
             filter_data['no_distinct'] = True
 
         # TODO: make test_attributes_host_labels obsolete too
-        if include_host_labels or 'test_attributes_host_labels' in extra_where:
+        if (include_host_labels or
+                'tko_test_attributes_host_labels' in extra_where):
             query_set = self._add_attribute_join(
                 query_set, suffix='_host_labels',
-                join_condition='test_attributes_host_labels.attribute = '
+                join_condition='tko_test_attributes_host_labels.attribute = '
                                '"host-labels"')
 
         return query_set
@@ -408,7 +412,7 @@
     def query_test_label_ids(self, filter_data):
         query_set = self.model.query_objects(filter_data)
         query_set = self._add_label_joins(query_set, suffix='_list')
-        rows = self._custom_select_query(query_set, ['test_labels_list.id'])
+        rows = self._custom_select_query(query_set, ['tko_test_labels_list.id'])
         return [row[0] for row in rows if row[0] is not None]
 
 
@@ -558,4 +562,4 @@
 
 
     class Meta:
-        db_table = 'test_view_2'
+        db_table = 'tko_test_view_2'
diff --git a/new_tko/tko/rpc_interface.py b/new_tko/tko/rpc_interface.py
index ad1fa3a..09518c5 100644
--- a/new_tko/tko/rpc_interface.py
+++ b/new_tko/tko/rpc_interface.py
@@ -344,7 +344,7 @@
     extra_where = test_filter_data.get('extra_where', '')
     if extra_where:
         extra_where = '(' + extra_where + ') AND '
-    extra_where += 'test_labels.id = %s' % label.id
+    extra_where += 'tko_test_labels.id = %s' % label.id
     test_filter_data['extra_where'] = extra_where
     test_ids = models.TestView.objects.query_test_ids(test_filter_data)
 
@@ -426,7 +426,7 @@
         'iozone' : '32768-4096-fwrite'
     }
 
-    perf_view = [
+    tko_perf_view = [
         ['Test Index', 'test_idx'],
         ['Job Index', 'job_idx'],
         ['Test Name', 'test_name'],
@@ -460,8 +460,8 @@
     result['test_labels'] = get_test_labels(sort_by=['name'])
     result['current_user'] = {'login' : thread_local.get_user()}
     result['benchmark_key'] = benchmark_key
-    result['perf_view'] = perf_view
-    result['test_view'] = model_fields
+    result['tko_perf_view'] = tko_perf_view
+    result['tko_test_view'] = model_fields
     result['preconfigs'] = preconfigs.manager.all_preconfigs()
     result['motd'] = rpc_utils.get_motd()
 
diff --git a/new_tko/tko/rpc_interface_unittest.py b/new_tko/tko/rpc_interface_unittest.py
index 46c5fa0..8c8f952 100755
--- a/new_tko/tko/rpc_interface_unittest.py
+++ b/new_tko/tko/rpc_interface_unittest.py
@@ -11,43 +11,43 @@
 # this will need to be updated when the view changes for the test to be
 # consistent with reality
 _CREATE_TEST_VIEW = """
-CREATE VIEW test_view_2 AS
-SELECT  tests.test_idx AS test_idx,
-        tests.job_idx AS job_idx,
-        tests.test AS test_name,
-        tests.subdir AS subdir,
-        tests.kernel_idx AS kernel_idx,
-        tests.status AS status_idx,
-        tests.reason AS reason,
-        tests.machine_idx AS machine_idx,
-        tests.started_time AS test_started_time,
-        tests.finished_time AS test_finished_time,
-        jobs.tag AS job_tag,
-        jobs.label AS job_name,
-        jobs.username AS job_owner,
-        jobs.queued_time AS job_queued_time,
-        jobs.started_time AS job_started_time,
-        jobs.finished_time AS job_finished_time,
-        jobs.afe_job_id AS afe_job_id,
-        machines.hostname AS hostname,
-        machines.machine_group AS platform,
-        machines.owner AS machine_owner,
-        kernels.kernel_hash AS kernel_hash,
-        kernels.base AS kernel_base,
-        kernels.printable AS kernel,
-        status.word AS status
-FROM tests
-INNER JOIN jobs ON jobs.job_idx = tests.job_idx
-INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
-INNER JOIN kernels ON kernels.kernel_idx = tests.kernel_idx
-INNER JOIN status ON status.status_idx = tests.status;
+CREATE VIEW tko_test_view_2 AS
+SELECT  tko_tests.test_idx AS test_idx,
+        tko_tests.job_idx AS job_idx,
+        tko_tests.test AS test_name,
+        tko_tests.subdir AS subdir,
+        tko_tests.kernel_idx AS kernel_idx,
+        tko_tests.status AS status_idx,
+        tko_tests.reason AS reason,
+        tko_tests.machine_idx AS machine_idx,
+        tko_tests.started_time AS test_started_time,
+        tko_tests.finished_time AS test_finished_time,
+        tko_jobs.tag AS job_tag,
+        tko_jobs.label AS job_name,
+        tko_jobs.username AS job_owner,
+        tko_jobs.queued_time AS job_queued_time,
+        tko_jobs.started_time AS job_started_time,
+        tko_jobs.finished_time AS job_finished_time,
+        tko_jobs.afe_job_id AS afe_job_id,
+        tko_machines.hostname AS hostname,
+        tko_machines.machine_group AS platform,
+        tko_machines.owner AS machine_owner,
+        tko_kernels.kernel_hash AS kernel_hash,
+        tko_kernels.base AS kernel_base,
+        tko_kernels.printable AS kernel,
+        tko_status.word AS status
+FROM tko_tests
+INNER JOIN tko_jobs ON tko_jobs.job_idx = tko_tests.job_idx
+INNER JOIN tko_machines ON tko_machines.machine_idx = tko_jobs.machine_idx
+INNER JOIN tko_kernels ON tko_kernels.kernel_idx = tko_tests.kernel_idx
+INNER JOIN tko_status ON tko_status.status_idx = tko_tests.status;
 """
 
 # this will need to be updated if the table schemas change (or removed if we
 # add proper primary keys)
 _CREATE_ITERATION_ATTRIBUTES = """
-CREATE TABLE "iteration_attributes" (
-    "test_idx" integer NOT NULL REFERENCES "tests" ("test_idx"),
+CREATE TABLE "tko_iteration_attributes" (
+    "test_idx" integer NOT NULL REFERENCES "tko_tests" ("test_idx"),
     "iteration" integer NOT NULL,
     "attribute" varchar(90) NOT NULL,
     "value" varchar(300) NOT NULL
@@ -55,8 +55,8 @@
 """
 
 _CREATE_ITERATION_RESULTS = """
-CREATE TABLE "iteration_result" (
-    "test_idx" integer NOT NULL REFERENCES "tests" ("test_idx"),
+CREATE TABLE "tko_iteration_result" (
+    "test_idx" integer NOT NULL REFERENCES "tko_tests" ("test_idx"),
     "iteration" integer NOT NULL,
     "attribute" varchar(90) NOT NULL,
     "value" numeric(12, 31) NULL
@@ -71,7 +71,7 @@
     So manually remove that table and replace it with a view.
     """
     cursor = connection.cursor()
-    cursor.execute('DROP TABLE test_view_2')
+    cursor.execute('DROP TABLE tko_test_view_2')
     cursor.execute(_CREATE_TEST_VIEW)
 
 
@@ -81,9 +81,9 @@
     Django models.  So fix up the generated schema to match the real schema.
     """
     cursor = connection.cursor()
-    cursor.execute('DROP TABLE iteration_attributes')
+    cursor.execute('DROP TABLE tko_iteration_attributes')
     cursor.execute(_CREATE_ITERATION_ATTRIBUTES)
-    cursor.execute('DROP TABLE iteration_result')
+    cursor.execute('DROP TABLE tko_iteration_result')
     cursor.execute(_CREATE_ITERATION_RESULTS)
 
 
@@ -152,19 +152,19 @@
         models.TestAttribute.objects.create(test=job1_test1,
                                             attribute='myattr2', value='myval2')
 
-        self._add_iteration_keyval('iteration_attributes', test=job1_test1,
+        self._add_iteration_keyval('tko_iteration_attributes', test=job1_test1,
                                    iteration=1, attribute='iattr',
                                    value='ival')
-        self._add_iteration_keyval('iteration_attributes', test=job1_test1,
+        self._add_iteration_keyval('tko_iteration_attributes', test=job1_test1,
                                    iteration=1, attribute='iattr2',
                                    value='ival2')
-        self._add_iteration_keyval('iteration_result', test=job1_test1,
+        self._add_iteration_keyval('tko_iteration_result', test=job1_test1,
                                    iteration=1, attribute='iresult', value=1)
-        self._add_iteration_keyval('iteration_result', test=job1_test1,
+        self._add_iteration_keyval('tko_iteration_result', test=job1_test1,
                                    iteration=1, attribute='iresult2', value=2)
-        self._add_iteration_keyval('iteration_result', test=job1_test1,
+        self._add_iteration_keyval('tko_iteration_result', test=job1_test1,
                                    iteration=2, attribute='iresult', value=3)
-        self._add_iteration_keyval('iteration_result', test=job1_test1,
+        self._add_iteration_keyval('tko_iteration_result', test=job1_test1,
                                    iteration=2, attribute='iresult2', value=4)
 
         label1 = models.TestLabel.objects.create(name='testlabel1')
@@ -412,7 +412,7 @@
 
     def test_get_test_attributes(self):
         tests = rpc_interface.get_test_views(
-                test_attributes=['myattr', 'myattr2'])
+                test_attributes=['myattr', 'myattr2'])
         self.assertEquals(len(tests), 3)
 
         self.assertEquals(tests[0]['attribute_myattr'], 'myval')
@@ -425,11 +425,11 @@
 
     def test_grouping_with_test_attributes(self):
         num_groups = rpc_interface.get_num_groups(['attribute_myattr'],
-                                                test_attributes=['myattr'])
+                                                test_attributes=['myattr'])
         self.assertEquals(num_groups, 2)
 
         counts = rpc_interface.get_group_counts(['attribute_myattr'],
-                                                test_attributes=['myattr'])
+                                                test_attributes=['myattr'])
         groups = counts['groups']
         self.assertEquals(len(groups), num_groups)
         self.assertEquals(groups[0]['attribute_myattr'], None)
diff --git a/new_tko/tko/tko_rpc_utils.py b/new_tko/tko/tko_rpc_utils.py
index ece70fc..be6e754 100644
--- a/new_tko/tko/tko_rpc_utils.py
+++ b/new_tko/tko/tko_rpc_utils.py
@@ -78,9 +78,9 @@
     """
     Example result for machine_labels=['Index', 'Diskful']:
     CONCAT_WS(",",
-              IF(FIND_IN_SET("Diskful", test_attributes_host_labels.value),
+              IF(FIND_IN_SET("Diskful", tko_test_attributes_host_labels.value),
                  "Diskful", NULL),
-              IF(FIND_IN_SET("Index", test_attributes_host_labels.value),
+              IF(FIND_IN_SET("Index", tko_test_attributes_host_labels.value),
                  "Index", NULL))
 
     This would result in field values "Diskful,Index", "Diskful", "Index", NULL.
@@ -89,7 +89,7 @@
     if_clauses = []
     for label in machine_labels:
         if_clauses.append(
-            'IF(FIND_IN_SET("%s", test_attributes_host_labels.value), '
+            'IF(FIND_IN_SET("%s", tko_test_attributes_host_labels.value), '
                '"%s", NULL)' % (label, label))
     return 'CONCAT_WS(",", %s)' % ', '.join(if_clauses)
 
diff --git a/scheduler/monitor_db.py b/scheduler/monitor_db.py
index 93c18eb..d4ec92a 100755
--- a/scheduler/monitor_db.py
+++ b/scheduler/monitor_db.py
@@ -263,12 +263,13 @@
     def _get_ready_hosts(self):
         # avoid any host with a currently active queue entry against it
         hosts = Host.fetch(
-            joins='LEFT JOIN host_queue_entries AS active_hqe '
-                  'ON (hosts.id = active_hqe.host_id AND '
+            joins='LEFT JOIN afe_host_queue_entries AS active_hqe '
+                  'ON (afe_hosts.id = active_hqe.host_id AND '
                       'active_hqe.active)',
             where="active_hqe.host_id IS NULL "
-                  "AND NOT hosts.locked "
-                  "AND (hosts.status IS NULL OR hosts.status = 'Ready')")
+                  "AND NOT afe_hosts.locked "
+                  "AND (afe_hosts.status IS NULL "
+                          "OR afe_hosts.status = 'Ready')")
         return dict((host.id, host) for host in hosts)
 
 
@@ -300,11 +301,12 @@
     @classmethod
     def _get_job_acl_groups(cls, job_ids):
         query = """
-        SELECT jobs.id, acl_groups_users.aclgroup_id
-        FROM jobs
-        INNER JOIN users ON users.login = jobs.owner
-        INNER JOIN acl_groups_users ON acl_groups_users.user_id = users.id
-        WHERE jobs.id IN (%s)
+        SELECT afe_jobs.id, afe_acl_groups_users.aclgroup_id
+        FROM afe_jobs
+        INNER JOIN afe_users ON afe_users.login = afe_jobs.owner
+        INNER JOIN afe_acl_groups_users ON
+                afe_acl_groups_users.user_id = afe_users.id
+        WHERE afe_jobs.id IN (%s)
         """
         return cls._get_many2many_dict(query, job_ids)
 
@@ -313,7 +315,7 @@
     def _get_job_ineligible_hosts(cls, job_ids):
         query = """
         SELECT job_id, host_id
-        FROM ineligible_host_queues
+        FROM afe_ineligible_host_queues
         WHERE job_id IN (%s)
         """
         return cls._get_many2many_dict(query, job_ids)
@@ -323,7 +325,7 @@
     def _get_job_dependencies(cls, job_ids):
         query = """
         SELECT job_id, label_id
-        FROM jobs_dependency_labels
+        FROM afe_jobs_dependency_labels
         WHERE job_id IN (%s)
         """
         return cls._get_many2many_dict(query, job_ids)
@@ -333,7 +335,7 @@
     def _get_host_acls(cls, host_ids):
         query = """
         SELECT host_id, aclgroup_id
-        FROM acl_groups_hosts
+        FROM afe_acl_groups_hosts
         WHERE host_id IN (%s)
         """
         return cls._get_many2many_dict(query, host_ids)
@@ -345,7 +347,7 @@
             return {}, {}
         query = """
         SELECT label_id, host_id
-        FROM hosts_labels
+        FROM afe_hosts_labels
         WHERE host_id IN (%s)
         """ % cls._get_sql_id_list(host_ids)
         rows = _db.execute(query)
@@ -929,12 +931,13 @@
         # exclude hosts with active queue entries unless the SpecialTask is for
         # that queue entry
         queued_tasks = models.Host.objects.add_join(
-                queued_tasks, 'host_queue_entries', 'host_id',
-                join_condition='host_queue_entries.active',
+                queued_tasks, 'afe_host_queue_entries', 'host_id',
+                join_condition='afe_host_queue_entries.active',
                 force_left_join=True)
         queued_tasks = queued_tasks.extra(
-                where=['(host_queue_entries.id IS NULL OR '
-                       'host_queue_entries.id = special_tasks.queue_entry_id)'])
+                where=['(afe_host_queue_entries.id IS NULL OR '
+                       'afe_host_queue_entries.id = '
+                               'afe_special_tasks.queue_entry_id)'])
 
         # reorder tasks by priority
         task_priority_order = [models.SpecialTask.Task.REPAIR,
@@ -993,9 +996,9 @@
     def _get_pending_queue_entries(self):
         # prioritize by job priority, then non-metahost over metahost, then FIFO
         return list(HostQueueEntry.fetch(
-            joins='INNER JOIN jobs ON (job_id=jobs.id)',
+            joins='INNER JOIN afe_jobs ON (job_id=afe_jobs.id)',
             where='NOT complete AND NOT active AND status="Queued"',
-            order_by='jobs.priority DESC, meta_host, job_id'))
+            order_by='afe_jobs.priority DESC, meta_host, job_id'))
 
 
     def _refresh_pending_queue_entries(self):
@@ -2669,18 +2672,18 @@
 
 
 class IneligibleHostQueue(DBObject):
-    _table_name = 'ineligible_host_queues'
+    _table_name = 'afe_ineligible_host_queues'
     _fields = ('id', 'job_id', 'host_id')
 
 
 class AtomicGroup(DBObject):
-    _table_name = 'atomic_groups'
+    _table_name = 'afe_atomic_groups'
     _fields = ('id', 'name', 'description', 'max_number_of_machines',
                'invalid')
 
 
 class Label(DBObject):
-    _table_name = 'labels'
+    _table_name = 'afe_labels'
     _fields = ('id', 'name', 'kernel_config', 'platform', 'invalid',
                'only_if_needed', 'atomic_group_id')
 
@@ -2691,7 +2694,7 @@
 
 
 class Host(DBObject):
-    _table_name = 'hosts'
+    _table_name = 'afe_hosts'
     _fields = ('id', 'hostname', 'locked', 'synch_id', 'status',
                'invalid', 'protection', 'locked_by_id', 'lock_time', 'dirty')
 
@@ -2706,11 +2709,12 @@
         Returns a tuple (platform_name, list_of_all_label_names).
         """
         rows = _db.execute("""
-                SELECT labels.name, labels.platform
-                FROM labels
-                INNER JOIN hosts_labels ON labels.id = hosts_labels.label_id
-                WHERE hosts_labels.host_id = %s
-                ORDER BY labels.name
+                SELECT afe_labels.name, afe_labels.platform
+                FROM afe_labels
+                INNER JOIN afe_hosts_labels ON
+                        afe_labels.id = afe_hosts_labels.label_id
+                WHERE afe_hosts_labels.host_id = %s
+                ORDER BY afe_labels.name
                 """, (self.id,))
         platform = None
         all_labels = []
@@ -2762,7 +2766,7 @@
 
 
 class HostQueueEntry(DBObject):
-    _table_name = 'host_queue_entries'
+    _table_name = 'afe_host_queue_entries'
     _fields = ('id', 'job_id', 'host_id', 'status', 'meta_host',
                'active', 'complete', 'deleted', 'execution_subdir',
                'atomic_group_id', 'aborted', 'started_on')
@@ -2818,8 +2822,8 @@
         if self.meta_host:
             yield Label(id=self.meta_host, always_query=False)
         labels = Label.fetch(
-                joins="JOIN jobs_dependency_labels AS deps "
-                      "ON (labels.id = deps.label_id)",
+                joins="JOIN afe_jobs_dependency_labels AS deps "
+                      "ON (afe_labels.id = deps.label_id)",
                 where="deps.job_id = %d" % self.job.id)
         for label in labels:
             yield label
@@ -3021,11 +3025,12 @@
         if hasattr(self, "_aborted_by"):
             return
         rows = _db.execute("""
-                SELECT users.login, aborted_host_queue_entries.aborted_on
-                FROM aborted_host_queue_entries
-                INNER JOIN users
-                ON users.id = aborted_host_queue_entries.aborted_by_id
-                WHERE aborted_host_queue_entries.queue_entry_id = %s
+                SELECT afe_users.login,
+                        afe_aborted_host_queue_entries.aborted_on
+                FROM afe_aborted_host_queue_entries
+                INNER JOIN afe_users
+                ON afe_users.id = afe_aborted_host_queue_entries.aborted_by_id
+                WHERE afe_aborted_host_queue_entries.queue_entry_id = %s
                 """, (self.id,))
         if rows:
             self._aborted_by, self._aborted_on = rows[0]
@@ -3111,7 +3116,7 @@
 
 
 class Job(DBObject):
-    _table_name = 'jobs'
+    _table_name = 'afe_jobs'
     _fields = ('id', 'owner', 'name', 'priority', 'control_file',
                'control_type', 'created_on', 'synch_count', 'timeout',
                'run_verify', 'email_list', 'reboot_before', 'reboot_after',
@@ -3156,7 +3161,7 @@
 
     def get_host_queue_entries(self):
         rows = _db.execute("""
-                SELECT * FROM host_queue_entries
+                SELECT * FROM afe_host_queue_entries
                 WHERE job_id= %s
         """, (self.id,))
         entries = [HostQueueEntry(row=i) for i in rows]
@@ -3247,7 +3252,7 @@
         sql = "job_id=%s" % self.id
         if clause:
             sql += " AND (%s)" % clause
-        return self.count(sql, table='host_queue_entries')
+        return self.count(sql, table='afe_host_queue_entries')
 
 
     def num_queued(self):
diff --git a/scheduler/monitor_db_cleanup.py b/scheduler/monitor_db_cleanup.py
index ae7d938..dfe2075 100644
--- a/scheduler/monitor_db_cleanup.py
+++ b/scheduler/monitor_db_cleanup.py
@@ -73,10 +73,10 @@
         logging.info('Aborting all jobs that have passed maximum runtime')
         rows = self._db.execute("""
             SELECT hqe.id
-            FROM host_queue_entries AS hqe
-            INNER JOIN jobs ON (hqe.job_id = jobs.id)
+            FROM afe_host_queue_entries AS hqe
+            INNER JOIN afe_jobs ON (hqe.job_id = afe_jobs.id)
             WHERE NOT hqe.complete AND NOT hqe.aborted AND
-            hqe.started_on + INTERVAL jobs.max_runtime_hrs HOUR < NOW()""")
+            hqe.started_on + INTERVAL afe_jobs.max_runtime_hrs HOUR < NOW()""")
         query = models.HostQueueEntry.objects.filter(
             id__in=[row[0] for row in rows])
         for queue_entry in query.distinct():
@@ -146,8 +146,8 @@
         # treats all IN subqueries as dependent, so this optimizes much
         # better
         self._db.execute("""
-            DELETE ihq FROM ineligible_host_queues ihq
-            LEFT JOIN (SELECT DISTINCT job_id FROM host_queue_entries
+            DELETE ihq FROM afe_ineligible_host_queues ihq
+            LEFT JOIN (SELECT DISTINCT job_id FROM afe_host_queue_entries
                        WHERE NOT complete) hqe
             USING (job_id) WHERE hqe.job_id IS NULL""")
 
@@ -198,13 +198,14 @@
 
     def _check_for_multiple_platform_hosts(self):
         rows = self._db.execute("""
-            SELECT hosts.id, hostname, COUNT(1) AS platform_count,
-                   GROUP_CONCAT(labels.name)
-            FROM hosts
-            INNER JOIN hosts_labels ON hosts.id = hosts_labels.host_id
-            INNER JOIN labels ON hosts_labels.label_id = labels.id
-            WHERE labels.platform
-            GROUP BY hosts.id
+            SELECT afe_hosts.id, hostname, COUNT(1) AS platform_count,
+                   GROUP_CONCAT(afe_labels.name)
+            FROM afe_hosts
+            INNER JOIN afe_hosts_labels ON
+                    afe_hosts.id = afe_hosts_labels.host_id
+            INNER JOIN afe_labels ON afe_hosts_labels.label_id = afe_labels.id
+            WHERE afe_labels.platform
+            GROUP BY afe_hosts.id
             HAVING platform_count > 1
             ORDER BY hostname""")
         if rows:
@@ -217,12 +218,12 @@
     def _check_for_no_platform_hosts(self):
         rows = self._db.execute("""
             SELECT hostname
-            FROM hosts
-            LEFT JOIN hosts_labels
-              ON hosts.id = hosts_labels.host_id
-              AND hosts_labels.label_id IN (SELECT id FROM labels
-                                            WHERE platform)
-            WHERE NOT hosts.invalid AND hosts_labels.host_id IS NULL""")
+            FROM afe_hosts
+            LEFT JOIN afe_hosts_labels
+              ON afe_hosts.id = afe_hosts_labels.host_id
+              AND afe_hosts_labels.label_id IN (SELECT id FROM afe_labels
+                                                WHERE platform)
+            WHERE NOT afe_hosts.invalid AND afe_hosts_labels.host_id IS NULL""")
         if rows:
             subject = '%s hosts with no platform' % self._db.rowcount
             self._send_inconsistency_message(
@@ -231,16 +232,18 @@
 
     def _check_for_multiple_atomic_group_hosts(self):
         rows = self._db.execute("""
-            SELECT hosts.id, hostname, COUNT(DISTINCT atomic_groups.name) AS
-                   atomic_group_count, GROUP_CONCAT(labels.name),
-                   GROUP_CONCAT(atomic_groups.name)
-            FROM hosts
-            INNER JOIN hosts_labels ON hosts.id = hosts_labels.host_id
-            INNER JOIN labels ON hosts_labels.label_id = labels.id
-            INNER JOIN atomic_groups ON
-                       labels.atomic_group_id = atomic_groups.id
-            WHERE NOT hosts.invalid AND NOT labels.invalid
-            GROUP BY hosts.id
+            SELECT afe_hosts.id, hostname,
+                   COUNT(DISTINCT afe_atomic_groups.name) AS atomic_group_count,
+                   GROUP_CONCAT(afe_labels.name),
+                   GROUP_CONCAT(afe_atomic_groups.name)
+            FROM afe_hosts
+            INNER JOIN afe_hosts_labels ON
+                    afe_hosts.id = afe_hosts_labels.host_id
+            INNER JOIN afe_labels ON afe_hosts_labels.label_id = afe_labels.id
+            INNER JOIN afe_atomic_groups ON
+                       afe_labels.atomic_group_id = afe_atomic_groups.id
+            WHERE NOT afe_hosts.invalid AND NOT afe_labels.invalid
+            GROUP BY afe_hosts.id
             HAVING atomic_group_count > 1
             ORDER BY hostname""")
         if rows:
diff --git a/scheduler/monitor_db_unittest.py b/scheduler/monitor_db_unittest.py
index 446df33..4fa2aba 100755
--- a/scheduler/monitor_db_unittest.py
+++ b/scheduler/monitor_db_unittest.py
@@ -113,7 +113,7 @@
 
 
     def _update_hqe(self, set, where=''):
-        query = 'UPDATE host_queue_entries SET ' + set
+        query = 'UPDATE afe_host_queue_entries SET ' + set
         if where:
             query += ' WHERE ' + where
         self._do_query(query)
@@ -150,7 +150,8 @@
     def test_always_query(self):
         host_a = monitor_db.Host(id=2)
         self.assertEqual(host_a.hostname, 'host2')
-        self._do_query('UPDATE hosts SET hostname="host2-updated" WHERE id=2')
+        self._do_query('UPDATE afe_hosts SET hostname="host2-updated" '
+                       'WHERE id=2')
         host_b = monitor_db.Host(id=2, always_query=True)
         self.assert_(host_a is host_b, 'Cached instance not returned.')
         self.assertEqual(host_a.hostname, 'host2-updated',
@@ -268,13 +269,13 @@
 
     def _convert_jobs_to_metahosts(self, *job_ids):
         sql_tuple = '(' + ','.join(str(i) for i in job_ids) + ')'
-        self._do_query('UPDATE host_queue_entries SET '
+        self._do_query('UPDATE afe_host_queue_entries SET '
                        'meta_host=host_id, host_id=NULL '
                        'WHERE job_id IN ' + sql_tuple)
 
 
     def _lock_host(self, host_id):
-        self._do_query('UPDATE hosts SET locked=1 WHERE id=' +
+        self._do_query('UPDATE afe_hosts SET locked=1 WHERE id=' +
                        str(host_id))
 
 
@@ -311,16 +312,16 @@
         scheduled.
         """
         self._create_job_simple([1], use_metahosts)
-        self._do_query('UPDATE hosts SET status="Running" WHERE id=1')
+        self._do_query('UPDATE afe_hosts SET status="Running" WHERE id=1')
         self._dispatcher._schedule_new_jobs()
         self._check_for_extra_schedulings()
 
-        self._do_query('UPDATE hosts SET status="Ready", locked=1 '
+        self._do_query('UPDATE afe_hosts SET status="Ready", locked=1 '
                        'WHERE id=1')
         self._dispatcher._schedule_new_jobs()
         self._check_for_extra_schedulings()
 
-        self._do_query('UPDATE hosts SET locked=0, invalid=1 '
+        self._do_query('UPDATE afe_hosts SET locked=0, invalid=1 '
                        'WHERE id=1')
         self._dispatcher._schedule_new_jobs()
         if not use_metahosts:
@@ -337,7 +338,7 @@
 
 
     def _test_obey_ACLs_helper(self, use_metahosts):
-        self._do_query('DELETE FROM acl_groups_hosts WHERE host_id=1')
+        self._do_query('DELETE FROM afe_acl_groups_hosts WHERE host_id=1')
         self._create_job_simple([1], use_metahosts)
         self._dispatcher._schedule_new_jobs()
         self._check_for_extra_schedulings()
@@ -364,8 +365,8 @@
 
 
     def test_one_time_hosts_ignore_ACLs(self):
-        self._do_query('DELETE FROM acl_groups_hosts WHERE host_id=1')
-        self._do_query('UPDATE hosts SET invalid=1 WHERE id=1')
+        self._do_query('DELETE FROM afe_acl_groups_hosts WHERE host_id=1')
+        self._do_query('UPDATE afe_hosts SET invalid=1 WHERE id=1')
         self._create_job_simple([1])
         self._dispatcher._schedule_new_jobs()
         self._assert_job_scheduled_on(1, 1)
@@ -377,7 +378,7 @@
         Non-metahost entries can get scheduled on invalid hosts (this is how
         one-time hosts work).
         """
-        self._do_query('UPDATE hosts SET invalid=1')
+        self._do_query('UPDATE afe_hosts SET invalid=1')
         self._test_basic_scheduling_helper(False)
 
 
@@ -429,7 +430,7 @@
         job = self._setup_test_only_if_needed_labels()
         job.dependency_labels.add(self.label3)
         # should also work if the metahost is the only_if_needed label
-        self._do_query('DELETE FROM jobs_dependency_labels')
+        self._do_query('DELETE FROM afe_jobs_dependency_labels')
         self._create_job(metahosts=[3])
         self._dispatcher._schedule_new_jobs()
         self._assert_job_scheduled_on(2, 1)
@@ -580,7 +581,8 @@
 
     def test_atomic_group_scheduling_obeys_acls(self):
         # Request scheduling on a specific atomic label but be denied by ACLs.
-        self._do_query('DELETE FROM acl_groups_hosts WHERE host_id in (8,9)')
+        self._do_query('DELETE FROM afe_acl_groups_hosts '
+                       'WHERE host_id in (8,9)')
         job = self._create_job(metahosts=[self.label5.id], atomic_group=1)
         self._dispatcher._schedule_new_jobs()
         self._check_for_extra_schedulings()
@@ -615,7 +617,7 @@
 
     def test_atomic_group_scheduling_no_metahost(self):
         # Force it to schedule on the other group for a reliable test.
-        self._do_query('UPDATE hosts SET invalid=1 WHERE id=9')
+        self._do_query('UPDATE afe_hosts SET invalid=1 WHERE id=9')
         # An atomic job without a metahost.
         job = self._create_job(synchronous=True, atomic_group=1)
         self._dispatcher._schedule_new_jobs()
@@ -626,7 +628,7 @@
     def test_atomic_group_scheduling_partial_group(self):
         # Make one host in labels[3] unavailable so that there are only two
         # hosts left in the group.
-        self._do_query('UPDATE hosts SET status="Repair Failed" WHERE id=5')
+        self._do_query('UPDATE afe_hosts SET status="Repair Failed" WHERE id=5')
         job = self._create_job(synchronous=True, metahosts=[self.label4.id],
                          atomic_group=1)
         self._dispatcher._schedule_new_jobs()
@@ -639,10 +641,10 @@
     def test_atomic_group_scheduling_not_enough_available(self):
         # Mark some hosts in each atomic group label as not usable.
         # One host running, another invalid in the first group label.
-        self._do_query('UPDATE hosts SET status="Running" WHERE id=5')
-        self._do_query('UPDATE hosts SET invalid=1 WHERE id=6')
+        self._do_query('UPDATE afe_hosts SET status="Running" WHERE id=5')
+        self._do_query('UPDATE afe_hosts SET invalid=1 WHERE id=6')
         # One host invalid in the second group label.
-        self._do_query('UPDATE hosts SET invalid=1 WHERE id=9')
+        self._do_query('UPDATE afe_hosts SET invalid=1 WHERE id=9')
         # Nothing to schedule when no group label has enough (2) good hosts..
         self._create_job(atomic_group=1, synchronous=True)
         self._dispatcher._schedule_new_jobs()
@@ -658,7 +660,7 @@
 
 
     def test_atomic_group_scheduling_no_valid_hosts(self):
-        self._do_query('UPDATE hosts SET invalid=1 WHERE id in (8,9)')
+        self._do_query('UPDATE afe_hosts SET invalid=1 WHERE id in (8,9)')
         self._create_job(synchronous=True, metahosts=[self.label5.id],
                          atomic_group=1)
         self._dispatcher._schedule_new_jobs()
@@ -765,7 +767,7 @@
 
     def test_no_ready_hosts(self):
         self._create_job(hosts=[1])
-        self._do_query('UPDATE hosts SET status="Repair Failed"')
+        self._do_query('UPDATE afe_hosts SET status="Repair Failed"')
         self._dispatcher._schedule_new_jobs()
         self._check_for_extra_schedulings()
 
diff --git a/tko/create_db-postgres b/tko/create_db-postgres
index 0a34a1e..e69de29 100644
--- a/tko/create_db-postgres
+++ b/tko/create_db-postgres
@@ -1,137 +0,0 @@
--- drop all views (since they depend on some or all of the following tables)
-DROP VIEW IF EXISTS test_view;
-
--- kernel versions
-DROP SEQUENCE IF EXISTS kernel_idx_seq;
-CREATE SEQUENCE kernel_idx_seq
-    INCREMENT BY 1
-    NO MAXVALUE
-    NO MINVALUE
-    CACHE 1;
-
-DROP TABLE IF EXISTS kernels;
-CREATE TABLE kernels (
-kernel_idx int NOT NULL DEFAULT nextval('kernel_idx_seq'::TEXT) PRIMARY KEY,
-kernel_hash TEXT,        -- Hash of base + all patches
-base TEXT,            -- Base version without patches
-printable TEXT            -- Full version with patches
-);
-
--- main jobs table
-DROP SEQUENCE IF EXISTS machine_idx_seq;
-CREATE SEQUENCE machine_idx_seq
-    INCREMENT BY 1
-    NO MAXVALUE
-    NO MINVALUE
-    CACHE 1;
-
-DROP TABLE IF EXISTS machines;
-CREATE TABLE machines (
-machine_idx int NOT NULL DEFAULT nextval('machine_idx_seq'::TEXT) PRIMARY KEY,
-hostname TEXT,            -- hostname
-machine_group TEXT,        -- group name
-owner TEXT            -- owner name
-);
-
--- main jobs table
-DROP SEQUENCE IF EXISTS job_idx_seq;
-CREATE SEQUENCE job_idx_seq
-    INCREMENT BY 1
-    NO MAXVALUE
-    NO MINVALUE
-    CACHE 1;
-
-DROP TABLE IF EXISTS jobs;
-CREATE TABLE jobs (
-job_idx int NOT NULL DEFAULT nextval('job_idx_seq'::TEXT) PRIMARY KEY,
-tag TEXT,            -- job key
-label TEXT,                     -- job label assigned by user
-username TEXT,            -- user name
-machine_idx int            -- reference to machine table
-);
-
--- One entry per patch used, anywhere
-DROP TABLE IF EXISTS patches;
-CREATE TABLE patches (
-kernel_idx INTEGER,            -- index number
-name TEXT,            -- short name
-url TEXT,            -- full URL
-hash TEXT
-);
-
--- test functional results
-DROP SEQUENCE IF EXISTS test_idx_seq;
-CREATE SEQUENCE test_idx_seq
-    INCREMENT BY 1
-    NO MAXVALUE
-    NO MINVALUE
-    CACHE 1;
-
-DROP TABLE IF EXISTS tests;
-CREATE TABLE tests (
-test_idx int NOT NULL DEFAULT nextval('test_idx_seq'::TEXT) PRIMARY KEY,
-job_idx INTEGER,            -- ref to job table
-test TEXT,            -- name of test
-subdir TEXT,            -- subdirectory name
-kernel_idx INTEGER,            -- kernel test was AGAINST
-status INTEGER,                -- test status
-reason TEXT,            -- reason for test status
-machine_idx int            -- reference to machine table
-);
-
--- test functional results
-DROP TABLE IF EXISTS test_attributes;
-CREATE TABLE test_attributes (
-test_idx INTEGER,            -- ref to test table
-attribute TEXT,            -- attribute name (e.g. 'throughput')
-value TEXT            -- attribute value
-);
-
--- test functional results
-DROP TABLE IF EXISTS iteration_result;
-CREATE TABLE iteration_result(
-test_idx INTEGER,            -- ref to test table
-iteration INTEGER,            -- integer
-attribute TEXT,            -- attribute name (e.g. 'throughput')
-value FLOAT                -- attribute value (eg 700.1)
-);
-
--- status key
-DROP SEQUENCE IF EXISTS status_idx_seq;
-CREATE SEQUENCE status_idx_seq
-    INCREMENT BY 1
-    NO MAXVALUE
-    NO MINVALUE
-    CACHE 1;
-
-DROP TABLE IF EXISTS status;
-CREATE TABLE status (
-status_idx int NOT NULL DEFAULT nextval('status_idx_seq'::TEXT) PRIMARY KEY,
-word TEXT            -- status word
-);
-
--- BRRD syncronization
-DROP TABLE IF EXISTS brrd_sync;
-CREATE TABLE brrd_sync (
-test_idx INTEGER            -- ref to test table
-);
-
--- test_view (to make life easier for people trying to mine data)
-CREATE VIEW test_view AS
-SELECT tests.test_idx, tests.job_idx, tests.test, tests.subdir,
-       tests.kernel_idx, tests.status, tests.reason, jobs.machine_idx,
-       jobs.tag, jobs.label, jobs.username, machines.hostname,
-       machines.machine_group, machines.owner, kernels.kernel_hash,
-       kernels.base, kernels.printable
-FROM tests
-INNER JOIN jobs ON jobs.job_idx = tests.job_idx
-INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
-INNER JOIN kernels ON kernels.kernel_idx = tests.kernel_idx;
-
-INSERT INTO status (word) VALUES ('NOSTATUS');
-INSERT INTO status (word) VALUES ('ERROR');
-INSERT INTO status (word) VALUES ('ABORT');
-INSERT INTO status (word) VALUES ('FAIL');
-INSERT INTO status (word) VALUES ('WARN');
-INSERT INTO status (word) VALUES ('GOOD');
-INSERT INTO status (word) VALUES ('ALERT');
diff --git a/tko/db.py b/tko/db.py
index 5303ff0..536e3cc 100644
--- a/tko/db.py
+++ b/tko/db.py
@@ -21,7 +21,7 @@
         # if not present, insert statuses
         self.status_idx = {}
         self.status_word = {}
-        status_rows = self.select('status_idx, word', 'status', None)
+        status_rows = self.select('status_idx, word', 'tko_status', None)
         for s in status_rows:
             self.status_idx[s[1]] = s[0]
             self.status_word[s[0]] = s[1]
@@ -302,13 +302,13 @@
         job_idx = self.find_job(tag)
         for test_idx in self.find_tests(job_idx):
             where = {'test_idx' : test_idx}
-            self.delete('iteration_result', where)
-            self.delete('iteration_attributes', where)
-            self.delete('test_attributes', where)
-            self.delete('test_labels_tests', {'test_id': test_idx})
+            self.delete('tko_iteration_result', where)
+            self.delete('tko_iteration_attributes', where)
+            self.delete('tko_test_attributes', where)
+            self.delete('tko_test_labels_tests', {'test_id': test_idx})
         where = {'job_idx' : job_idx}
-        self.delete('tests', where)
-        self.delete('jobs', where)
+        self.delete('tko_tests', where)
+        self.delete('tko_jobs', where)
 
 
     def insert_job(self, tag, job, commit = None):
@@ -334,9 +334,9 @@
                 'afe_job_id': afe_job_id}
         is_update = hasattr(job, 'index')
         if is_update:
-            self.update('jobs', data, {'job_idx': job.index}, commit=commit)
+            self.update('tko_jobs', data, {'job_idx': job.index}, commit=commit)
         else:
-            self.insert('jobs', data, commit=commit)
+            self.insert('tko_jobs', data, commit=commit)
             job.index = self.get_last_autonumber_value()
         for test in job.tests:
             self.insert_test(job, test, commit=commit)
@@ -353,14 +353,15 @@
         is_update = hasattr(test, "test_idx")
         if is_update:
             test_idx = test.test_idx
-            self.update('tests', data, {'test_idx': test_idx}, commit=commit)
+            self.update('tko_tests', data,
+                        {'test_idx': test_idx}, commit=commit)
             where = {'test_idx': test_idx}
-            self.delete('iteration_result', where)
-            self.delete('iteration_attributes', where)
+            self.delete('tko_iteration_result', where)
+            self.delete('tko_iteration_attributes', where)
             where['user_created'] = 0
-            self.delete('test_attributes', where)
+            self.delete('tko_test_attributes', where)
         else:
-            self.insert('tests', data, commit=commit)
+            self.insert('tko_tests', data, commit=commit)
             test_idx = test.test_idx = self.get_last_autonumber_value()
         data = {'test_idx': test_idx}
 
@@ -369,23 +370,23 @@
             for key, value in i.attr_keyval.iteritems():
                 data['attribute'] = key
                 data['value'] = value
-                self.insert('iteration_attributes', data,
+                self.insert('tko_iteration_attributes', data,
                             commit=commit)
             for key, value in i.perf_keyval.iteritems():
                 data['attribute'] = key
                 data['value'] = value
-                self.insert('iteration_result', data,
+                self.insert('tko_iteration_result', data,
                             commit=commit)
 
         for key, value in test.attributes.iteritems():
             data = {'test_idx': test_idx, 'attribute': key,
                     'value': value}
-            self.insert('test_attributes', data, commit=commit)
+            self.insert('tko_test_attributes', data, commit=commit)
 
         if not is_update:
             for label_index in test.labels:
                 data = {'test_id': test_idx, 'testlabel_id': label_index}
-                self.insert('test_labels_tests', data, commit=commit)
+                self.insert('tko_test_labels_tests', data, commit=commit)
 
 
     def read_machine_map(self):
@@ -412,20 +413,20 @@
 
     def insert_machine(self, job, commit = None):
         machine_info = self.machine_info_dict(job)
-        self.insert('machines', machine_info, commit=commit)
+        self.insert('tko_machines', machine_info, commit=commit)
         return self.get_last_autonumber_value()
 
 
     def update_machine_information(self, job, commit = None):
         machine_info = self.machine_info_dict(job)
-        self.update('machines', machine_info,
+        self.update('tko_machines', machine_info,
                     where={'hostname': machine_info['hostname']},
                     commit=commit)
 
 
     def lookup_machine(self, hostname):
         where = { 'hostname' : hostname }
-        rows = self.select('machine_idx', 'machines', where)
+        rows = self.select('machine_idx', 'tko_machines', where)
         if rows:
             return rows[0][0]
         else:
@@ -433,7 +434,7 @@
 
 
     def lookup_kernel(self, kernel):
-        rows = self.select('kernel_idx', 'kernels',
+        rows = self.select('kernel_idx', 'tko_kernels',
                                 {'kernel_hash':kernel.kernel_hash})
         if rows:
             return rows[0][0]
@@ -456,7 +457,7 @@
             if not match:
                 patch_count += 1
 
-        self.insert('kernels',
+        self.insert('tko_kernels',
                     {'base':kernel.base,
                      'kernel_hash':kernel.kernel_hash,
                      'printable':printable},
@@ -465,7 +466,7 @@
 
         if patch_count > 0:
             printable += ' p%d' % (kver)
-            self.update('kernels',
+            self.update('tko_kernels',
                     {'printable':printable},
                     {'kernel_idx':kver})
 
@@ -477,7 +478,7 @@
     def insert_patch(self, kver, patch, commit = None):
         print patch.reference
         name = os.path.basename(patch.reference)[:80]
-        self.insert('patches',
+        self.insert('tko_patches',
                     {'kernel_idx': kver,
                      'name':name,
                      'url':patch.reference,
@@ -487,7 +488,7 @@
 
     def find_test(self, job_idx, testname, subdir):
         where = {'job_idx': job_idx , 'test': testname, 'subdir': subdir}
-        rows = self.select('test_idx', 'tests', where)
+        rows = self.select('test_idx', 'tko_tests', where)
         if rows:
             return rows[0][0]
         else:
@@ -496,7 +497,7 @@
 
     def find_tests(self, job_idx):
         where = { 'job_idx':job_idx }
-        rows = self.select('test_idx', 'tests', where)
+        rows = self.select('test_idx', 'tko_tests', where)
         if rows:
             return [row[0] for row in rows]
         else:
@@ -504,7 +505,7 @@
 
 
     def find_job(self, tag):
-        rows = self.select('job_idx', 'jobs', {'tag': tag})
+        rows = self.select('job_idx', 'tko_jobs', {'tag': tag})
         if rows:
             return rows[0][0]
         else:
diff --git a/tko/frontend.py b/tko/frontend.py
index 1b51270..cbc328d 100755
--- a/tko/frontend.py
+++ b/tko/frontend.py
@@ -85,7 +85,7 @@
     fields = ','.join(query_fields_list)
 
     group_by = '%s, %s, status' % (x_field, y_field)
-    rows = db_obj.select(fields, 'test_view',
+    rows = db_obj.select(fields, 'tko_test_view',
                     where=where, group_by=group_by, max_rows = MAX_RECORDS)
     return status_data(rows, x_field, y_field, query_reasons)
 
@@ -138,7 +138,7 @@
     @classmethod
     def select(klass, db):
         """Return all possible machine groups"""
-        rows = db.select('distinct machine_group', 'machines',
+        rows = db.select('distinct machine_group', 'tko_machines',
                                         'machine_group is not null')
         groupnames = sorted([row[0] for row in rows])
         return [klass(db, groupname) for groupname in groupnames]
@@ -155,7 +155,7 @@
 
     def tests(self, where = {}):
         values = [self.name]
-        sql = 't inner join machines m on m.machine_idx=t.machine_idx'
+        sql = 't inner join tko_machines m on m.machine_idx=t.machine_idx'
         sql += ' where m.machine_group=%s'
         for key in where.keys():
             sql += ' and %s=%%s' % key
@@ -168,7 +168,7 @@
     def select(klass, db, where = {}):
         fields = ['machine_idx', 'hostname', 'machine_group', 'owner']
         machines = []
-        for row in db.select(','.join(fields), 'machines', where):
+        for row in db.select(','.join(fields), 'tko_machines', where):
             machines.append(klass(db, *row))
         return machines
 
@@ -185,7 +185,7 @@
     @classmethod
     def select(klass, db, where = {}):
         fields = ['kernel_idx', 'kernel_hash', 'base', 'printable']
-        rows = db.select(','.join(fields), 'kernels', where)
+        rows = db.select(','.join(fields), 'tko_kernels', where)
         return [klass(db, *row) for row in rows]
 
 
@@ -204,7 +204,7 @@
         fields = ['test_idx', 'job_idx', 'test', 'subdir',
                   'kernel_idx', 'status', 'reason', 'machine_idx']
         tests = []
-        for row in db.select(','.join(fields), 'tests', where,
+        for row in db.select(','.join(fields), 'tko_tests', where,
                              wherein,distinct):
             tests.append(klass(db, *row))
         return tests
@@ -215,7 +215,7 @@
         fields = ['test_idx', 'job_idx', 'test', 'subdir',
                   'kernel_idx', 'status', 'reason', 'machine_idx']
         fields = ['t.'+field for field in fields]
-        rows = db.select_sql(','.join(fields), 'tests', sql, values)
+        rows = db.select_sql(','.join(fields), 'tko_tests', sql, values)
         return [klass(db, *row) for row in rows]
 
 
@@ -279,7 +279,7 @@
 class job:
     def __init__(self, db, job_idx):
         where = {'job_idx' : job_idx}
-        rows = db.select('tag, machine_idx', 'jobs', where)
+        rows = db.select('tag, machine_idx', 'tko_jobs', where)
         if rows:
             self.tag, self.machine_idx = rows[0]
             self.job_idx = job_idx
@@ -290,7 +290,7 @@
     def select(klass, db, where):
         fields = ['iteration', 'attribute', 'value']
         iterations = []
-        rows = db.select(','.join(fields), 'iteration_result', where)
+        rows = db.select(','.join(fields), 'tko_iteration_result', where)
         for row in rows:
             iterations.append(klass(*row))
         return iterations
diff --git a/tko/machine_benchmark.cgi b/tko/machine_benchmark.cgi
index c9cd636..083abc4 100755
--- a/tko/machine_benchmark.cgi
+++ b/tko/machine_benchmark.cgi
@@ -21,7 +21,7 @@
     display.set_brief_mode()
 
     ## getting available tests
-    rows = db.select('test', 'tests', {}, distinct=True)
+    rows = db.select('test', 'tko_tests', {}, distinct=True)
     all_benchmarks = []
     for row in rows:
         benchmark = row[0]
@@ -34,10 +34,10 @@
         fields_tests = 'test_idx, count(status_word)'
         where_tests = { 'subdir': benchmark, 'status_word' : 'GOOD' }
         fields_params = 'attribute'
-        for (id, count) in db.select(fields_tests, 'test_view',
+        for (id, count) in db.select(fields_tests, 'tko_test_view',
                                      where_tests, group_by='machine_hostname'):
             where_params = {'test_idx': id}
-            for (attribute) in db.select(fields_params, 'iteration_result',
+            for (attribute) in db.select(fields_params, 'tko_iteration_result',
                                          where_params):
                 available_params.add("%s - %s" % (benchmark,
                                                      attribute[0]))
@@ -87,7 +87,7 @@
         fields = 'machine_idx,machine_hostname,count(status_word)'
         where = { 'subdir': benchmark, 'status_word' : 'GOOD' }
         data = {}
-        for (idx, machine, count) in db.select(fields, 'test_view',
+        for (idx, machine, count) in db.select(fields, 'tko_test_view',
                                             where, group_by='machine_hostname'):
             data[machine] = count
             machine_idx[machine] = idx
diff --git a/tko/migrations/031_rename_tko_tables.py b/tko/migrations/031_rename_tko_tables.py
new file mode 100644
index 0000000..127ef43
--- /dev/null
+++ b/tko/migrations/031_rename_tko_tables.py
@@ -0,0 +1,378 @@
+import common
+from autotest_lib.database import db_utils
+
+
+RECREATE_VIEWS_UP = """
+CREATE VIEW tko_test_view AS
+SELECT  tko_tests.test_idx,
+        tko_tests.job_idx,
+        tko_tests.test,
+        tko_tests.subdir,
+        tko_tests.kernel_idx,
+        tko_tests.status,
+        tko_tests.reason,
+        tko_tests.machine_idx,
+        tko_tests.started_time AS test_started_time,
+        tko_tests.finished_time AS test_finished_time,
+        tko_jobs.tag AS job_tag,
+        tko_jobs.label AS job_label,
+        tko_jobs.username AS job_username,
+        tko_jobs.queued_time AS job_queued_time,
+        tko_jobs.started_time AS job_started_time,
+        tko_jobs.finished_time AS job_finished_time,
+        tko_machines.hostname AS machine_hostname,
+        tko_machines.machine_group,
+        tko_machines.owner AS machine_owner,
+        tko_kernels.kernel_hash,
+        tko_kernels.base AS kernel_base,
+        tko_kernels.printable AS kernel_printable,
+        tko_status.word AS status_word
+FROM tko_tests
+INNER JOIN tko_jobs ON tko_jobs.job_idx = tko_tests.job_idx
+INNER JOIN tko_machines ON tko_machines.machine_idx = tko_jobs.machine_idx
+INNER JOIN tko_kernels ON tko_kernels.kernel_idx = tko_tests.kernel_idx
+INNER JOIN tko_status ON tko_status.status_idx = tko_tests.status;
+
+
+CREATE VIEW tko_perf_view AS
+SELECT  tko_tests.test_idx,
+        tko_tests.job_idx,
+        tko_tests.test,
+        tko_tests.subdir,
+        tko_tests.kernel_idx,
+        tko_tests.status,
+        tko_tests.reason,
+        tko_tests.machine_idx,
+        tko_tests.started_time AS test_started_time,
+        tko_tests.finished_time AS test_finished_time,
+        tko_jobs.tag AS job_tag,
+        tko_jobs.label AS job_label,
+        tko_jobs.username AS job_username,
+        tko_jobs.queued_time AS job_queued_time,
+        tko_jobs.started_time AS job_started_time,
+        tko_jobs.finished_time AS job_finished_time,
+        tko_machines.hostname AS machine_hostname,
+        tko_machines.machine_group,
+        tko_machines.owner AS machine_owner,
+        tko_kernels.kernel_hash,
+        tko_kernels.base AS kernel_base,
+        tko_kernels.printable AS kernel_printable,
+        tko_status.word AS status_word,
+        tko_iteration_result.iteration,
+        tko_iteration_result.attribute AS iteration_key,
+        tko_iteration_result.value AS iteration_value
+FROM tko_tests
+INNER JOIN tko_jobs ON tko_jobs.job_idx = tko_tests.job_idx
+INNER JOIN tko_machines ON tko_machines.machine_idx = tko_jobs.machine_idx
+INNER JOIN tko_kernels ON tko_kernels.kernel_idx = tko_tests.kernel_idx
+INNER JOIN tko_status ON tko_status.status_idx = tko_tests.status
+INNER JOIN tko_iteration_result ON
+        tko_iteration_result.test_idx = tko_tests.test_idx;
+
+
+CREATE VIEW tko_test_view_2 AS
+SELECT  tko_tests.test_idx,
+        tko_tests.job_idx,
+        tko_tests.test AS test_name,
+        tko_tests.subdir,
+        tko_tests.kernel_idx,
+        tko_tests.status AS status_idx,
+        tko_tests.reason,
+        tko_tests.machine_idx,
+        tko_tests.started_time AS test_started_time,
+        tko_tests.finished_time AS test_finished_time,
+        tko_jobs.tag AS job_tag,
+        tko_jobs.label AS job_name,
+        tko_jobs.username AS job_owner,
+        tko_jobs.queued_time AS job_queued_time,
+        tko_jobs.started_time AS job_started_time,
+        tko_jobs.finished_time AS job_finished_time,
+        tko_jobs.afe_job_id AS afe_job_id,
+        tko_machines.hostname AS hostname,
+        tko_machines.machine_group AS platform,
+        tko_machines.owner AS machine_owner,
+        tko_kernels.kernel_hash,
+        tko_kernels.base AS kernel_base,
+        tko_kernels.printable AS kernel,
+        tko_status.word AS status
+FROM tko_tests
+INNER JOIN tko_jobs ON tko_jobs.job_idx = tko_tests.job_idx
+INNER JOIN tko_machines ON tko_machines.machine_idx = tko_jobs.machine_idx
+INNER JOIN tko_kernels ON tko_kernels.kernel_idx = tko_tests.kernel_idx
+INNER JOIN tko_status ON tko_status.status_idx = tko_tests.status;
+
+
+CREATE VIEW tko_test_view_outer_joins AS
+SELECT  tko_tests.test_idx,
+        tko_tests.job_idx,
+        tko_tests.test AS test_name,
+        tko_tests.subdir,
+        tko_tests.kernel_idx,
+        tko_tests.status AS status_idx,
+        tko_tests.reason,
+        tko_tests.machine_idx,
+        tko_tests.started_time AS test_started_time,
+        tko_tests.finished_time AS test_finished_time,
+        tko_jobs.tag AS job_tag,
+        tko_jobs.label AS job_name,
+        tko_jobs.username AS job_owner,
+        tko_jobs.queued_time AS job_queued_time,
+        tko_jobs.started_time AS job_started_time,
+        tko_jobs.finished_time AS job_finished_time,
+        tko_machines.hostname AS hostname,
+        tko_machines.machine_group AS platform,
+        tko_machines.owner AS machine_owner,
+        tko_kernels.kernel_hash,
+        tko_kernels.base AS kernel_base,
+        tko_kernels.printable AS kernel,
+        tko_status.word AS status
+FROM tko_tests
+LEFT OUTER JOIN tko_jobs ON tko_jobs.job_idx = tko_tests.job_idx
+LEFT OUTER JOIN tko_machines ON tko_machines.machine_idx = tko_jobs.machine_idx
+LEFT OUTER JOIN tko_kernels ON tko_kernels.kernel_idx = tko_tests.kernel_idx
+LEFT OUTER JOIN tko_status ON tko_status.status_idx = tko_tests.status;
+
+
+CREATE VIEW tko_perf_view_2 AS
+SELECT  tko_tests.test_idx,
+        tko_tests.job_idx,
+        tko_tests.test AS test_name,
+        tko_tests.subdir,
+        tko_tests.kernel_idx,
+        tko_tests.status AS status_idx,
+        tko_tests.reason,
+        tko_tests.machine_idx,
+        tko_tests.started_time AS test_started_time,
+        tko_tests.finished_time AS test_finished_time,
+        tko_jobs.tag AS job_tag,
+        tko_jobs.label AS job_name,
+        tko_jobs.username AS job_owner,
+        tko_jobs.queued_time AS job_queued_time,
+        tko_jobs.started_time AS job_started_time,
+        tko_jobs.finished_time AS job_finished_time,
+        tko_machines.hostname AS hostname,
+        tko_machines.machine_group AS platform,
+        tko_machines.owner AS machine_owner,
+        tko_kernels.kernel_hash,
+        tko_kernels.base AS kernel_base,
+        tko_kernels.printable AS kernel,
+        tko_status.word AS status,
+        tko_iteration_result.iteration,
+        tko_iteration_result.attribute AS iteration_key,
+        tko_iteration_result.value AS iteration_value
+FROM tko_tests
+LEFT OUTER JOIN tko_jobs ON tko_jobs.job_idx = tko_tests.job_idx
+LEFT OUTER JOIN tko_machines ON tko_machines.machine_idx = tko_jobs.machine_idx
+LEFT OUTER JOIN tko_kernels ON tko_kernels.kernel_idx = tko_tests.kernel_idx
+LEFT OUTER JOIN tko_status ON tko_status.status_idx = tko_tests.status
+LEFT OUTER JOIN tko_iteration_result ON
+        tko_iteration_result.test_idx = tko_tests.test_idx;
+"""
+
+
+RECREATE_VIEWS_DOWN = """
+CREATE VIEW test_view AS
+SELECT  tests.test_idx,
+        tests.job_idx,
+        tests.test,
+        tests.subdir,
+        tests.kernel_idx,
+        tests.status,
+        tests.reason,
+        tests.machine_idx,
+        tests.started_time AS test_started_time,
+        tests.finished_time AS test_finished_time,
+        jobs.tag AS job_tag,
+        jobs.label AS job_label,
+        jobs.username AS job_username,
+        jobs.queued_time AS job_queued_time,
+        jobs.started_time AS job_started_time,
+        jobs.finished_time AS job_finished_time,
+        machines.hostname AS machine_hostname,
+        machines.machine_group,
+        machines.owner AS machine_owner,
+        kernels.kernel_hash,
+        kernels.base AS kernel_base,
+        kernels.printable AS kernel_printable,
+        status.word AS status_word
+FROM tests
+INNER JOIN jobs ON jobs.job_idx = tests.job_idx
+INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
+INNER JOIN kernels ON kernels.kernel_idx = tests.kernel_idx
+INNER JOIN status ON status.status_idx = tests.status;
+
+
+CREATE VIEW perf_view AS
+SELECT  tests.test_idx,
+        tests.job_idx,
+        tests.test,
+        tests.subdir,
+        tests.kernel_idx,
+        tests.status,
+        tests.reason,
+        tests.machine_idx,
+        tests.started_time AS test_started_time,
+        tests.finished_time AS test_finished_time,
+        jobs.tag AS job_tag,
+        jobs.label AS job_label,
+        jobs.username AS job_username,
+        jobs.queued_time AS job_queued_time,
+        jobs.started_time AS job_started_time,
+        jobs.finished_time AS job_finished_time,
+        machines.hostname AS machine_hostname,
+        machines.machine_group,
+        machines.owner AS machine_owner,
+        kernels.kernel_hash,
+        kernels.base AS kernel_base,
+        kernels.printable AS kernel_printable,
+        status.word AS status_word,
+        iteration_result.iteration,
+        iteration_result.attribute AS iteration_key,
+        iteration_result.value AS iteration_value
+FROM tests
+INNER JOIN jobs ON jobs.job_idx = tests.job_idx
+INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
+INNER JOIN kernels ON kernels.kernel_idx = tests.kernel_idx
+INNER JOIN status ON status.status_idx = tests.status
+INNER JOIN iteration_result ON iteration_result.test_idx = tests.test_idx;
+
+
+CREATE VIEW test_view_outer_joins AS
+SELECT  tests.test_idx,
+        tests.job_idx,
+        tests.test AS test_name,
+        tests.subdir,
+        tests.kernel_idx,
+        tests.status AS status_idx,
+        tests.reason,
+        tests.machine_idx,
+        tests.started_time AS test_started_time,
+        tests.finished_time AS test_finished_time,
+        jobs.tag AS job_tag,
+        jobs.label AS job_name,
+        jobs.username AS job_owner,
+        jobs.queued_time AS job_queued_time,
+        jobs.started_time AS job_started_time,
+        jobs.finished_time AS job_finished_time,
+        machines.hostname AS hostname,
+        machines.machine_group AS platform,
+        machines.owner AS machine_owner,
+        kernels.kernel_hash,
+        kernels.base AS kernel_base,
+        kernels.printable AS kernel,
+        status.word AS status
+FROM tests
+LEFT OUTER JOIN jobs ON jobs.job_idx = tests.job_idx
+LEFT OUTER JOIN machines ON machines.machine_idx = jobs.machine_idx
+LEFT OUTER JOIN kernels ON kernels.kernel_idx = tests.kernel_idx
+LEFT OUTER JOIN status ON status.status_idx = tests.status;
+
+
+CREATE VIEW test_view_2 AS
+SELECT  tests.test_idx,
+        tests.job_idx,
+        tests.test AS test_name,
+        tests.subdir,
+        tests.kernel_idx,
+        tests.status AS status_idx,
+        tests.reason,
+        tests.machine_idx,
+        tests.started_time AS test_started_time,
+        tests.finished_time AS test_finished_time,
+        jobs.tag AS job_tag,
+        jobs.label AS job_name,
+        jobs.username AS job_owner,
+        jobs.queued_time AS job_queued_time,
+        jobs.started_time AS job_started_time,
+        jobs.finished_time AS job_finished_time,
+        jobs.afe_job_id AS afe_job_id,
+        machines.hostname AS hostname,
+        machines.machine_group AS platform,
+        machines.owner AS machine_owner,
+        kernels.kernel_hash,
+        kernels.base AS kernel_base,
+        kernels.printable AS kernel,
+        status.word AS status
+FROM tests
+INNER JOIN jobs ON jobs.job_idx = tests.job_idx
+INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
+INNER JOIN kernels ON kernels.kernel_idx = tests.kernel_idx
+INNER JOIN status ON status.status_idx = tests.status;
+
+
+CREATE VIEW perf_view_2 AS
+SELECT  tests.test_idx,
+        tests.job_idx,
+        tests.test AS test_name,
+        tests.subdir,
+        tests.kernel_idx,
+        tests.status AS status_idx,
+        tests.reason,
+        tests.machine_idx,
+        tests.started_time AS test_started_time,
+        tests.finished_time AS test_finished_time,
+        jobs.tag AS job_tag,
+        jobs.label AS job_name,
+        jobs.username AS job_owner,
+        jobs.queued_time AS job_queued_time,
+        jobs.started_time AS job_started_time,
+        jobs.finished_time AS job_finished_time,
+        machines.hostname AS hostname,
+        machines.machine_group AS platform,
+        machines.owner AS machine_owner,
+        kernels.kernel_hash,
+        kernels.base AS kernel_base,
+        kernels.printable AS kernel,
+        status.word AS status,
+        iteration_result.iteration,
+        iteration_result.attribute AS iteration_key,
+        iteration_result.value AS iteration_value
+FROM tests
+LEFT OUTER JOIN jobs ON jobs.job_idx = tests.job_idx
+LEFT OUTER JOIN machines ON machines.machine_idx = jobs.machine_idx
+LEFT OUTER JOIN kernels ON kernels.kernel_idx = tests.kernel_idx
+LEFT OUTER JOIN status ON status.status_idx = tests.status
+LEFT OUTER JOIN iteration_result ON iteration_result.test_idx = tests.test_idx;
+"""
+
+
+ORIG_NAMES = (
+        'embedded_graphing_queries',
+        'iteration_attributes',
+        'iteration_result',
+        'jobs',
+        'kernels',
+        'machines',
+        'patches',
+        'query_history',
+        'saved_queries',
+        'status',
+        'test_attributes',
+        'test_labels',
+        'test_labels_tests',
+        'tests',
+        )
+
+RENAMES_UP = dict((name, 'tko_' + name) for name in ORIG_NAMES)
+VIEWS_TO_DROP_UP = (
+        'test_view',
+        'test_view_2',
+        'test_view_outer_joins',
+        'perf_view',
+        'perf_view_2',
+        )
+
+RENAMES_DOWN = dict((value, key) for key, value in RENAMES_UP.iteritems())
+VIEWS_TO_DROP_DOWN = ['tko_' + view for view in VIEWS_TO_DROP_UP]
+
+
+def migrate_up(manager):
+    db_utils.drop_views(manager, VIEWS_TO_DROP_UP)
+    db_utils.rename(manager, RENAMES_UP)
+    manager.execute_script(RECREATE_VIEWS_UP)
+
+
+def migrate_down(manager):
+    db_utils.drop_views(manager, VIEWS_TO_DROP_DOWN)
+    db_utils.rename(manager, RENAMES_DOWN)
+    manager.execute_script(RECREATE_VIEWS_DOWN)
diff --git a/tko/migrations/common.py b/tko/migrations/common.py
new file mode 100644
index 0000000..4c8760b
--- /dev/null
+++ b/tko/migrations/common.py
@@ -0,0 +1,8 @@
+import os, sys
+dirname = os.path.dirname(sys.modules[__name__].__file__)
+autotest_dir = os.path.abspath(os.path.join(dirname, "..", '..'))
+client_dir = os.path.join(autotest_dir, "client")
+sys.path.insert(0, client_dir)
+import setup_modules
+sys.path.pop(0)
+setup_modules.setup(base_path=autotest_dir, root_module_name="autotest_lib")
diff --git a/tko/nightly.py b/tko/nightly.py
index 5a08608..16c8044 100644
--- a/tko/nightly.py
+++ b/tko/nightly.py
@@ -8,7 +8,7 @@
 
 
 def add_kernel_jobs(label_pattern):
-    cmd = "select job_idx from jobs where label like '%s'" % label_pattern
+    cmd = "select job_idx from tko_jobs where label like '%s'" % label_pattern
     nrows = perf.db_cur.execute(cmd)
     return [row[0] for row in perf.db_cur.fetchall()]
 
@@ -23,7 +23,7 @@
 
 
 def get_test_attributes(testrunx):
-    cmd = ( "select attribute, value from test_attributes"
+    cmd = ( "select attribute, value from tko_test_attributes"
             " where test_idx = %d" % testrunx )
     nrows = perf.db_cur.execute(cmd)
     return dict(perf.db_cur.fetchall())
@@ -49,7 +49,7 @@
     # TODO: add filtering on test series?
     runs = {}   # platform --> list of test runs
     for jobx in jobs:
-        cmd = ( "select test_idx, machine_idx from  tests"
+        cmd = ( "select test_idx, machine_idx from tko_tests"
                 " where job_idx = %s and test = %s" )
         args = [jobx, test]
         nrows = perf.db_cur.execute(cmd, args)
@@ -178,11 +178,11 @@
 
 
 def get_testrun_context(testrun):
-    cmd = ( 'select jobs.label, jobs.tag, tests.subdir,'
-            ' tests.started_time'
-            ' from jobs, tests'
-            ' where jobs.job_idx = tests.job_idx'
-            ' and tests.test_idx = %d' % testrun )
+    cmd = ( 'select tko_jobs.label, tko_jobs.tag, tko_tests.subdir,'
+            ' tko_tests.started_time'
+            ' from tko_jobs, tko_tests'
+            ' where tko_jobs.job_idx = tko_tests.job_idx'
+            ' and tko_tests.test_idx = %d' % testrun )
     nrows = perf.db_cur.execute(cmd)
     assert nrows == 1
     row = perf.db_cur.fetchone()
diff --git a/tko/parse.py b/tko/parse.py
index a82ab0a..7639a76 100755
--- a/tko/parse.py
+++ b/tko/parse.py
@@ -84,7 +84,7 @@
             tko_utils.dprint("! Job is already parsed, done")
             return
 
-        raw_old_tests = db.select("test_idx,subdir,test", "tests",
+        raw_old_tests = db.select("test_idx,subdir,test", "tko_tests",
                                   {"job_idx": old_job_idx})
         if raw_old_tests:
             old_tests = dict(((test, subdir), test_idx)
diff --git a/tko/query_history.cgi b/tko/query_history.cgi
index a1e2f3a..627238d 100755
--- a/tko/query_history.cgi
+++ b/tko/query_history.cgi
@@ -14,7 +14,7 @@
     where = (condition,[])
     try:
         rows = db_obj.select("time_created,user_comment,url",
-                     "query_history", where)
+                     "tko_query_history", where)
     except MySQLdb.ProgrammingError, err:
         print err
         rows = ()
diff --git a/tko/retrieve_jobs b/tko/retrieve_jobs
index 58985c6..74bdbca 100755
--- a/tko/retrieve_jobs
+++ b/tko/retrieve_jobs
@@ -7,5 +7,5 @@
     arg = ''
 
 db = db.db()
-for record in db.select('* from jobs ' + arg):
+for record in db.select('* from tko_jobs ' + arg):
     print record
diff --git a/tko/save_query.cgi b/tko/save_query.cgi
index 04db50b..d651cf3 100755
--- a/tko/save_query.cgi
+++ b/tko/save_query.cgi
@@ -31,7 +31,7 @@
     data_to_insert = {'uid':uid, 'time_created':tm,
               'user_comment':comment, 'url':HTTP_REFERER }
     try:
-        db_obj.insert('query_history', data_to_insert)
+        db_obj.insert('tko_query_history', data_to_insert)
     except:
         raise QueryHistoryError("Could not save query")
 
@@ -41,7 +41,7 @@
     db_obj = db.db()
     data_to_delete = {'time_created':time_stamp}
     try:
-        db_obj.delete('query_history', data_to_delete)
+        db_obj.delete('tko_query_history', data_to_delete)
     except Exception:
         raise QueryHistoryError("Could not delete query")
     
@@ -78,6 +78,3 @@
 
 
 main()
-
-
-
diff --git a/utils/test_importer.py b/utils/test_importer.py
index bc74cf7..28ebce9 100755
--- a/utils/test_importer.py
+++ b/utils/test_importer.py
@@ -3,7 +3,7 @@
 # Copyright 2008 Google Inc. All Rights Reserved.
 """
 This utility allows for easy updating, removing and importing
-of tests into the autotest_web autotests table.
+of tests into the autotest_web afe_autotests table.
 
 Example of updating client side tests:
 ./test_importer.py -t /usr/local/autotest/client/tests
@@ -178,7 +178,7 @@
     connection=db_connect()
     cursor = connection.cursor()
     # Get tests
-    sql = "SELECT id, path FROM autotests";
+    sql = "SELECT id, path FROM afe_autotests"
     cursor.execute(sql)
     results = cursor.fetchall()
     for test_id, path in results:
@@ -186,13 +186,15 @@
         if not os.path.isfile(full_path):
             if verbose:
                 print "Removing " + path
-            db_execute(cursor, "DELETE FROM autotests WHERE id=%s" % test_id)
-            db_execute(cursor, "DELETE FROM autotests_dependency_labels WHERE "
+            db_execute(cursor,
+                       "DELETE FROM afe_autotests WHERE id=%s" % test_id)
+            db_execute(cursor,
+                       "DELETE FROM afe_autotests_dependency_labels WHERE "
                                "test_id=%s" % test_id)
 
     # Find profilers that are no longer present
     profilers = []
-    sql = "SELECT name FROM profilers"
+    sql = "SELECT name FROM afe_profilers"
     cursor.execute(sql)
     results = cursor.fetchall()
     for path in results:
@@ -200,7 +202,7 @@
         if not os.path.exists(full_path):
             if verbose:
                 print "Removing " + path[0]
-            sql = "DELETE FROM profilers WHERE name='%s'" % path[0]
+            sql = "DELETE FROM afe_profilers WHERE name='%s'" % path[0]
             db_execute(cursor, sql)
 
 
@@ -223,17 +225,18 @@
         else:
             doc = profilers[profiler]
         # check if test exists
-        sql = "SELECT name FROM profilers WHERE name='%s'" % name
+        sql = "SELECT name FROM afe_profilers WHERE name='%s'" % name
         cursor.execute(sql)
         results = cursor.fetchall()
         if results:
-            sql = "UPDATE profilers SET name='%s', description='%s' "\
+            sql = "UPDATE afe_profilers SET name='%s', description='%s' "\
                   "WHERE name='%s'"
             sql %= (MySQLdb.escape_string(name), MySQLdb.escape_string(doc),
                     MySQLdb.escape_string(name))
         else:
             # Insert newly into DB
-            sql = "INSERT into profilers (name, description) VALUES('%s', '%s')"
+            sql = ("INSERT into afe_profilers (name, description) "
+                   "VALUES('%s', '%s')")
             sql %= (MySQLdb.escape_string(name), MySQLdb.escape_string(doc))
 
         db_execute(cursor, sql)
@@ -277,13 +280,13 @@
         # clean tests for insertion into db
         new_test = dict_db_clean(new_test)
         new_test_dicts.append(new_test)
-        sql = "SELECT name,path FROM autotests WHERE path='%s' LIMIT 1"
+        sql = "SELECT name,path FROM afe_autotests WHERE path='%s' LIMIT 1"
         sql %= new_test['path']
         cursor.execute(sql)
         # check for entries already in existence
         results = cursor.fetchall()
         if results:
-            sql = ("UPDATE autotests SET name='%s', test_class='%s',"
+            sql = ("UPDATE afe_autotests SET name='%s', test_class='%s',"
                   "description='%s', test_type=%d, path='%s',"
                   "author='%s', dependencies='%s',"
                   "experimental=%d, run_verify=%d, test_time=%d,"
@@ -299,7 +302,7 @@
         else:
             # Create a relative path
             path = test.replace(autotest_dir, '')
-            sql = ("INSERT INTO autotests"
+            sql = ("INSERT INTO afe_autotests"
                   "(name, test_class, description, test_type, path, "
                   "author, dependencies, experimental, "
                   "run_verify, test_time, test_category, sync_count) "
@@ -372,15 +375,15 @@
     if not tests:
         return
 
-    label_name_to_id = get_id_map(cursor, 'labels', 'name')
-    test_path_to_id = get_id_map(cursor, 'autotests', 'path')
+    label_name_to_id = get_id_map(cursor, 'afe_labels', 'name')
+    test_path_to_id = get_id_map(cursor, 'afe_autotests', 'path')
 
     # clear out old relationships
     test_ids = ','.join(str(test_path_to_id[test['path']])
                         for test in tests)
     db_execute(cursor,
-               'DELETE FROM autotests_dependency_labels WHERE test_id IN (%s)' %
-               test_ids)
+               'DELETE FROM afe_autotests_dependency_labels '
+                       'WHERE test_id IN (%s)' % test_ids)
 
     value_pairs = []
     for test in tests:
@@ -398,7 +401,7 @@
     if not value_pairs:
         return
 
-    query = ('INSERT INTO autotests_dependency_labels (test_id, label_id) '
+    query = ('INSERT INTO afe_autotests_dependency_labels (test_id, label_id) '
              'VALUES ' + ','.join(value_pairs))
     db_execute(cursor, query)